From eceaf38f521982bad6dbac1c02becdd80fd6af7c Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:34 +0000 Subject: KVM: arm64: Rename .idmap.text in hyp linker script So far hyp-init.S created a .hyp.idmap.text section directly, without relying on the hyp linker script to prefix its name. Change it to create .idmap.text and add a HYP_SECTION entry to hyp.lds.S. This way all .hyp* sections go through the linker script and can be instrumented there. Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-2-dbrazdil@google.com --- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 2 +- arch/arm64/kvm/hyp/nvhe/hyp.lds.S | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index 31b060a44045..68fd64f2313e 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -18,7 +18,7 @@ #include .text - .pushsection .hyp.idmap.text, "ax" + .pushsection .idmap.text, "ax" .align 11 diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S index 1206d0d754d5..70ac48ccede7 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S @@ -12,6 +12,7 @@ #include SECTIONS { + HYP_SECTION(.idmap.text) HYP_SECTION(.text) /* * .hyp..data..percpu needs to be page aligned to maintain the same -- cgit v1.2.3 From 16174eea2e4fe8247e04c17da682f2034fec0369 Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:35 +0000 Subject: KVM: arm64: Set up .hyp.rodata ELF section We will need to recognize pointers in .rodata specific to hyp, so establish a .hyp.rodata ELF section. Merge it with the existing .hyp.data..ro_after_init as they are treated the same at runtime. 
Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-3-dbrazdil@google.com --- arch/arm64/include/asm/sections.h | 2 +- arch/arm64/kernel/vmlinux.lds.S | 7 ++++--- arch/arm64/kvm/arm.c | 7 +++---- arch/arm64/kvm/hyp/nvhe/hyp.lds.S | 4 +++- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 8ff579361731..a6f3557d1ab2 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -11,7 +11,7 @@ extern char __alt_instructions[], __alt_instructions_end[]; extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; -extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[]; +extern char __hyp_rodata_start[], __hyp_rodata_end[]; extern char __idmap_text_start[], __idmap_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 4c0b0c89ad59..9672b54bba7c 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -31,10 +31,11 @@ jiffies = jiffies_64; __stop___kvm_ex_table = .; #define HYPERVISOR_DATA_SECTIONS \ - HYP_SECTION_NAME(.data..ro_after_init) : { \ - __hyp_data_ro_after_init_start = .; \ + HYP_SECTION_NAME(.rodata) : { \ + __hyp_rodata_start = .; \ *(HYP_SECTION_NAME(.data..ro_after_init)) \ - __hyp_data_ro_after_init_end = .; \ + *(HYP_SECTION_NAME(.rodata)) \ + __hyp_rodata_end = .; \ } #define HYPERVISOR_PERCPU_SECTION \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 04c44853b103..de1af4052780 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1749,11 +1749,10 @@ static int init_hyp_mode(void) goto out_err; } - err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start), - kvm_ksym_ref(__hyp_data_ro_after_init_end), - PAGE_HYP_RO); + err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start), + kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO); if (err) { - kvm_err("Cannot map .hyp.data..ro_after_init section\n"); + kvm_err("Cannot map .hyp.rodata section\n"); goto out_err; } diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S index 70ac48ccede7..cfdc59b4329b 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S @@ -14,6 +14,9 @@ SECTIONS { HYP_SECTION(.idmap.text) HYP_SECTION(.text) + HYP_SECTION(.data..ro_after_init) + HYP_SECTION(.rodata) + /* * .hyp..data..percpu needs to be page aligned to maintain the same * alignment for when linking into vmlinux. @@ -22,5 +25,4 @@ SECTIONS { HYP_SECTION_NAME(.data..percpu) : { PERCPU_INPUT(L1_CACHE_BYTES) } - HYP_SECTION(.data..ro_after_init) } -- cgit v1.2.3 From f7a4825d9569593b9a81f0768313b86175691ef1 Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:36 +0000 Subject: KVM: arm64: Add symbol at the beginning of each hyp section Generating hyp relocations will require referencing positions at a given offset from the beginning of hyp sections. Since the final layout will not be determined until the linking of `vmlinux`, modify the hyp linker script to insert a symbol at the first byte of each hyp section to use as an anchor. The linker of `vmlinux` will place the symbols together with the sections. 
Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-4-dbrazdil@google.com --- arch/arm64/include/asm/hyp_image.h | 29 +++++++++++++++++++++++++++-- arch/arm64/kvm/hyp/nvhe/hyp.lds.S | 4 ++-- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h index daa1a1da539e..737ded6b6d0d 100644 --- a/arch/arm64/include/asm/hyp_image.h +++ b/arch/arm64/include/asm/hyp_image.h @@ -7,6 +7,9 @@ #ifndef __ARM64_HYP_IMAGE_H__ #define __ARM64_HYP_IMAGE_H__ +#define __HYP_CONCAT(a, b) a ## b +#define HYP_CONCAT(a, b) __HYP_CONCAT(a, b) + /* * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, * to separate it from the kernel proper. @@ -21,9 +24,31 @@ */ #define HYP_SECTION_NAME(NAME) .hyp##NAME +/* Symbol defined at the beginning of each hyp section. */ +#define HYP_SECTION_SYMBOL_NAME(NAME) \ + HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME)) + +/* + * Helper to generate linker script statements starting a hyp section. + * + * A symbol with a well-known name is defined at the first byte. This + * is used as a base for hyp relocations (see gen-hyprel.c). It must + * be defined inside the section so the linker of `vmlinux` cannot + * separate it from the section data. + */ +#define BEGIN_HYP_SECTION(NAME) \ + HYP_SECTION_NAME(NAME) : { \ + HYP_SECTION_SYMBOL_NAME(NAME) = .; + +/* Helper to generate linker script statements ending a hyp section. */ +#define END_HYP_SECTION \ + } + /* Defines an ELF hyp section from input section @NAME and its subsections. */ -#define HYP_SECTION(NAME) \ - HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) } +#define HYP_SECTION(NAME) \ + BEGIN_HYP_SECTION(NAME) \ + *(NAME NAME##.*) \ + END_HYP_SECTION /* * Defines a linker script alias of a kernel-proper symbol referenced by diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S index cfdc59b4329b..cd119d82d8e3 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S @@ -22,7 +22,7 @@ SECTIONS { * alignment for when linking into vmlinux. */ . = ALIGN(PAGE_SIZE); - HYP_SECTION_NAME(.data..percpu) : { + BEGIN_HYP_SECTION(.data..percpu) PERCPU_INPUT(L1_CACHE_BYTES) - } + END_HYP_SECTION } -- cgit v1.2.3 From 8c49b5d43d4c45ca0bb0d1faa23feef2e76e89fa Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:37 +0000 Subject: KVM: arm64: Generate hyp relocation data Add a post-processing step to compilation of KVM nVHE hyp code which calls a custom host tool (gen-hyprel) on the partially linked object file (hyp sections' names prefixed). The tool lists all R_AARCH64_ABS64 data relocations targeting hyp sections and generates an assembly file that will form a new section .hyp.reloc in the kernel binary. The new section contains an array of 32-bit offsets to the positions targeted by these relocations. Since these addresses of those positions will not be determined until linking of `vmlinux`, each 32-bit entry carries a R_AARCH64_PREL32 relocation with addend + . The linker of `vmlinux` will therefore fill the slot accordingly. This relocation data will be used at runtime to convert the kernel VAs at those positions to hyp VAs. 
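For illustration, here is a minimal C sketch (not part of the patch; the function and callback names are invented) of how a table of self-relative 32-bit offsets such as .hyp.reloc can be consumed: each entry holds the signed distance from the entry itself to a 64-bit slot whose value needs rewriting. The kernel-side walk added later in this series (kvm_apply_hyp_relocations) follows the same pattern, with the fixup being the kernel-VA-to-hyp-VA conversion and the writes going through the linear map alias.

#include <stdint.h>

static void apply_prel32_table(int32_t *begin, int32_t *end,
			       uint64_t (*fixup)(uint64_t))
{
	int32_t *rel;

	for (rel = begin; rel < end; rel++) {
		/* Entry value is "target address - address of this entry". */
		uint64_t *slot = (uint64_t *)((char *)rel + *rel);

		/* Rewrite the stored value, e.g. kernel VA -> hyp VA. */
		*slot = fixup(*slot);
	}
}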
Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-5-dbrazdil@google.com --- arch/arm64/kernel/vmlinux.lds.S | 11 + arch/arm64/kvm/hyp/nvhe/.gitignore | 2 + arch/arm64/kvm/hyp/nvhe/Makefile | 28 ++- arch/arm64/kvm/hyp/nvhe/gen-hyprel.c | 413 +++++++++++++++++++++++++++++++++++ 4 files changed, 451 insertions(+), 3 deletions(-) create mode 100644 arch/arm64/kvm/hyp/nvhe/gen-hyprel.c diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 9672b54bba7c..636ca45aa1d4 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -43,10 +43,19 @@ jiffies = jiffies_64; HYP_SECTION_NAME(.data..percpu) : { \ *(HYP_SECTION_NAME(.data..percpu)) \ } + +#define HYPERVISOR_RELOC_SECTION \ + .hyp.reloc : ALIGN(4) { \ + __hyp_reloc_begin = .; \ + *(.hyp.reloc) \ + __hyp_reloc_end = .; \ + } + #else /* CONFIG_KVM */ #define HYPERVISOR_EXTABLE #define HYPERVISOR_DATA_SECTIONS #define HYPERVISOR_PERCPU_SECTION +#define HYPERVISOR_RELOC_SECTION #endif #define HYPERVISOR_TEXT \ @@ -217,6 +226,8 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) HYPERVISOR_PERCPU_SECTION + HYPERVISOR_RELOC_SECTION + .rela.dyn : ALIGN(8) { *(.rela .rela*) } diff --git a/arch/arm64/kvm/hyp/nvhe/.gitignore b/arch/arm64/kvm/hyp/nvhe/.gitignore index 695d73d0249e..5b6c43cc96f8 100644 --- a/arch/arm64/kvm/hyp/nvhe/.gitignore +++ b/arch/arm64/kvm/hyp/nvhe/.gitignore @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only +gen-hyprel hyp.lds +hyp-reloc.S diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 1f1e351c5fe2..268be1376f74 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -6,6 +6,8 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ ccflags-y := -D__KVM_NVHE_HYPERVISOR__ +hostprogs := gen-hyprel + obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ hyp-main.o hyp-smp.o psci-relay.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ @@ -19,7 +21,7 @@ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y)) obj-y := kvm_nvhe.o -extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds +extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o # 1) Compile all source files to `.nvhe.o` object files. The file extension # avoids file name clashes for files shared with VHE. @@ -42,11 +44,31 @@ LDFLAGS_kvm_nvhe.tmp.o := -r -T $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE $(call if_changed,ld) -# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'. +# 4) Generate list of hyp code/data positions that need to be relocated at +# runtime. Because the hypervisor is part of the kernel binary, relocations +# produce a kernel VA. We enumerate relocations targeting hyp at build time +# and convert the kernel VAs at those positions to hyp VAs. +$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel + $(call if_changed,hyprel) + +# 5) Compile hyp-reloc.S and link it into the existing partially linked object. +# The object file now contains a section with pointers to hyp positions that +# will contain kernel VAs at runtime. These pointers have relocations on them +# so that they get updated as the hyp object is linked into `vmlinux`. 
+LDFLAGS_kvm_nvhe.rel.o := -r +$(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE + $(call if_changed,ld) + +# 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'. # Prefixes names of ELF symbols with '__kvm_nvhe_'. -$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE +$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE $(call if_changed,hypcopy) +# The HYPREL command calls `gen-hyprel` to generate an assembly file with +# a list of relocations targeting hyp code/data. +quiet_cmd_hyprel = HYPREL $@ + cmd_hyprel = $(obj)/gen-hyprel $< > $@ + # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names # to avoid clashes with VHE code/data. quiet_cmd_hypcopy = HYPCOPY $@ diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c new file mode 100644 index 000000000000..58fe31fdba8e --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 - Google LLC + * Author: David Brazdil + * + * Generates relocation information used by the kernel to convert + * absolute addresses in hyp data from kernel VAs to hyp VAs. + * + * This is necessary because hyp code is linked into the same binary + * as the kernel but executes under different memory mappings. + * If the compiler used absolute addressing, those addresses need to + * be converted before they are used by hyp code. + * + * The input of this program is the relocatable ELF object containing + * all hyp code/data, not yet linked into vmlinux. Hyp section names + * should have been prefixed with `.hyp` at this point. + * + * The output (printed to stdout) is an assembly file containing + * an array of 32-bit integers and static relocations that instruct + * the linker of `vmlinux` to populate the array entries with offsets + * to positions in the kernel binary containing VAs used by hyp code. + * + * Note that dynamic relocations could be used for the same purpose. + * However, those are only generated if CONFIG_RELOCATABLE=y. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HYP_SECTION_PREFIX ".hyp" +#define HYP_RELOC_SECTION ".hyp.reloc" +#define HYP_SECTION_SYMBOL_PREFIX "__hyp_section_" + +/* + * AArch64 relocation type constants. + * Included in case these are not defined in the host toolchain. 
+ */ +#ifndef R_AARCH64_ABS64 +#define R_AARCH64_ABS64 257 +#endif +#ifndef R_AARCH64_LD_PREL_LO19 +#define R_AARCH64_LD_PREL_LO19 273 +#endif +#ifndef R_AARCH64_ADR_PREL_LO21 +#define R_AARCH64_ADR_PREL_LO21 274 +#endif +#ifndef R_AARCH64_ADR_PREL_PG_HI21 +#define R_AARCH64_ADR_PREL_PG_HI21 275 +#endif +#ifndef R_AARCH64_ADR_PREL_PG_HI21_NC +#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 +#endif +#ifndef R_AARCH64_ADD_ABS_LO12_NC +#define R_AARCH64_ADD_ABS_LO12_NC 277 +#endif +#ifndef R_AARCH64_LDST8_ABS_LO12_NC +#define R_AARCH64_LDST8_ABS_LO12_NC 278 +#endif +#ifndef R_AARCH64_TSTBR14 +#define R_AARCH64_TSTBR14 279 +#endif +#ifndef R_AARCH64_CONDBR19 +#define R_AARCH64_CONDBR19 280 +#endif +#ifndef R_AARCH64_JUMP26 +#define R_AARCH64_JUMP26 282 +#endif +#ifndef R_AARCH64_CALL26 +#define R_AARCH64_CALL26 283 +#endif +#ifndef R_AARCH64_LDST16_ABS_LO12_NC +#define R_AARCH64_LDST16_ABS_LO12_NC 284 +#endif +#ifndef R_AARCH64_LDST32_ABS_LO12_NC +#define R_AARCH64_LDST32_ABS_LO12_NC 285 +#endif +#ifndef R_AARCH64_LDST64_ABS_LO12_NC +#define R_AARCH64_LDST64_ABS_LO12_NC 286 +#endif +#ifndef R_AARCH64_MOVW_PREL_G0 +#define R_AARCH64_MOVW_PREL_G0 287 +#endif +#ifndef R_AARCH64_MOVW_PREL_G0_NC +#define R_AARCH64_MOVW_PREL_G0_NC 288 +#endif +#ifndef R_AARCH64_MOVW_PREL_G1 +#define R_AARCH64_MOVW_PREL_G1 289 +#endif +#ifndef R_AARCH64_MOVW_PREL_G1_NC +#define R_AARCH64_MOVW_PREL_G1_NC 290 +#endif +#ifndef R_AARCH64_MOVW_PREL_G2 +#define R_AARCH64_MOVW_PREL_G2 291 +#endif +#ifndef R_AARCH64_MOVW_PREL_G2_NC +#define R_AARCH64_MOVW_PREL_G2_NC 292 +#endif +#ifndef R_AARCH64_MOVW_PREL_G3 +#define R_AARCH64_MOVW_PREL_G3 293 +#endif +#ifndef R_AARCH64_LDST128_ABS_LO12_NC +#define R_AARCH64_LDST128_ABS_LO12_NC 299 +#endif + +/* Global state of the processed ELF. */ +static struct { + const char *path; + char *begin; + size_t size; + Elf64_Ehdr *ehdr; + Elf64_Shdr *sh_table; + const char *sh_string; +} elf; + +#define fatal_error(fmt, ...) \ + ({ \ + fprintf(stderr, "error: %s: " fmt "\n", \ + elf.path, ## __VA_ARGS__); \ + exit(EXIT_FAILURE); \ + __builtin_unreachable(); \ + }) + +#define fatal_perror(msg) \ + ({ \ + fprintf(stderr, "error: %s: " msg ": %s\n", \ + elf.path, strerror(errno)); \ + exit(EXIT_FAILURE); \ + __builtin_unreachable(); \ + }) + +#define assert_op(lhs, rhs, fmt, op) \ + ({ \ + typeof(lhs) _lhs = (lhs); \ + typeof(rhs) _rhs = (rhs); \ + \ + if (!(_lhs op _rhs)) { \ + fatal_error("assertion " #lhs " " #op " " #rhs \ + " failed (lhs=" fmt ", rhs=" fmt \ + ", line=%d)", _lhs, _rhs, __LINE__); \ + } \ + }) + +#define assert_eq(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, ==) +#define assert_ne(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, !=) +#define assert_lt(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, <) +#define assert_ge(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, >=) + +/* + * Return a pointer of a given type at a given offset from + * the beginning of the ELF file. + */ +#define elf_ptr(type, off) ((type *)(elf.begin + (off))) + +/* Iterate over all sections in the ELF. */ +#define for_each_section(var) \ + for (var = elf.sh_table; var < elf.sh_table + elf.ehdr->e_shnum; ++var) + +/* Iterate over all Elf64_Rela relocations in a given section. */ +#define for_each_rela(shdr, var) \ + for (var = elf_ptr(Elf64_Rela, shdr->sh_offset); \ + var < elf_ptr(Elf64_Rela, shdr->sh_offset + shdr->sh_size); var++) + +/* True if a string starts with a given prefix. 
*/ +static inline bool starts_with(const char *str, const char *prefix) +{ + return memcmp(str, prefix, strlen(prefix)) == 0; +} + +/* Returns a string containing the name of a given section. */ +static inline const char *section_name(Elf64_Shdr *shdr) +{ + return elf.sh_string + shdr->sh_name; +} + +/* Returns a pointer to the first byte of section data. */ +static inline const char *section_begin(Elf64_Shdr *shdr) +{ + return elf_ptr(char, shdr->sh_offset); +} + +/* Find a section by its offset from the beginning of the file. */ +static inline Elf64_Shdr *section_by_off(Elf64_Off off) +{ + assert_ne(off, 0UL, "%lu"); + return elf_ptr(Elf64_Shdr, off); +} + +/* Find a section by its index. */ +static inline Elf64_Shdr *section_by_idx(uint16_t idx) +{ + assert_ne(idx, SHN_UNDEF, "%u"); + return &elf.sh_table[idx]; +} + +/* + * Memory-map the given ELF file, perform sanity checks, and + * populate global state. + */ +static void init_elf(const char *path) +{ + int fd, ret; + struct stat stat; + + /* Store path in the global struct for error printing. */ + elf.path = path; + + /* Open the ELF file. */ + fd = open(path, O_RDONLY); + if (fd < 0) + fatal_perror("Could not open ELF file"); + + /* Get status of ELF file to obtain its size. */ + ret = fstat(fd, &stat); + if (ret < 0) { + close(fd); + fatal_perror("Could not get status of ELF file"); + } + + /* mmap() the entire ELF file read-only at an arbitrary address. */ + elf.begin = mmap(0, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (elf.begin == MAP_FAILED) { + close(fd); + fatal_perror("Could not mmap ELF file"); + } + + /* mmap() was successful, close the FD. */ + close(fd); + + /* Get pointer to the ELF header. */ + assert_ge(stat.st_size, sizeof(*elf.ehdr), "%lu"); + elf.ehdr = elf_ptr(Elf64_Ehdr, 0); + + /* Check the ELF magic. */ + assert_eq(elf.ehdr->e_ident[EI_MAG0], ELFMAG0, "0x%x"); + assert_eq(elf.ehdr->e_ident[EI_MAG1], ELFMAG1, "0x%x"); + assert_eq(elf.ehdr->e_ident[EI_MAG2], ELFMAG2, "0x%x"); + assert_eq(elf.ehdr->e_ident[EI_MAG3], ELFMAG3, "0x%x"); + + /* Sanity check that this is an ELF64 relocatable object for AArch64. */ + assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u"); + assert_eq(elf.ehdr->e_ident[EI_DATA], ELFDATA2LSB, "%u"); + assert_eq(elf.ehdr->e_type, ET_REL, "%u"); + assert_eq(elf.ehdr->e_machine, EM_AARCH64, "%u"); + + /* Populate fields of the global struct. */ + elf.sh_table = section_by_off(elf.ehdr->e_shoff); + elf.sh_string = section_begin(section_by_idx(elf.ehdr->e_shstrndx)); +} + +/* Print the prologue of the output ASM file. */ +static void emit_prologue(void) +{ + printf(".data\n" + ".pushsection " HYP_RELOC_SECTION ", \"a\"\n"); +} + +/* Print ASM statements needed as a prologue to a processed hyp section. */ +static void emit_section_prologue(const char *sh_orig_name) +{ + /* Declare the hyp section symbol. */ + printf(".global %s%s\n", HYP_SECTION_SYMBOL_PREFIX, sh_orig_name); +} + +/* + * Print ASM statements to create a hyp relocation entry for a given + * R_AARCH64_ABS64 relocation. + * + * The linker of vmlinux will populate the position given by `rela` with + * an absolute 64-bit kernel VA. If the kernel is relocatable, it will + * also generate a dynamic relocation entry so that the kernel can shift + * the address at runtime for KASLR. + * + * Emit a 32-bit offset from the current address to the position given + * by `rela`. This way the kernel can iterate over all kernel VAs used + * by hyp at runtime and convert them to hyp VAs. 
However, that offset + * will not be known until linking of `vmlinux`, so emit a PREL32 + * relocation referencing a symbol that the hyp linker script put at + * the beginning of the relocated section + the offset from `rela`. + */ +static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name) +{ + /* Offset of this reloc from the beginning of HYP_RELOC_SECTION. */ + static size_t reloc_offset; + + /* Create storage for the 32-bit offset. */ + printf(".word 0\n"); + + /* + * Create a PREL32 relocation which instructs the linker of `vmlinux` + * to insert offset to position + , where is + * a symbol at the beginning of the relocated section, and + * is `rela->r_offset`. + */ + printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n", + reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name, + rela->r_offset); + + reloc_offset += 4; +} + +/* Print the epilogue of the output ASM file. */ +static void emit_epilogue(void) +{ + printf(".popsection\n"); +} + +/* + * Iterate over all RELA relocations in a given section and emit + * hyp relocation data for all absolute addresses in hyp code/data. + * + * Static relocations that generate PC-relative-addressing are ignored. + * Failure is reported for unexpected relocation types. + */ +static void emit_rela_section(Elf64_Shdr *sh_rela) +{ + Elf64_Shdr *sh_orig = &elf.sh_table[sh_rela->sh_info]; + const char *sh_orig_name = section_name(sh_orig); + Elf64_Rela *rela; + + /* Skip all non-hyp sections. */ + if (!starts_with(sh_orig_name, HYP_SECTION_PREFIX)) + return; + + emit_section_prologue(sh_orig_name); + + for_each_rela(sh_rela, rela) { + uint32_t type = (uint32_t)rela->r_info; + + /* Check that rela points inside the relocated section. */ + assert_lt(rela->r_offset, sh_orig->sh_size, "0x%lx"); + + switch (type) { + /* + * Data relocations to generate absolute addressing. + * Emit a hyp relocation. + */ + case R_AARCH64_ABS64: + emit_rela_abs64(rela, sh_orig_name); + break; + /* Allow relocations to generate PC-relative addressing. */ + case R_AARCH64_LD_PREL_LO19: + case R_AARCH64_ADR_PREL_LO21: + case R_AARCH64_ADR_PREL_PG_HI21: + case R_AARCH64_ADR_PREL_PG_HI21_NC: + case R_AARCH64_ADD_ABS_LO12_NC: + case R_AARCH64_LDST8_ABS_LO12_NC: + case R_AARCH64_LDST16_ABS_LO12_NC: + case R_AARCH64_LDST32_ABS_LO12_NC: + case R_AARCH64_LDST64_ABS_LO12_NC: + case R_AARCH64_LDST128_ABS_LO12_NC: + break; + /* Allow relative relocations for control-flow instructions. */ + case R_AARCH64_TSTBR14: + case R_AARCH64_CONDBR19: + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + break; + /* Allow group relocations to create PC-relative offset inline. */ + case R_AARCH64_MOVW_PREL_G0: + case R_AARCH64_MOVW_PREL_G0_NC: + case R_AARCH64_MOVW_PREL_G1: + case R_AARCH64_MOVW_PREL_G1_NC: + case R_AARCH64_MOVW_PREL_G2: + case R_AARCH64_MOVW_PREL_G2_NC: + case R_AARCH64_MOVW_PREL_G3: + break; + default: + fatal_error("Unexpected RELA type %u", type); + } + } +} + +/* Iterate over all sections and emit hyp relocation data for RELA sections. 
*/ +static void emit_all_relocs(void) +{ + Elf64_Shdr *shdr; + + for_each_section(shdr) { + switch (shdr->sh_type) { + case SHT_REL: + fatal_error("Unexpected SHT_REL section \"%s\"", + section_name(shdr)); + case SHT_RELA: + emit_rela_section(shdr); + break; + } + } +} + +int main(int argc, const char **argv) +{ + if (argc != 2) { + fprintf(stderr, "Usage: %s \n", argv[0]); + return EXIT_FAILURE; + } + + init_elf(argv[1]); + + emit_prologue(); + emit_all_relocs(); + emit_epilogue(); + + return EXIT_SUCCESS; +} -- cgit v1.2.3 From 6ec6259d7084ed32e164c9f7b69049464dd90fa5 Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:38 +0000 Subject: KVM: arm64: Apply hyp relocations at runtime KVM nVHE code runs under a different VA mapping than the kernel, hence so far it avoided using absolute addressing because the VA in a constant pool is relocated by the linker to a kernel VA (see hyp_symbol_addr). Now the kernel has access to a list of positions that contain a kimg VA but will be accessed only in hyp execution context. These are generated by the gen-hyprel build-time tool and stored in .hyp.reloc. Add early boot pass over the entries and convert the kimg VAs to hyp VAs. Note that this requires for .hyp* ELF sections to be mapped read-write at that point. Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-6-dbrazdil@google.com --- arch/arm64/include/asm/kvm_mmu.h | 1 + arch/arm64/include/asm/sections.h | 1 + arch/arm64/kernel/smp.c | 4 +++- arch/arm64/kvm/va_layout.c | 28 ++++++++++++++++++++++++++++ 4 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index e52d82aeadca..6bbb44011c84 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -129,6 +129,7 @@ alternative_cb_end void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void kvm_compute_layout(void); +void kvm_apply_hyp_relocations(void); static __always_inline unsigned long __kern_hyp_va(unsigned long v) { diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index a6f3557d1ab2..2f36b16a5b5d 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -12,6 +12,7 @@ extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; extern char __hyp_rodata_start[], __hyp_rodata_end[]; +extern char __hyp_reloc_begin[], __hyp_reloc_end[]; extern char __idmap_text_start[], __idmap_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index ad00f99ee9b0..357590beaabb 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -434,8 +434,10 @@ static void __init hyp_mode_check(void) "CPU: CPUs started in inconsistent modes"); else pr_info("CPU: All CPU(s) started at EL1\n"); - if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) + if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) { kvm_compute_layout(); + kvm_apply_hyp_relocations(); + } } void __init smp_cpus_done(unsigned int max_cpus) diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index 70fcd6a12fe1..fee7dcd95d73 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -81,6 +81,34 @@ __init void 
kvm_compute_layout(void) init_hyp_physvirt_offset(); } +/* + * The .hyp.reloc ELF section contains a list of kimg positions that + * contains kimg VAs but will be accessed only in hyp execution context. + * Convert them to hyp VAs. See gen-hyprel.c for more details. + */ +__init void kvm_apply_hyp_relocations(void) +{ + int32_t *rel; + int32_t *begin = (int32_t *)__hyp_reloc_begin; + int32_t *end = (int32_t *)__hyp_reloc_end; + + for (rel = begin; rel < end; ++rel) { + uintptr_t *ptr, kimg_va; + + /* + * Each entry contains a 32-bit relative offset from itself + * to a kimg VA position. + */ + ptr = (uintptr_t *)lm_alias((char *)rel + *rel); + + /* Read the kimg VA value at the relocation address. */ + kimg_va = *ptr; + + /* Convert to hyp VA and store back to the relocation address. */ + *ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va)); + } +} + static u32 compute_instruction(int n, u32 rd, u32 rn) { u32 insn = AARCH64_BREAK_FAULT; -- cgit v1.2.3 From 97cbd2fc0257c6af7036a9a6415ca8ad43535d6b Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:39 +0000 Subject: KVM: arm64: Fix constant-pool users in hyp Hyp code uses absolute addressing to obtain a kimg VA of a small number of kernel symbols. Since the kernel now converts constant pool addresses to hyp VAs, this trick does not work anymore. Change the helpers to convert from hyp VA back to kimg VA or PA, as needed and rework the callers accordingly. Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-7-dbrazdil@google.com --- arch/arm64/include/asm/kvm_mmu.h | 42 +++++++++++++++----------------------- arch/arm64/kvm/hyp/nvhe/host.S | 29 +++++++++++++------------- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 2 -- 3 files changed, 31 insertions(+), 42 deletions(-) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6bbb44011c84..adadc468cc71 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -73,49 +73,39 @@ alternative_cb_end .endm /* - * Convert a kernel image address to a PA - * reg: kernel address to be converted in place + * Convert a hypervisor VA to a PA + * reg: hypervisor address to be converted in place * tmp: temporary register - * - * The actual code generation takes place in kvm_get_kimage_voffset, and - * the instructions below are only there to reserve the space and - * perform the register allocation (kvm_get_kimage_voffset uses the - * specific registers encoded in the instructions). */ -.macro kimg_pa reg, tmp -alternative_cb kvm_get_kimage_voffset - movz \tmp, #0 - movk \tmp, #0, lsl #16 - movk \tmp, #0, lsl #32 - movk \tmp, #0, lsl #48 -alternative_cb_end - - /* reg = __pa(reg) */ - sub \reg, \reg, \tmp +.macro hyp_pa reg, tmp + ldr_l \tmp, hyp_physvirt_offset + add \reg, \reg, \tmp .endm /* - * Convert a kernel image address to a hyp VA - * reg: kernel address to be converted in place + * Convert a hypervisor VA to a kernel image address + * reg: hypervisor address to be converted in place * tmp: temporary register * * The actual code generation takes place in kvm_get_kimage_voffset, and * the instructions below are only there to reserve the space and - * perform the register allocation (kvm_update_kimg_phys_offset uses the + * perform the register allocation (kvm_get_kimage_voffset uses the * specific registers encoded in the instructions). */ -.macro kimg_hyp_va reg, tmp -alternative_cb kvm_update_kimg_phys_offset +.macro hyp_kimg_va reg, tmp + /* Convert hyp VA -> PA. 
*/ + hyp_pa \reg, \tmp + + /* Load kimage_voffset. */ +alternative_cb kvm_get_kimage_voffset movz \tmp, #0 movk \tmp, #0, lsl #16 movk \tmp, #0, lsl #32 movk \tmp, #0, lsl #48 alternative_cb_end - sub \reg, \reg, \tmp - mov_q \tmp, PAGE_OFFSET - orr \reg, \reg, \tmp - kern_hyp_va \reg + /* Convert PA -> kimg VA. */ + add \reg, \reg, \tmp .endm #else diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index a820dfdc9c25..6585a7cbbc56 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter) * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par); */ SYM_FUNC_START(__hyp_do_panic) - /* Load the format arguments into x1-7 */ - mov x6, x3 - get_vcpu_ptr x7, x3 - - mrs x3, esr_el2 - mrs x4, far_el2 - mrs x5, hpfar_el2 - /* Prepare and exit to the host's panic funciton. */ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) msr spsr_el2, lr ldr lr, =panic + hyp_kimg_va lr, x6 msr elr_el2, lr - /* - * Set the panic format string and enter the host, conditionally - * restoring the host context. - */ + /* Set the panic format string. Use the, now free, LR as scratch. */ + ldr lr, =__hyp_panic_string + hyp_kimg_va lr, x6 + + /* Load the format arguments into x1-7. */ + mov x6, x3 + get_vcpu_ptr x7, x3 + mrs x3, esr_el2 + mrs x4, far_el2 + mrs x5, hpfar_el2 + + /* Enter the host, conditionally restoring the host context. */ cmp x0, xzr - ldr x0, =__hyp_panic_string + mov x0, lr b.eq __host_enter_without_restoring b __host_enter_for_panic SYM_FUNC_END(__hyp_do_panic) @@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic) * Preserve x0-x4, which may contain stub parameters. */ ldr x5, =__kvm_handle_stub_hvc - kimg_pa x5, x6 + hyp_pa x5, x6 br x5 .L__vect_end\@: .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index 68fd64f2313e..99b408fe09ee 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -139,7 +139,6 @@ alternative_else_nop_endif /* Set the host vector */ ldr x0, =__kvm_hyp_host_vector - kimg_hyp_va x0, x1 msr vbar_el2, x0 ret @@ -198,7 +197,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) /* Leave idmap. */ mov x0, x29 ldr x1, =kvm_host_psci_cpu_entry - kimg_hyp_va x1, x2 br x1 SYM_CODE_END(__kvm_hyp_init_cpu) -- cgit v1.2.3 From 537db4af26e3f2e0f304f2032bc593f7e2a54938 Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:40 +0000 Subject: KVM: arm64: Remove patching of fn pointers in hyp Storing a function pointer in hyp now generates relocation information used at early boot to convert the address to hyp VA. The existing alternative-based conversion mechanism is therefore obsolete. Remove it and simplify its users. 
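To make the mechanism concrete, a hypothetical, self-contained example (names invented, not taken from the patch) of the pattern that now works in hyp without any address patching: each initialiser below emits an R_AARCH64_ABS64 relocation, gen-hyprel records the slot in .hyp.reloc, and the early-boot pass rewrites the stored kernel VA to a hyp VA before the table is ever dereferenced.

typedef void (*example_handler_t)(void);

static void example_handler(void)
{
}

/*
 * Lives in .rodata (prefixed to .hyp.rodata); the slot holding the address
 * of example_handler is fixed up via .hyp.reloc at early boot.
 */
static const example_handler_t example_table[] = {
	example_handler,
};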
Acked-by: Ard Biesheuvel Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-8-dbrazdil@google.com --- arch/arm64/include/asm/kvm_mmu.h | 18 ------------------ arch/arm64/kernel/image-vars.h | 1 - arch/arm64/kvm/hyp/nvhe/hyp-main.c | 11 ++++------- arch/arm64/kvm/va_layout.c | 6 ------ 4 files changed, 4 insertions(+), 32 deletions(-) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index adadc468cc71..90873851f677 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -135,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) -static __always_inline unsigned long __kimg_hyp_va(unsigned long v) -{ - unsigned long offset; - - asm volatile(ALTERNATIVE_CB("movz %0, #0\n" - "movk %0, #0, lsl #16\n" - "movk %0, #0, lsl #32\n" - "movk %0, #0, lsl #48\n", - kvm_update_kimg_phys_offset) - : "=r" (offset)); - - return __kern_hyp_va((v - offset) | PAGE_OFFSET); -} - -#define kimg_fn_hyp_va(v) ((typeof(*v))(__kimg_hyp_va((unsigned long)(v)))) - -#define kimg_fn_ptr(x) (typeof(x) **)(x) - /* * We currently support using a VM-specified IPA size. For backward * compatibility, the default IPA size is fixed to 40bits. diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index f676243abac6..23f1a557bd9f 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -64,7 +64,6 @@ __efistub__ctype = _ctype; /* Alternative callbacks for init-time patching of nVHE hyp code. */ KVM_NVHE_ALIAS(kvm_patch_vector_branch); KVM_NVHE_ALIAS(kvm_update_va_mask); -KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset); KVM_NVHE_ALIAS(kvm_get_kimage_voffset); /* Global kernel state accessed by nVHE hyp code. 
*/ diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index a906f9e2ff34..f012f8665ecc 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt) typedef void (*hcall_t)(struct kvm_cpu_context *); -#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x) +#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x -static const hcall_t *host_hcall[] = { +static const hcall_t host_hcall[] = { HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_flush_vm_context), HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa), @@ -130,7 +130,6 @@ static const hcall_t *host_hcall[] = { static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(unsigned long, id, host_ctxt, 0); - const hcall_t *kfn; hcall_t hfn; id -= KVM_HOST_SMCCC_ID(0); @@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) if (unlikely(id >= ARRAY_SIZE(host_hcall))) goto inval; - kfn = host_hcall[id]; - if (unlikely(!kfn)) + hfn = host_hcall[id]; + if (unlikely(!hfn)) goto inval; cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS; - - hfn = kimg_fn_hyp_va(kfn); hfn(host_ctxt); return; diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index fee7dcd95d73..978301392d67 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -283,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst *updptr++ = cpu_to_le32(insn); } -void kvm_update_kimg_phys_offset(struct alt_instr *alt, - __le32 *origptr, __le32 *updptr, int nr_inst) -{ - generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst); -} - void kvm_get_kimage_voffset(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { -- cgit v1.2.3 From 247bc166e6b3b1e4068f120f55582a3aa210cc2d Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Tue, 5 Jan 2021 18:05:41 +0000 Subject: KVM: arm64: Remove hyp_symbol_addr Hyp code used the hyp_symbol_addr helper to force PC-relative addressing because absolute addressing results in kernel VAs due to the way hyp code is linked. This is not true anymore, so remove the helper and update all of its users. Acked-by: Ard Biesheuvel Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210105180541.65031-9-dbrazdil@google.com --- arch/arm64/include/asm/kvm_asm.h | 26 -------------------------- arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++-- arch/arm64/kvm/hyp/nvhe/hyp-smp.c | 4 ++-- arch/arm64/kvm/hyp/nvhe/psci-relay.c | 24 ++++++++++++------------ arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c | 2 +- 5 files changed, 17 insertions(+), 43 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 8a33d83ea843..22d933e9b59e 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); -#if defined(GCC_VERSION) && GCC_VERSION < 50000 -#define SYM_CONSTRAINT "i" -#else -#define SYM_CONSTRAINT "S" -#endif - -/* - * Obtain the PC-relative address of a kernel symbol - * s: symbol - * - * The goal of this macro is to return a symbol's address based on a - * PC-relative computation, as opposed to a loading the VA from a - * constant pool or something similar. This works well for HYP, as an - * absolute VA is guaranteed to be wrong. 
Only use this if trying to - * obtain the address of a symbol (i.e. not something you obtained by - * following a pointer). - */ -#define hyp_symbol_addr(s) \ - ({ \ - typeof(s) *addr; \ - asm("adrp %0, %1\n" \ - "add %0, %0, :lo12:%1\n" \ - : "=r" (addr) : SYM_CONSTRAINT (&s)); \ - addr; \ - }) - #define __KVM_EXTABLE(from, to) \ " .pushsection __kvm_ex_table, \"a\"\n" \ " .align 3\n" \ diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 84473574c2e7..54f4860cd87c 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void) struct exception_table_entry *entry, *end; unsigned long elr_el2 = read_sysreg(elr_el2); - entry = hyp_symbol_addr(__start___kvm_ex_table); - end = hyp_symbol_addr(__stop___kvm_ex_table); + entry = &__start___kvm_ex_table; + end = &__stop___kvm_ex_table; while (entry < end) { addr = (unsigned long)&entry->insn + entry->insn; diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c index 2997aa156d8e..879559057dee 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c @@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu) if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base)) hyp_panic(); - cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base); + cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base; this_cpu_base = kern_hyp_va(cpu_base_array[cpu]); - elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start); + elf_base = (unsigned long)&__per_cpu_start; return this_cpu_base - elf_base; } diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index e3947846ffcb..f254a425cb3a 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -134,8 +134,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) if (cpu_id == INVALID_CPU_ID) return PSCI_RET_INVALID_PARAMS; - boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id); - init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id); + boot_args = per_cpu_ptr(&cpu_on_args, cpu_id); + init_params = per_cpu_ptr(&kvm_init_params, cpu_id); /* Check if the target CPU is already being booted. */ if (!try_acquire_boot_args(boot_args)) @@ -146,7 +146,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) wmb(); ret = psci_call(func_id, mpidr, - __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)), + __hyp_pa(&kvm_hyp_cpu_entry), __hyp_pa(init_params)); /* If successful, the lock will be released by the target CPU. */ @@ -165,8 +165,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) struct psci_boot_args *boot_args; struct kvm_nvhe_init_params *init_params; - boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); - init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params)); + boot_args = this_cpu_ptr(&suspend_args); + init_params = this_cpu_ptr(&kvm_init_params); /* * No need to acquire a lock before writing to boot_args because a core @@ -180,7 +180,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) * point if it is a deep sleep state. 
*/ return psci_call(func_id, power_state, - __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)), + __hyp_pa(&kvm_hyp_cpu_resume), __hyp_pa(init_params)); } @@ -192,8 +192,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) struct psci_boot_args *boot_args; struct kvm_nvhe_init_params *init_params; - boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); - init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params)); + boot_args = this_cpu_ptr(&suspend_args); + init_params = this_cpu_ptr(&kvm_init_params); /* * No need to acquire a lock before writing to boot_args because a core @@ -204,7 +204,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) /* Will only return on error. */ return psci_call(func_id, - __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)), + __hyp_pa(&kvm_hyp_cpu_resume), __hyp_pa(init_params), 0); } @@ -213,12 +213,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on) struct psci_boot_args *boot_args; struct kvm_cpu_context *host_ctxt; - host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt; + host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; if (is_cpu_on) - boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args)); + boot_args = this_cpu_ptr(&cpu_on_args); else - boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); + boot_args = this_cpu_ptr(&suspend_args); cpu_reg(host_ctxt, 0) = boot_args->r0; write_sysreg_el2(boot_args->pc, SYS_ELR); diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 8f0585640241..87a54375bd6e 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) } rd = kvm_vcpu_dabt_get_rd(vcpu); - addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va; + addr = kvm_vgic_global_state.vcpu_hyp_va; addr += fault_ipa - vgic->vgic_cpu_base; if (kvm_vcpu_dabt_iswrite(vcpu)) { -- cgit v1.2.3 From 87b26801f02ca9d7a110eb598dae8cd5d3bcace2 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Mon, 25 Jan 2021 14:54:14 +0000 Subject: KVM: arm64: Simplify __kvm_hyp_init HVC detection The arguments for __do_hyp_init are now passed with a pointer to a struct which means there are scratch registers available for use. Thanks to this, we no longer need to use clever, but hard to read, tricks that avoid the need for scratch registers when checking for the __kvm_hyp_init HVC. Tested-by: David Brazdil Signed-off-by: Andrew Scull Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210125145415.122439-2-ascull@google.com --- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index 31b060a44045..b3915ccb23b0 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -55,17 +55,10 @@ __do_hyp_init: cmp x0, #HVC_STUB_HCALL_NR b.lo __kvm_handle_stub_hvc - // We only actively check bits [24:31], and everything - // else has to be zero, which we check at build time. 
-#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF) -#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value -#endif - - ror x0, x0, #24 - eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF) - ror x0, x0, #4 - eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF) - cbz x0, 1f + mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) + cmp x0, x3 + b.eq 1f + mov x0, #SMCCC_RET_NOT_SUPPORTED eret -- cgit v1.2.3 From 8ed80051c8c31d1587722fdb3af16677eba9d693 Mon Sep 17 00:00:00 2001 From: Yanan Wang Date: Thu, 14 Jan 2021 20:13:48 +0800 Subject: KVM: arm64: Adjust partial code of hyp stage-1 map and guest stage-2 map Procedures of hyp stage-1 map and guest stage-2 map are quite different, but they are tied closely by function kvm_set_valid_leaf_pte(). So adjust the relative code for ease of code maintenance in the future. Signed-off-by: Will Deacon Signed-off-by: Yanan Wang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210114121350.123684-2-wangyanan55@huawei.com --- arch/arm64/kvm/hyp/pgtable.c | 55 ++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index bdf8e55ed308..a11ac874bc2a 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -170,10 +170,9 @@ static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp) smp_store_release(ptep, pte); } -static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr, - u32 level) +static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level) { - kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(pa); + kvm_pte_t pte = kvm_phys_to_pte(pa); u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE : KVM_PTE_TYPE_BLOCK; @@ -181,12 +180,7 @@ static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr, pte |= FIELD_PREP(KVM_PTE_TYPE, type); pte |= KVM_PTE_VALID; - /* Tolerate KVM recreating the exact same mapping. */ - if (kvm_pte_valid(old)) - return old == pte; - - smp_store_release(ptep, pte); - return true; + return pte; } static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr, @@ -341,12 +335,17 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot, static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct hyp_map_data *data) { + kvm_pte_t new, old = *ptep; u64 granule = kvm_granule_size(level), phys = data->phys; if (!kvm_block_mapping_supported(addr, end, phys, level)) return false; - WARN_ON(!kvm_set_valid_leaf_pte(ptep, phys, data->attr, level)); + /* Tolerate KVM recreating the exact same mapping */ + new = kvm_init_valid_leaf_pte(phys, data->attr, level); + if (old != new && !WARN_ON(kvm_pte_valid(old))) + smp_store_release(ptep, new); + data->phys += granule; return true; } @@ -465,27 +464,30 @@ static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { + kvm_pte_t new, old = *ptep; u64 granule = kvm_granule_size(level), phys = data->phys; + struct page *page = virt_to_page(ptep); if (!kvm_block_mapping_supported(addr, end, phys, level)) return false; - /* - * If the PTE was already valid, drop the refcount on the table - * early, as it will be bumped-up again in stage2_map_walk_leaf(). - * This ensures that the refcount stays constant across a valid to - * valid PTE update. 
- */ - if (kvm_pte_valid(*ptep)) - put_page(virt_to_page(ptep)); + new = kvm_init_valid_leaf_pte(phys, data->attr, level); + if (kvm_pte_valid(old)) { + /* Tolerate KVM recreating the exact same mapping */ + if (old == new) + goto out; - if (kvm_set_valid_leaf_pte(ptep, phys, data->attr, level)) - goto out; + /* + * There's an existing different valid leaf entry, so perform + * break-before-make. + */ + kvm_set_invalid_pte(ptep); + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); + put_page(page); + } - /* There's an existing valid leaf entry, so perform break-before-make */ - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); - kvm_set_valid_leaf_pte(ptep, phys, data->attr, level); + smp_store_release(ptep, new); + get_page(page); out: data->phys += granule; return true; @@ -527,7 +529,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, } if (stage2_map_walker_try_leaf(addr, end, level, ptep, data)) - goto out_get_page; + return 0; if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; @@ -551,9 +553,8 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, } kvm_set_table_pte(ptep, childp); - -out_get_page: get_page(page); + return 0; } -- cgit v1.2.3 From 694d071f8d85d504055540a27f0dbe9dbf44584e Mon Sep 17 00:00:00 2001 From: Yanan Wang Date: Thu, 14 Jan 2021 20:13:49 +0800 Subject: KVM: arm64: Filter out the case of only changing permissions from stage-2 map path (1) During running time of a a VM with numbers of vCPUs, if some vCPUs access the same GPA almost at the same time and the stage-2 mapping of the GPA has not been built yet, as a result they will all cause translation faults. The first vCPU builds the mapping, and the followed ones end up updating the valid leaf PTE. Note that these vCPUs might want different access permissions (RO, RW, RX, RWX, etc.). (2) It's inevitable that we sometimes will update an existing valid leaf PTE in the map path, and we perform break-before-make in this case. Then more unnecessary translation faults could be caused if the *break stage* of BBM is just catched by other vCPUS. With (1) and (2), something unsatisfactory could happen: vCPU A causes a translation fault and builds the mapping with RW permissions, vCPU B then update the valid leaf PTE with break-before-make and permissions are updated back to RO. Besides, *break stage* of BBM may trigger more translation faults. Finally, some useless small loops could occur. We can make some optimization to solve above problems: When we need to update a valid leaf PTE in the map path, let's filter out the case where this update only change access permissions, and don't update the valid leaf PTE here in this case. Instead, let the vCPU enter back the guest and it will exit next time to go through the relax_perms path without break-before-make if it still wants more permissions. 
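A small worked example may help here; it uses simplified, made-up bit positions rather than the real KVM_PTE_* layout, and only demonstrates how XOR-ing the old and new PTE and masking out the permission field separates a permission-only update (skip the update, let the vCPU fault again and take the relax-permissions path) from a genuine change of mapping that requires break-before-make. In the patch itself this is the `(old ^ new) & ~KVM_PTE_LEAF_ATTR_S2_PERMS` test in stage2_map_walker_try_leaf(), which returns -EAGAIN for the permission-only case.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend the R/W/XN permission bits live at 0x00c0. */
#define EX_PTE_PERMS	0x00c0ULL

static bool needs_break_before_make(uint64_t old, uint64_t new)
{
	/* Bits that differ between old and new, ignoring permissions. */
	return ((old ^ new) & ~EX_PTE_PERMS) != 0;
}

int main(void)
{
	uint64_t old      = 0x1200ULL | 0x0040ULL;	/* output address 0x12, read-only  */
	uint64_t new_perm = 0x1200ULL | 0x00c0ULL;	/* same output address, read-write */
	uint64_t new_oa   = 0x3400ULL | 0x0040ULL;	/* different output address        */

	printf("permission-only change needs BBM? %d\n",
	       needs_break_before_make(old, new_perm));	/* prints 0 */
	printf("output-address change needs BBM?  %d\n",
	       needs_break_before_make(old, new_oa));	/* prints 1 */
	return 0;
}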
Signed-off-by: Yanan Wang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210114121350.123684-3-wangyanan55@huawei.com --- arch/arm64/include/asm/kvm_pgtable.h | 5 +++++ arch/arm64/kvm/hyp/pgtable.c | 32 +++++++++++++++++++++----------- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 52ab38db04c7..8886d43cfb11 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -157,6 +157,11 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); * If device attributes are not explicitly requested in @prot, then the * mapping will be normal, cacheable. * + * Note that the update of a valid leaf PTE in this function will be aborted, + * if it's trying to recreate the exact same mapping or only change the access + * permissions. Instead, the vCPU will exit one more time from guest if still + * needed and then go through the path of relaxing permissions. + * * Note that this function will both coalesce existing table entries and split * existing block mappings, relying on page-faults to fault back areas outside * of the new mapping lazily. diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index a11ac874bc2a..4d177ce1d536 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -45,6 +45,10 @@ #define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54) +#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \ + KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \ + KVM_PTE_LEAF_ATTR_HI_S2_XN) + struct kvm_pgtable_walk_data { struct kvm_pgtable *pgt; struct kvm_pgtable_walker *walker; @@ -460,22 +464,27 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot, return 0; } -static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, - kvm_pte_t *ptep, - struct stage2_map_data *data) +static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, + kvm_pte_t *ptep, + struct stage2_map_data *data) { kvm_pte_t new, old = *ptep; u64 granule = kvm_granule_size(level), phys = data->phys; struct page *page = virt_to_page(ptep); if (!kvm_block_mapping_supported(addr, end, phys, level)) - return false; + return -E2BIG; new = kvm_init_valid_leaf_pte(phys, data->attr, level); if (kvm_pte_valid(old)) { - /* Tolerate KVM recreating the exact same mapping */ - if (old == new) - goto out; + /* + * Skip updating the PTE if we are trying to recreate the exact + * same mapping or only change the access permissions. Instead, + * the vCPU will exit one more time from guest if still needed + * and then go through the path of relaxing permissions. 
+ */ + if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS))) + return -EAGAIN; /* * There's an existing different valid leaf entry, so perform @@ -488,9 +497,8 @@ static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, smp_store_release(ptep, new); get_page(page); -out: data->phys += granule; - return true; + return 0; } static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, @@ -518,6 +526,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { + int ret; kvm_pte_t *childp, pte = *ptep; struct page *page = virt_to_page(ptep); @@ -528,8 +537,9 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, return 0; } - if (stage2_map_walker_try_leaf(addr, end, level, ptep, data)) - return 0; + ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data); + if (ret != -E2BIG) + return ret; if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; -- cgit v1.2.3 From 509552e65ae8287178a5cdea2d734dcd2d6380ab Mon Sep 17 00:00:00 2001 From: Yanan Wang Date: Thu, 14 Jan 2021 20:13:50 +0800 Subject: KVM: arm64: Mark the page dirty only if the fault is handled successfully We now set the pfn dirty and mark the page dirty before calling fault handlers in user_mem_abort(), so we might end up having spurious dirty pages if update of permissions or mapping has failed. Let's move these two operations after the fault handlers, and they will be done only if the fault has been handled successfully. When an -EAGAIN errno is returned from the map handler, we hope to the vcpu to enter guest directly instead of exiting back to userspace, so adjust the return value at the end of function. Signed-off-by: Yanan Wang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210114121350.123684-4-wangyanan55@huawei.com --- arch/arm64/kvm/mmu.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 7d2257cc5438..77cb2d28f2a4 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -879,11 +879,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (vma_pagesize == PAGE_SIZE && !force_pte) vma_pagesize = transparent_hugepage_adjust(memslot, hva, &pfn, &fault_ipa); - if (writable) { + if (writable) prot |= KVM_PGTABLE_PROT_W; - kvm_set_pfn_dirty(pfn); - mark_page_dirty(kvm, gfn); - } if (fault_status != FSC_PERM && !device) clean_dcache_guest_page(pfn, vma_pagesize); @@ -911,11 +908,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, memcache); } + /* Mark the page dirty only if the fault is handled successfully */ + if (writable && !ret) { + kvm_set_pfn_dirty(pfn); + mark_page_dirty(kvm, gfn); + } + out_unlock: spin_unlock(&kvm->mmu_lock); kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); - return ret; + return ret != -EAGAIN ? ret : 0; } /* Resolve the access fault by making the page young again. */ -- cgit v1.2.3 From a8e190cdae1bf8e9e490776b8179babc1962bb25 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 6 Jan 2021 10:34:53 +0000 Subject: KVM: arm64: Implement the TRNG hypervisor call Provide a hypervisor implementation of the ARM architected TRNG firmware interface described in ARM spec DEN0098. All function IDs are implemented, including both 32-bit and 64-bit versions of the TRNG_RND service, which is the centerpiece of the API. 
The API is backed by the kernel's entropy pool only, to avoid guests draining more precious direct entropy sources. Signed-off-by: Ard Biesheuvel [Andre: minor fixes, drop arch_get_random() usage] Signed-off-by: Andre Przywara Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210106103453.152275-6-andre.przywara@arm.com --- arch/arm64/include/asm/kvm_host.h | 2 + arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/hypercalls.c | 6 +++ arch/arm64/kvm/trng.c | 85 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/kvm/trng.c diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8fcfab0c2567..084d11a2768c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -771,4 +771,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); #define kvm_vcpu_has_pmu(vcpu) \ (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) +int kvm_trng_call(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 13b017284bf9..589921392cb1 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -16,7 +16,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \ inject_fault.o va_layout.o handle_exit.o \ guest.o debug.o reset.o sys_regs.o \ vgic-sys-reg-v3.o fpsimd.o pmu.o \ - arch_timer.o \ + arch_timer.o trng.o\ vgic/vgic.o vgic/vgic-init.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \ vgic/vgic-v3.o vgic/vgic-v4.o \ diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 25ea4ecb6449..ead21b98b620 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -71,6 +71,12 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) if (gpa != GPA_INVALID) val = gpa; break; + case ARM_SMCCC_TRNG_VERSION: + case ARM_SMCCC_TRNG_FEATURES: + case ARM_SMCCC_TRNG_GET_UUID: + case ARM_SMCCC_TRNG_RND32: + case ARM_SMCCC_TRNG_RND64: + return kvm_trng_call(vcpu); default: return kvm_psci_call(vcpu); } diff --git a/arch/arm64/kvm/trng.c b/arch/arm64/kvm/trng.c new file mode 100644 index 000000000000..99bdd7103c9c --- /dev/null +++ b/arch/arm64/kvm/trng.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2020 Arm Ltd. + +#include +#include + +#include + +#include + +#define ARM_SMCCC_TRNG_VERSION_1_0 0x10000UL + +/* Those values are deliberately separate from the generic SMCCC definitions. 
*/ +#define TRNG_SUCCESS 0UL +#define TRNG_NOT_SUPPORTED ((unsigned long)-1) +#define TRNG_INVALID_PARAMETER ((unsigned long)-2) +#define TRNG_NO_ENTROPY ((unsigned long)-3) + +#define TRNG_MAX_BITS64 192 + +static const uuid_t arm_smc_trng_uuid __aligned(4) = UUID_INIT( + 0x0d21e000, 0x4384, 0x11eb, 0x80, 0x70, 0x52, 0x44, 0x55, 0x4e, 0x5a, 0x4c); + +static int kvm_trng_do_rnd(struct kvm_vcpu *vcpu, int size) +{ + DECLARE_BITMAP(bits, TRNG_MAX_BITS64); + u32 num_bits = smccc_get_arg1(vcpu); + int i; + + if (num_bits > 3 * size) { + smccc_set_retval(vcpu, TRNG_INVALID_PARAMETER, 0, 0, 0); + return 1; + } + + /* get as many bits as we need to fulfil the request */ + for (i = 0; i < DIV_ROUND_UP(num_bits, BITS_PER_LONG); i++) + bits[i] = get_random_long(); + + bitmap_clear(bits, num_bits, TRNG_MAX_BITS64 - num_bits); + + if (size == 32) + smccc_set_retval(vcpu, TRNG_SUCCESS, lower_32_bits(bits[1]), + upper_32_bits(bits[0]), lower_32_bits(bits[0])); + else + smccc_set_retval(vcpu, TRNG_SUCCESS, bits[2], bits[1], bits[0]); + + memzero_explicit(bits, sizeof(bits)); + return 1; +} + +int kvm_trng_call(struct kvm_vcpu *vcpu) +{ + const __le32 *u = (__le32 *)arm_smc_trng_uuid.b; + u32 func_id = smccc_get_function(vcpu); + unsigned long val = TRNG_NOT_SUPPORTED; + int size = 64; + + switch (func_id) { + case ARM_SMCCC_TRNG_VERSION: + val = ARM_SMCCC_TRNG_VERSION_1_0; + break; + case ARM_SMCCC_TRNG_FEATURES: + switch (smccc_get_arg1(vcpu)) { + case ARM_SMCCC_TRNG_VERSION: + case ARM_SMCCC_TRNG_FEATURES: + case ARM_SMCCC_TRNG_GET_UUID: + case ARM_SMCCC_TRNG_RND32: + case ARM_SMCCC_TRNG_RND64: + val = TRNG_SUCCESS; + } + break; + case ARM_SMCCC_TRNG_GET_UUID: + smccc_set_retval(vcpu, le32_to_cpu(u[0]), le32_to_cpu(u[1]), + le32_to_cpu(u[2]), le32_to_cpu(u[3])); + return 1; + case ARM_SMCCC_TRNG_RND32: + size = 32; + fallthrough; + case ARM_SMCCC_TRNG_RND64: + return kvm_trng_do_rnd(vcpu, size); + } + + smccc_set_retval(vcpu, val, 0, 0, 0); + return 1; +} -- cgit v1.2.3 From bc93763f178fd04f484ee3e521bbdae57fddb891 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 30 Jan 2021 13:07:51 +0000 Subject: KVM: arm64: Make gen-hyprel endianness agnostic gen-hyprel is, for better or worse, a native-endian program: it assumes that the ELF data structures are in the host's endianness, and even assumes that the compiled kernel is little-endian in one particular case. None of these assumptions hold true though: people actually build (use?) BE arm64 kernels, and seem to avoid doing so on BE hosts. Madness! In order to solve this, wrap each access to the ELF data structures with the required byte-swapping magic. This requires to obtain the kernel data structure, and provide per-endianess wrappers. This result in a kernel that links and even boots in a model. 
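A note on the mechanics (a sketch, not authoritative): the conversion macros are selected from the *target* kernel's configured endianness and always return host-order values, e.g. with CONFIG_CPU_BIG_ENDIAN=y:

    #define elf16toh(x)     be16toh(x)      /* swaps on an LE build host, no-op on a BE one */

    uint16_t e_type = elf16toh(elf.ehdr->e_type);   /* host-order either way */

The new -I$(objtree)/include host include path exists so that a host program can see the kernel's generated configuration headers, letting the CONFIG_CPU_*_ENDIAN checks in the tool resolve at build time.
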
Fixes: 8c49b5d43d4c ("KVM: arm64: Generate hyp relocation data") Reported-by: Guenter Roeck Tested-by: Guenter Roeck Acked-by: David Brazdil Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/nvhe/Makefile | 1 + arch/arm64/kvm/hyp/nvhe/gen-hyprel.c | 57 ++++++++++++++++++++++++++---------- 2 files changed, 42 insertions(+), 16 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 268be1376f74..ed10fcf1b345 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -7,6 +7,7 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ ccflags-y := -D__KVM_NVHE_HYPERVISOR__ hostprogs := gen-hyprel +HOST_EXTRACFLAGS += -I$(objtree)/include obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ hyp-main.o hyp-smp.o psci-relay.o diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c index 58fe31fdba8e..ead02c6a7628 100644 --- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c @@ -25,6 +25,7 @@ */ #include +#include #include #include #include @@ -36,6 +37,8 @@ #include #include +#include + #define HYP_SECTION_PREFIX ".hyp" #define HYP_RELOC_SECTION ".hyp.reloc" #define HYP_SECTION_SYMBOL_PREFIX "__hyp_section_" @@ -121,6 +124,28 @@ static struct { const char *sh_string; } elf; +#if defined(CONFIG_CPU_LITTLE_ENDIAN) + +#define elf16toh(x) le16toh(x) +#define elf32toh(x) le32toh(x) +#define elf64toh(x) le64toh(x) + +#define ELFENDIAN ELFDATA2LSB + +#elif defined(CONFIG_CPU_BIG_ENDIAN) + +#define elf16toh(x) be16toh(x) +#define elf32toh(x) be32toh(x) +#define elf64toh(x) be64toh(x) + +#define ELFENDIAN ELFDATA2MSB + +#else + +#error PDP-endian sadly unsupported... + +#endif + #define fatal_error(fmt, ...) \ ({ \ fprintf(stderr, "error: %s: " fmt "\n", \ @@ -162,12 +187,12 @@ static struct { /* Iterate over all sections in the ELF. */ #define for_each_section(var) \ - for (var = elf.sh_table; var < elf.sh_table + elf.ehdr->e_shnum; ++var) + for (var = elf.sh_table; var < elf.sh_table + elf16toh(elf.ehdr->e_shnum); ++var) /* Iterate over all Elf64_Rela relocations in a given section. */ #define for_each_rela(shdr, var) \ - for (var = elf_ptr(Elf64_Rela, shdr->sh_offset); \ - var < elf_ptr(Elf64_Rela, shdr->sh_offset + shdr->sh_size); var++) + for (var = elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset)); \ + var < elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset) + elf64toh(shdr->sh_size)); var++) /* True if a string starts with a given prefix. */ static inline bool starts_with(const char *str, const char *prefix) @@ -178,13 +203,13 @@ static inline bool starts_with(const char *str, const char *prefix) /* Returns a string containing the name of a given section. */ static inline const char *section_name(Elf64_Shdr *shdr) { - return elf.sh_string + shdr->sh_name; + return elf.sh_string + elf32toh(shdr->sh_name); } /* Returns a pointer to the first byte of section data. */ static inline const char *section_begin(Elf64_Shdr *shdr) { - return elf_ptr(char, shdr->sh_offset); + return elf_ptr(char, elf64toh(shdr->sh_offset)); } /* Find a section by its offset from the beginning of the file. */ @@ -247,13 +272,13 @@ static void init_elf(const char *path) /* Sanity check that this is an ELF64 relocatable object for AArch64. 
*/ assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u"); - assert_eq(elf.ehdr->e_ident[EI_DATA], ELFDATA2LSB, "%u"); - assert_eq(elf.ehdr->e_type, ET_REL, "%u"); - assert_eq(elf.ehdr->e_machine, EM_AARCH64, "%u"); + assert_eq(elf.ehdr->e_ident[EI_DATA], ELFENDIAN, "%u"); + assert_eq(elf16toh(elf.ehdr->e_type), ET_REL, "%u"); + assert_eq(elf16toh(elf.ehdr->e_machine), EM_AARCH64, "%u"); /* Populate fields of the global struct. */ - elf.sh_table = section_by_off(elf.ehdr->e_shoff); - elf.sh_string = section_begin(section_by_idx(elf.ehdr->e_shstrndx)); + elf.sh_table = section_by_off(elf64toh(elf.ehdr->e_shoff)); + elf.sh_string = section_begin(section_by_idx(elf16toh(elf.ehdr->e_shstrndx))); } /* Print the prologue of the output ASM file. */ @@ -301,8 +326,8 @@ static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name) * is `rela->r_offset`. */ printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n", - reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name, - rela->r_offset); + reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name, + elf64toh(rela->r_offset)); reloc_offset += 4; } @@ -322,7 +347,7 @@ static void emit_epilogue(void) */ static void emit_rela_section(Elf64_Shdr *sh_rela) { - Elf64_Shdr *sh_orig = &elf.sh_table[sh_rela->sh_info]; + Elf64_Shdr *sh_orig = &elf.sh_table[elf32toh(sh_rela->sh_info)]; const char *sh_orig_name = section_name(sh_orig); Elf64_Rela *rela; @@ -333,10 +358,10 @@ static void emit_rela_section(Elf64_Shdr *sh_rela) emit_section_prologue(sh_orig_name); for_each_rela(sh_rela, rela) { - uint32_t type = (uint32_t)rela->r_info; + uint32_t type = (uint32_t)elf64toh(rela->r_info); /* Check that rela points inside the relocated section. */ - assert_lt(rela->r_offset, sh_orig->sh_size, "0x%lx"); + assert_lt(elf64toh(rela->r_offset), elf64toh(sh_orig->sh_size), "0x%lx"); switch (type) { /* @@ -385,7 +410,7 @@ static void emit_all_relocs(void) Elf64_Shdr *shdr; for_each_section(shdr) { - switch (shdr->sh_type) { + switch (elf32toh(shdr->sh_type)) { case SHT_REL: fatal_error("Unexpected SHT_REL section \"%s\"", section_name(shdr)); -- cgit v1.2.3 From bea7e97fef888421ecc21d03c6e4f9ae1451a78d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 15 Feb 2020 17:02:06 +0000 Subject: KVM: arm64: Fix missing RES1 in emulation of DBGBIDR The AArch32 CP14 DBGDIDR has bit 15 set to RES1, which our current emulation doesn't set. Just add the missing bit. Reported-by: Peter Maydell Reviewed-by: Eric Auger Reviewed-by: Alexandru Elisei Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3313dedfa505..0c0832472c4a 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1711,7 +1711,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) - | (6 << 16) | (el3 << 14) | (el3 << 12)); + | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } } -- cgit v1.2.3 From cb95914685ca6514da9a1592b19255fe679557eb Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 31 Dec 2020 11:18:03 +0000 Subject: KVM: arm64: Fix AArch32 PMUv3 capping We shouldn't expose *any* PMU capability when no PMU has been configured for this VM. 
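The semantics of the helper used below are roughly as follows (a simplified sketch, not the exact kernel implementation): it clamps the 4-bit PerfMon field to the requested cap, treating the IMPLEMENTATION DEFINED encoding as not implemented, so a cap of 0 hides the PMU entirely:

    u64 perfmon = (val >> ID_DFR0_PERFMON_SHIFT) & 0xf;

    if (perfmon == 0xf)     /* IMP DEF counts as not implemented */
            perfmon = 0;
    if (perfmon > cap)      /* never advertise more than the cap */
            perfmon = cap;

    val &= ~(0xfUL << ID_DFR0_PERFMON_SHIFT);
    val |= perfmon << ID_DFR0_PERFMON_SHIFT;
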
Reviewed-by: Eric Auger Reviewed-by: Alexandru Elisei Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 0c0832472c4a..ce08d28ab15c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1048,8 +1048,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, } else if (id == SYS_ID_DFR0_EL1) { /* Limit guests to PMUv3 for ARMv8.1 */ val = cpuid_feature_cap_perfmon_field(val, - ID_DFR0_PERFMON_SHIFT, - ID_DFR0_PERFMON_8_1); + ID_DFR0_PERFMON_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_1 : 0); } return val; -- cgit v1.2.3 From 99b6a4013fe9331e462ccad351a8ac7a2cb330d6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 31 Dec 2020 11:39:01 +0000 Subject: KVM: arm64: Add handling of AArch32 PCMEID{2,3} PMUv3 registers Despite advertising support for AArch32 PMUv3p1, we fail to handle the PMCEID{2,3} registers, which conveniently alias with the top bits of PMCEID{0,1}_EL1. Implement these registers with the usual AA32(HI/LO) aliasing mechanism. Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ce08d28ab15c..2bea0494b81d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -685,14 +685,18 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 pmceid; + u64 pmceid, mask, shift; BUG_ON(p->is_write); if (pmu_access_el0_disabled(vcpu)) return false; + get_access_mask(r, &mask, &shift); + pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1)); + pmceid &= mask; + pmceid >>= shift; p->regval = pmceid; @@ -1895,8 +1899,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, - { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, - { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, + { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, + { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, @@ -1904,6 +1908,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, + { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid }, + { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid }, /* PRRR/MAIR0 */ { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 }, -- cgit v1.2.3 From c8857935587c6335a1beb40bd2c5e8405c4626ae Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 15 Feb 2020 17:07:32 +0000 Subject: KVM: arm64: Refactor filtering of ID registers Our current ID register filtering is starting to be a mess of if() statements, and isn't going to get any saner. Let's turn it into a switch(), which has a chance of being more readable, and introduce a FEATURE() macro that allows easy generation of feature masks. No functionnal change intended. 
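As a worked example of the new macro (assuming ID_AA64PFR0_SVE_SHIFT is 32, as defined in sysreg.h):

    #define FEATURE(x)      (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))

    /*
     * FEATURE(ID_AA64PFR0_SVE) expands to GENMASK_ULL(35, 32),
     * i.e. 0x0000000f00000000, the 4-bit SVE field of ID_AA64PFR0_EL1.
     */
    val &= ~FEATURE(ID_AA64PFR0_SVE);   /* hide SVE from the guest */
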
Reviewed-by: Eric Auger Reviewed-by: Alexandru Elisei Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 51 ++++++++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2bea0494b81d..dda16d60197b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -9,6 +9,7 @@ * Christoffer Dall */ +#include #include #include #include @@ -1016,6 +1017,8 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, return true; } +#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT)) + /* Read a sanitised cpufeature ID register by sys_reg_desc */ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r, bool raz) @@ -1024,36 +1027,38 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); u64 val = raz ? 0 : read_sanitised_ftr_reg(id); - if (id == SYS_ID_AA64PFR0_EL1) { + switch (id) { + case SYS_ID_AA64PFR0_EL1: if (!vcpu_has_sve(vcpu)) - val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); - val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT); - val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT); - val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT); - val &= ~(0xfUL << ID_AA64PFR0_CSV3_SHIFT); - val |= ((u64)vcpu->kvm->arch.pfr0_csv3 << ID_AA64PFR0_CSV3_SHIFT); - } else if (id == SYS_ID_AA64PFR1_EL1) { - val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT); - } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { - val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | - (0xfUL << ID_AA64ISAR1_API_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); - } else if (id == SYS_ID_AA64DFR0_EL1) { - u64 cap = 0; - + val &= ~FEATURE(ID_AA64PFR0_SVE); + val &= ~FEATURE(ID_AA64PFR0_AMU); + val &= ~FEATURE(ID_AA64PFR0_CSV2); + val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); + val &= ~FEATURE(ID_AA64PFR0_CSV3); + val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); + break; + case SYS_ID_AA64PFR1_EL1: + val &= ~FEATURE(ID_AA64PFR1_MTE); + break; + case SYS_ID_AA64ISAR1_EL1: + if (!vcpu_has_ptrauth(vcpu)) + val &= ~(FEATURE(ID_AA64ISAR1_APA) | + FEATURE(ID_AA64ISAR1_API) | + FEATURE(ID_AA64ISAR1_GPA) | + FEATURE(ID_AA64ISAR1_GPI)); + break; + case SYS_ID_AA64DFR0_EL1: /* Limit guests to PMUv3 for ARMv8.1 */ - if (kvm_vcpu_has_pmu(vcpu)) - cap = ID_AA64DFR0_PMUVER_8_1; - val = cpuid_feature_cap_perfmon_field(val, - ID_AA64DFR0_PMUVER_SHIFT, - cap); - } else if (id == SYS_ID_DFR0_EL1) { + ID_AA64DFR0_PMUVER_SHIFT, + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_1 : 0); + break; + case SYS_ID_DFR0_EL1: /* Limit guests to PMUv3 for ARMv8.1 */ val = cpuid_feature_cap_perfmon_field(val, ID_DFR0_PERFMON_SHIFT, kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_1 : 0); + break; } return val; -- cgit v1.2.3 From 94893fc9ad8cdb05fdb64e00128997bc530e2ca9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 16 Feb 2020 17:44:29 +0000 Subject: KVM: arm64: Limit the debug architecture to ARMv8.0 Let's not pretend we support anything but ARMv8.0 as far as the debug architecture is concerned. 
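For reference on the magic number written below (a sketch of the architected encodings): ID_AA64DFR0_EL1.DebugVer uses 0b0110 for the baseline Armv8.0 debug architecture, 0b0111 for Armv8 debug with VHE, 0b1000 for Armv8.2 and 0b1001 for Armv8.4, hence forcing the field to 6:

    /* DebugVer: 6 = Armv8.0, 7 = Armv8 + VHE, 8 = Armv8.2, 9 = Armv8.4 */
    val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
    val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
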
Reviewed-by: Eric Auger Reviewed-by: Alexandru Elisei Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index dda16d60197b..8f79ec1fffa7 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1048,6 +1048,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, FEATURE(ID_AA64ISAR1_GPI)); break; case SYS_ID_AA64DFR0_EL1: + /* Limit debug to ARMv8.0 */ + val &= ~FEATURE(ID_AA64DFR0_DEBUGVER); + val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6); /* Limit guests to PMUv3 for ARMv8.1 */ val = cpuid_feature_cap_perfmon_field(val, ID_AA64DFR0_PMUVER_SHIFT, -- cgit v1.2.3 From 46081078feb451b5488c225c1e600ada24285c06 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 16 Feb 2020 18:17:22 +0000 Subject: KVM: arm64: Upgrade PMU support to ARMv8.4 Upgrading the PMU code from ARMv8.1 to ARMv8.4 turns out to be pretty easy. All that is required is support for PMMIR_EL1, which is read-only, and for which returning 0 is a valid option as long as we don't advertise STALL_SLOT as an implemented event. Let's just do that and adjust what we return to the guest. Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/sysreg.h | 3 +++ arch/arm64/kvm/pmu-emul.c | 6 ++++++ arch/arm64/kvm/sys_regs.c | 11 +++++++---- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 8b5e7e5c3cc8..2fb3f386588c 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -846,7 +846,10 @@ #define ID_DFR0_PERFMON_SHIFT 24 +#define ID_DFR0_PERFMON_8_0 0x3 #define ID_DFR0_PERFMON_8_1 0x4 +#define ID_DFR0_PERFMON_8_4 0x5 +#define ID_DFR0_PERFMON_8_5 0x6 #define ID_ISAR4_SWP_FRAC_SHIFT 28 #define ID_ISAR4_PSR_M_SHIFT 24 diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 398f6df1bbe4..72cd704a8368 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -795,6 +795,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) base = 0; } else { val = read_sysreg(pmceid1_el0); + /* + * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled + * as RAZ + */ + if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4) + val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 8f79ec1fffa7..5da536ab738d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1051,16 +1051,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, /* Limit debug to ARMv8.0 */ val &= ~FEATURE(ID_AA64DFR0_DEBUGVER); val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6); - /* Limit guests to PMUv3 for ARMv8.1 */ + /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, ID_AA64DFR0_PMUVER_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_1 : 0); + kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0); break; case SYS_ID_DFR0_EL1: - /* Limit guests to PMUv3 for ARMv8.1 */ + /* Limit guests to PMUv3 for ARMv8.4 */ val = cpuid_feature_cap_perfmon_field(val, ID_DFR0_PERFMON_SHIFT, - kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_1 : 0); + kvm_vcpu_has_pmu(vcpu) ? 
ID_DFR0_PERFMON_8_4 : 0); break; } @@ -1496,6 +1496,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 }, { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 }, + { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, @@ -1918,6 +1919,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid }, { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid }, + /* PMMIR */ + { Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi }, /* PRRR/MAIR0 */ { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 }, -- cgit v1.2.3 From 8e26d11f680a323f7e1073038c454df39307dfad Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 15 Jan 2021 17:15:33 +0000 Subject: KVM: arm64: Use symbolic names for the PMU versions Instead of using a bunch of magic numbers, use the existing definitions that have been added since 8673e02e58410 ("arm64: perf: Add support for ARMv8.5-PMU 64-bit counters") Reviewed-by: Alexandru Elisei Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier --- arch/arm64/kvm/pmu-emul.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 72cd704a8368..cb16ca2eee92 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -23,11 +23,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc); static u32 kvm_pmu_event_mask(struct kvm *kvm) { switch (kvm->arch.pmuver) { - case 1: /* ARMv8.0 */ + case ID_AA64DFR0_PMUVER_8_0: return GENMASK(9, 0); - case 4: /* ARMv8.1 */ - case 5: /* ARMv8.4 */ - case 6: /* ARMv8.5 */ + case ID_AA64DFR0_PMUVER_8_1: + case ID_AA64DFR0_PMUVER_8_4: + case ID_AA64DFR0_PMUVER_8_5: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); -- cgit v1.2.3 From 8c358b29e0dc69d5ced6acfea4cc3d1dcf10df27 Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Thu, 28 Jan 2021 13:28:23 +0000 Subject: KVM: arm64: Correct spelling of DBGDIDR register The aarch32 debug ID register is called DBG*D*IDR (emphasis added), not DBGIDR, use the correct spelling. Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210128132823.35067-1-alexandru.elisei@arm.com --- arch/arm64/kvm/sys_regs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 5da536ab738d..d9ca200c8b1d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1710,7 +1710,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 }, }; -static bool trap_dbgidr(struct kvm_vcpu *vcpu, +static bool trap_dbgdidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -1757,8 +1757,8 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, * guest. Revisit this one day, would this principle change. 
*/ static const struct sys_reg_desc cp14_regs[] = { - /* DBGIDR */ - { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, + /* DBGDIDR */ + { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr }, /* DBGDTRRXext */ { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, -- cgit v1.2.3 From 54effa653246c35997f5e990e0134be5be09f9d1 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 3 Feb 2021 14:19:30 +0000 Subject: asm-generic: export: Stub EXPORT_SYMBOL with __DISABLE_EXPORTS It is currently possible to stub EXPORT_SYMBOL() macros in C code using __DISABLE_EXPORTS, which is necessary to run in constrained environments such as the EFI stub or the decompressor. But this currently doesn't apply to exports from assembly, which can lead to somewhat confusing situations. Consolidate the __DISABLE_EXPORTS infrastructure by checking it from asm-generic/export.h as well. Signed-off-by: Quentin Perret Acked-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210203141931.615898-2-qperret@google.com --- include/asm-generic/export.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 365345f9a9e3..07a36a874dca 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -33,7 +33,7 @@ */ .macro ___EXPORT_SYMBOL name,val,sec -#ifdef CONFIG_MODULES +#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS) .section ___ksymtab\sec+\name,"a" .balign KSYM_ALIGN __ksymtab_\name: -- cgit v1.2.3 From bbc075e01ceac50e0a8353b520544f3089e94e44 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 3 Feb 2021 14:19:31 +0000 Subject: KVM: arm64: Stub EXPORT_SYMBOL for nVHE EL2 code In order to ensure the module loader does not get confused if a symbol is exported in EL2 nVHE code (as will be the case when we will compile e.g. lib/memset.S into the EL2 object), make sure to stub all exports using __DISABLE_EXPORTS in the nvhe folder. Suggested-by: Ard Biesheuvel Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210203141931.615898-3-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 1f1e351c5fe2..c9c121c8d5de 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -3,8 +3,8 @@ # Makefile for Kernel-based Virtual Machine module, HYP/nVHE part # -asflags-y := -D__KVM_NVHE_HYPERVISOR__ -ccflags-y := -D__KVM_NVHE_HYPERVISOR__ +asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS +ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ hyp-main.o hyp-smp.o psci-relay.o -- cgit v1.2.3 From bd2fae8da794b55bf2ac02632da3a151b10e664c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 1 Feb 2021 05:12:11 -0500 Subject: KVM: do not assume PTE is writable after follow_pfn In order to convert an HVA to a PFN, KVM usually tries to use the get_user_pages family of functinso. This however is not possible for VM_IO vmas; in that case, KVM instead uses follow_pfn. In doing this however KVM loses the information on whether the PFN is writable. That is usually not a problem because the main use of VM_IO vmas with KVM is for BARs in PCI device assignment, however it is a bug. To fix it, use follow_pte and check pte_write while under the protection of the PTE lock. 
The information can be used to fail hva_to_pfn_remapped or passed back to the caller via *writable. Usage of follow_pfn was introduced in commit add6a0cd1c5b ("KVM: MMU: try to fix up page faults before giving up", 2016-07-05); however, even older version have the same issue, all the way back to commit 2e2e3738af33 ("KVM: Handle vma regions with no backing page", 2008-07-20), as they also did not check whether the PFN was writable. Fixes: 2e2e3738af33 ("KVM: Handle vma regions with no backing page") Reported-by: David Stevens Cc: 3pvd@google.com Cc: Jann Horn Cc: Jason Gunthorpe Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 8367d88ce39b..335a1a2b8edc 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1904,9 +1904,11 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, kvm_pfn_t *p_pfn) { unsigned long pfn; + pte_t *ptep; + spinlock_t *ptl; int r; - r = follow_pfn(vma, addr, &pfn); + r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl); if (r) { /* * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does @@ -1921,14 +1923,19 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, if (r) return r; - r = follow_pfn(vma, addr, &pfn); + r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl); if (r) return r; + } + if (write_fault && !pte_write(*ptep)) { + pfn = KVM_PFN_ERR_RO_FAULT; + goto out; } if (writable) - *writable = true; + *writable = pte_write(*ptep); + pfn = pte_pfn(*ptep); /* * Get a reference here because callers of *hva_to_pfn* and @@ -1943,6 +1950,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, */ kvm_get_pfn(pfn); +out: + pte_unmap_unlock(ptep, ptl); *p_pfn = pfn; return 0; } -- cgit v1.2.3 From 19979fba9bfaeab427a8e106d915f0627c952828 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 22 Jan 2021 16:03:33 -0800 Subject: KVM: x86: Remove obsolete disabling of page faults in kvm_arch_vcpu_put() Remove the disabling of page faults across kvm_steal_time_set_preempted() as KVM now accesses the steal time struct (shared with the guest) via a cached mapping (see commit b043138246a4, "x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed".) The cache lookup is flagged as atomic, thus it would be a bug if KVM tried to resolve a new pfn, i.e. we want the splat that would be reached via might_fault(). Signed-off-by: Sean Christopherson Message-Id: <20210123000334.3123628-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c1650e26715b..18d005378a78 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4040,15 +4040,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) if (vcpu->preempted && !vcpu->arch.guest_state_protected) vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); - /* - * Disable page faults because we're in atomic context here. - * kvm_write_guest_offset_cached() would call might_fault() - * that relies on pagefault_disable() to tell if there's a - * bug. NOTE: the write to guest memory may not go through if - * during postcopy live migration or if there's heavy guest - * paging. - */ - pagefault_disable(); /* * kvm_memslots() will be called by * kvm_write_guest_offset_cached() so take the srcu lock. 
@@ -4056,7 +4047,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_steal_time_set_preempted(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); - pagefault_enable(); kvm_x86_ops.vcpu_put(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* -- cgit v1.2.3 From 15b51dc08a349f2f0832606c900b638a3dd19839 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 22 Jan 2021 16:03:34 -0800 Subject: KVM: x86: Take KVM's SRCU lock only if steal time update is needed Enter a SRCU critical section for a memslots lookup during steal time update if and only if a steal time update is actually needed. Taking the lock can be avoided if steal time is disabled by the guest, or if KVM knows it has already flagged the vCPU as being preempted. Reword the comment to be more precise as to exactly why memslots will be queried. Signed-off-by: Sean Christopherson Message-Id: <20210123000334.3123628-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 18d005378a78..fa1ec597d510 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4014,6 +4014,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) { struct kvm_host_map map; struct kvm_steal_time *st; + int idx; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; @@ -4021,9 +4022,15 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) if (vcpu->arch.st.preempted) return; + /* + * Take the srcu lock as memslots will be accessed to check the gfn + * cache generation against the memslots generation. + */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, &vcpu->arch.st.cache, true)) - return; + goto out; st = map.hva + offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); @@ -4031,22 +4038,17 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); + +out: + srcu_read_unlock(&vcpu->kvm->srcu, idx); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - int idx; - if (vcpu->preempted && !vcpu->arch.guest_state_protected) vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); - /* - * kvm_memslots() will be called by - * kvm_write_guest_offset_cached() so take the srcu lock. - */ - idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_steal_time_set_preempted(vcpu); - srcu_read_unlock(&vcpu->kvm->srcu, idx); kvm_x86_ops.vcpu_put(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* -- cgit v1.2.3 From c910662c7c696ec0769766aaee5fc2fb54d921d5 Mon Sep 17 00:00:00 2001 From: Tian Tao Date: Mon, 25 Jan 2021 11:57:25 +0800 Subject: KVM: X86: use vzalloc() instead of vmalloc/memset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fixed the following warning: /virt/kvm/dirty_ring.c:70:20-27: WARNING: vzalloc should be used for ring -> dirty_gfns, instead of vmalloc/memset. 
Signed-off-by: Tian Tao Message-Id: <1611547045-13669-1-git-send-email-tiantao6@hisilicon.com> Signed-off-by: Paolo Bonzini --- virt/kvm/dirty_ring.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index 9d01299563ee..790f17325f8d 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -67,10 +67,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size) { - ring->dirty_gfns = vmalloc(size); + ring->dirty_gfns = vzalloc(size); if (!ring->dirty_gfns) return -ENOMEM; - memset(ring->dirty_gfns, 0, size); ring->size = size / sizeof(struct kvm_dirty_gfn); ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries(); -- cgit v1.2.3 From 15e6a7e5324cc04a67891fc369ea834bbb7e7b42 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 22 Jan 2021 16:30:03 -0800 Subject: KVM: x86/mmu: Use boolean returns for (S)PTE accessors Return a 'bool' instead of an 'int' for various PTE accessors that are boolean in nature, e.g. is_shadow_present_pte(). Returning an int is goofy and potentially dangerous, e.g. if a flag being checked is moved into the upper 32 bits of a SPTE, then the compiler may silently squash the entire check since casting to an int is guaranteed to yield a return value of '0'. Opportunistically refactor is_last_spte() so that it naturally returns a bool value instead of letting it implicitly cast 0/1 to false/true. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210123003003.3137525-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 2 +- arch/x86/kvm/mmu/spte.h | 12 ++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 261be1d2032b..2bb42eee5d5d 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -152,7 +152,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, * * TODO: introduce APIs to split these two cases. */ -static inline int is_writable_pte(unsigned long pte) +static inline bool is_writable_pte(unsigned long pte) { return pte & PT_WRITABLE_MASK; } diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 2b3a30bd38b0..398fd1bb13a7 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -185,23 +185,19 @@ static inline bool is_access_track_spte(u64 spte) return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0; } -static inline int is_shadow_present_pte(u64 pte) +static inline bool is_shadow_present_pte(u64 pte) { return (pte != 0) && !is_mmio_spte(pte); } -static inline int is_large_pte(u64 pte) +static inline bool is_large_pte(u64 pte) { return pte & PT_PAGE_SIZE_MASK; } -static inline int is_last_spte(u64 pte, int level) +static inline bool is_last_spte(u64 pte, int level) { - if (level == PG_LEVEL_4K) - return 1; - if (is_large_pte(pte)) - return 1; - return 0; + return (level == PG_LEVEL_4K) || is_large_pte(pte); } static inline bool is_executable_pte(u64 spte) -- cgit v1.2.3 From 8fc517267fb28576dfca2380cc2497a2454b8fae Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 13 Jan 2021 12:50:30 -0800 Subject: KVM: x86: Zap the oldest MMU pages, not the newest Walk the list of MMU pages in reverse in kvm_mmu_zap_oldest_mmu_pages(). The list is FIFO, meaning new pages are inserted at the head and thus the oldest pages are at the tail. 
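In list_head terms (a sketch based on how shadow pages are expected to be inserted): new pages go in with list_add() at the head, so the tail end of the list holds the oldest pages and a reverse walk visits those first:

    list_add(&sp->link, &kvm->arch.active_mmu_pages);   /* newest sits next to the head */

    /* tail-to-head walk: the oldest pages are seen (and zapped) first */
    list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
            /* ... zap sp ... */
    }
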
Using a "forward" iterator causes KVM to zap MMU pages that were just added, which obliterates guest performance once the max number of shadow MMU pages is reached. Fixes: 6b82ef2c9cf1 ("KVM: x86/mmu: Batch zap MMU pages when recycling oldest pages") Reported-by: Zdenek Kaspar Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20210113205030.3481307-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6d16481aa29d..ed861245ecf0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2417,7 +2417,7 @@ static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, return 0; restart: - list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) { + list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) { /* * Don't zap active root pages, the page itself can't be freed * and zapping it will just force vCPUs to realloc and reload. -- cgit v1.2.3 From 7ca7f3b944929c99637522d849138ba15f97e3fe Mon Sep 17 00:00:00 2001 From: YANG LI Date: Mon, 11 Jan 2021 17:32:58 +0800 Subject: x86: kvm: style: Simplify bool comparison Fix the following coccicheck warning: ./arch/x86/kvm/x86.c:8012:5-48: WARNING: Comparison to bool Signed-off-by: YANG LI Reported-by: Abaci Robot Message-Id: <1610357578-66081-1-git-send-email-abaci-bugfix@linux.alibaba.com> Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fa1ec597d510..8b8f5ee839c2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8029,7 +8029,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) return -KVM_EOPNOTSUPP; - if (kvm_get_walltime_and_clockread(&ts, &cycle) == false) + if (!kvm_get_walltime_and_clockread(&ts, &cycle)) return -KVM_EOPNOTSUPP; clock_pairing.sec = ts.tv_sec; -- cgit v1.2.3 From b85a0425d8056f3bd8d0a94ecdddf2a39d32a801 Mon Sep 17 00:00:00 2001 From: Kyung Min Park Date: Tue, 5 Jan 2021 08:49:08 +0800 Subject: Enumerate AVX Vector Neural Network instructions Add AVX version of the Vector Neural Network (VNNI) Instructions. A processor supports AVX VNNI instructions if CPUID.0x07.0x1:EAX[4] is present. The following instructions are available when this feature is present. 1. VPDPBUS: Multiply and Add Unsigned and Signed Bytes 2. VPDPBUSDS: Multiply and Add Unsigned and Signed Bytes with Saturation 3. VPDPWSSD: Multiply and Add Signed Word Integers 4. VPDPWSSDS: Multiply and Add Signed Integers with Saturation The only in-kernel usage of this is kvm passthrough. The CPU feature flag is shown as "avx_vnni" in /proc/cpuinfo. This instruction is currently documented in the latest "extensions" manual (ISE). It will appear in the "main" manual (SDM) in the future. 
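As an illustration of the enumeration (a userspace sketch, not part of this patch; assumes a GCC/Clang build on x86):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID.(EAX=7,ECX=1):EAX[4] is the AVX_VNNI bit */
            if (__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx) && (eax & (1u << 4)))
                    puts("AVX-VNNI enumerated");
            else
                    puts("AVX-VNNI not enumerated");
            return 0;
    }
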
Signed-off-by: Kyung Min Park Signed-off-by: Yang Zhong Reviewed-by: Tony Luck Message-Id: <20210105004909.42000-2-yang.zhong@intel.com> Acked-by: Borislav Petkov Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 84b887825f12..bc33e319db56 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -294,6 +294,7 @@ #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ +#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ -- cgit v1.2.3 From 1085a6b585d7d1c441cd10fdb4c7a4d96a22eba7 Mon Sep 17 00:00:00 2001 From: Yang Zhong Date: Tue, 5 Jan 2021 08:49:09 +0800 Subject: KVM: Expose AVX_VNNI instruction to guset Expose AVX (VEX-encoded) versions of the Vector Neural Network Instructions to guest. The bit definition: CPUID.(EAX=7,ECX=1):EAX[bit 4] AVX_VNNI The following instructions are available when this feature is present in the guest. 1. VPDPBUS: Multiply and Add Unsigned and Signed Bytes 2. VPDPBUSDS: Multiply and Add Unsigned and Signed Bytes with Saturation 3. VPDPWSSD: Multiply and Add Signed Word Integers 4. VPDPWSSDS: Multiply and Add Signed Integers with Saturation This instruction is currently documented in the latest "extensions" manual (ISE). It will appear in the "main" manual (SDM) in the future. Signed-off-by: Yang Zhong Reviewed-by: Tony Luck Message-Id: <20210105004909.42000-3-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 38172ca627d3..ce658c9fa002 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -434,7 +434,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD); kvm_cpu_cap_mask(CPUID_7_1_EAX, - F(AVX512_BF16) + F(AVX_VNNI) | F(AVX512_BF16) ); kvm_cpu_cap_mask(CPUID_D_1_EAX, -- cgit v1.2.3 From 678e90a349a4c22082c1609335ea688f5d4a7139 Mon Sep 17 00:00:00 2001 From: Peter Shier Date: Thu, 5 Nov 2020 14:38:23 -0800 Subject: KVM: selftests: Test IPI to halted vCPU in xAPIC while backing page moves When a guest is using xAPIC KVM allocates a backing page for the required EPT entry for the APIC access address set in the VMCS. If mm decides to move that page the KVM mmu notifier will update the VMCS with the new HPA. This test induces a page move to test that APIC access continues to work correctly. It is a directed test for commit e649b3f0188f "KVM: x86: Fix APIC page invalidation race". Tested: ran for 1 hour on a skylake, migrating backing page every 1ms Depends on patch "selftests: kvm: Add exception handling to selftests" from aaronlewis@google.com that has not yet been queued. 
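Going by the option parsing in the test below, a typical invocation on a multi-node host might look like this (values illustrative only):

    # run for 60 seconds, migrating the backing pages every 1000 microseconds
    ./x86_64/xapic_ipi_test -s 60 -m -d 1000

Without -m the test still exercises the IPI wake path but performs no page migration.
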
Signed-off-by: Peter Shier Reviewed-by: Jim Mattson Reviewed-by: Ricardo Koller Message-Id: <20201105223823.850068-1-pshier@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/include/x86_64/processor.h | 20 + .../testing/selftests/kvm/x86_64/xapic_ipi_test.c | 544 +++++++++++++++++++++ 4 files changed, 566 insertions(+) create mode 100644 tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index ce8f4ad39684..eaba522068c7 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -24,6 +24,7 @@ /x86_64/vmx_preemption_timer_test /x86_64/vmx_set_nested_state_test /x86_64/vmx_tsc_adjust_test +/x86_64/xapic_ipi_test /x86_64/xss_msr_test /demand_paging_test /dirty_log_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index fe41c6a0fa67..b06419813f7f 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -56,6 +56,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test +TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += x86_64/debug_regs TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 90cd5984751b..74ce0fe420ab 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -406,8 +406,27 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, #define X86_CR0_CD (1UL<<30) /* Cache Disable */ #define X86_CR0_PG (1UL<<31) /* Paging */ +#define APIC_DEFAULT_GPA 0xfee00000ULL + +/* APIC base address MSR and fields */ +#define MSR_IA32_APICBASE 0x0000001b +#define MSR_IA32_APICBASE_BSP (1<<8) +#define MSR_IA32_APICBASE_EXTD (1<<10) +#define MSR_IA32_APICBASE_ENABLE (1<<11) +#define MSR_IA32_APICBASE_BASE (0xfffff<<12) +#define GET_APIC_BASE(x) (((x) >> 12) << 12) + #define APIC_BASE_MSR 0x800 #define X2APIC_ENABLE (1UL << 10) +#define APIC_ID 0x20 +#define APIC_LVR 0x30 +#define GET_APIC_ID_FIELD(x) (((x) >> 24) & 0xFF) +#define APIC_TASKPRI 0x80 +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_SPIV 0xF0 +#define APIC_SPIV_FOCUS_DISABLED (1 << 9) +#define APIC_SPIV_APIC_ENABLED (1 << 8) #define APIC_ICR 0x300 #define APIC_DEST_SELF 0x40000 #define APIC_DEST_ALLINC 0x80000 @@ -432,6 +451,7 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, #define APIC_DM_EXTINT 0x00700 #define APIC_VECTOR_MASK 0x000FF #define APIC_ICR2 0x310 +#define SET_APIC_DEST_FIELD(x) ((x) << 24) /* VMX_EPT_VPID_CAP bits */ #define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21) diff --git a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c new file mode 100644 index 000000000000..2f964cdc273c --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * xapic_ipi_test + * + * Copyright (C) 2020, Google LLC. + * + * This work is licensed under the terms of the GNU GPL, version 2. 
+ * + * Test that when the APIC is in xAPIC mode, a vCPU can send an IPI to wake + * another vCPU that is halted when KVM's backing page for the APIC access + * address has been moved by mm. + * + * The test starts two vCPUs: one that sends IPIs and one that continually + * executes HLT. The sender checks that the halter has woken from the HLT and + * has reentered HLT before sending the next IPI. While the vCPUs are running, + * the host continually calls migrate_pages to move all of the process' pages + * amongst the available numa nodes on the machine. + * + * Migration is a command line option. When used on non-numa machines will + * exit with error. Test is still usefull on non-numa for testing IPIs. + */ + +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include +#include +#include +#include +#include + +#include "kvm_util.h" +#include "numaif.h" +#include "processor.h" +#include "test_util.h" +#include "vmx.h" + +/* Default running time for the test */ +#define DEFAULT_RUN_SECS 3 + +/* Default delay between migrate_pages calls (microseconds) */ +#define DEFAULT_DELAY_USECS 500000 + +#define HALTER_VCPU_ID 0 +#define SENDER_VCPU_ID 1 + +volatile uint32_t *apic_base = (volatile uint32_t *)APIC_DEFAULT_GPA; + +/* + * Vector for IPI from sender vCPU to halting vCPU. + * Value is arbitrary and was chosen for the alternating bit pattern. Any + * value should work. + */ +#define IPI_VECTOR 0xa5 + +/* + * Incremented in the IPI handler. Provides evidence to the sender that the IPI + * arrived at the destination + */ +static volatile uint64_t ipis_rcvd; + +/* Data struct shared between host main thread and vCPUs */ +struct test_data_page { + uint32_t halter_apic_id; + volatile uint64_t hlt_count; + volatile uint64_t wake_count; + uint64_t ipis_sent; + uint64_t migrations_attempted; + uint64_t migrations_completed; + uint32_t icr; + uint32_t icr2; + uint32_t halter_tpr; + uint32_t halter_ppr; + + /* + * Record local version register as a cross-check that APIC access + * worked. Value should match what KVM reports (APIC_VERSION in + * arch/x86/kvm/lapic.c). If test is failing, check that values match + * to determine whether APIC access exits are working. + */ + uint32_t halter_lvr; +}; + +struct thread_params { + struct test_data_page *data; + struct kvm_vm *vm; + uint32_t vcpu_id; + uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */ +}; + +uint32_t read_apic_reg(uint reg) +{ + return apic_base[reg >> 2]; +} + +void write_apic_reg(uint reg, uint32_t val) +{ + apic_base[reg >> 2] = val; +} + +void disable_apic(void) +{ + wrmsr(MSR_IA32_APICBASE, + rdmsr(MSR_IA32_APICBASE) & + ~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD)); +} + +void enable_xapic(void) +{ + uint64_t val = rdmsr(MSR_IA32_APICBASE); + + /* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */ + if (val & MSR_IA32_APICBASE_EXTD) { + disable_apic(); + wrmsr(MSR_IA32_APICBASE, + rdmsr(MSR_IA32_APICBASE) | MSR_IA32_APICBASE_ENABLE); + } else if (!(val & MSR_IA32_APICBASE_ENABLE)) { + wrmsr(MSR_IA32_APICBASE, val | MSR_IA32_APICBASE_ENABLE); + } + + /* + * Per SDM: reset value of spurious interrupt vector register has the + * APIC software enabled bit=0. It must be enabled in addition to the + * enable bit in the MSR. 
+ */ + val = read_apic_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED; + write_apic_reg(APIC_SPIV, val); +} + +void verify_apic_base_addr(void) +{ + uint64_t msr = rdmsr(MSR_IA32_APICBASE); + uint64_t base = GET_APIC_BASE(msr); + + GUEST_ASSERT(base == APIC_DEFAULT_GPA); +} + +static void halter_guest_code(struct test_data_page *data) +{ + verify_apic_base_addr(); + enable_xapic(); + + data->halter_apic_id = GET_APIC_ID_FIELD(read_apic_reg(APIC_ID)); + data->halter_lvr = read_apic_reg(APIC_LVR); + + /* + * Loop forever HLTing and recording halts & wakes. Disable interrupts + * each time around to minimize window between signaling the pending + * halt to the sender vCPU and executing the halt. No need to disable on + * first run as this vCPU executes first and the host waits for it to + * signal going into first halt before starting the sender vCPU. Record + * TPR and PPR for diagnostic purposes in case the test fails. + */ + for (;;) { + data->halter_tpr = read_apic_reg(APIC_TASKPRI); + data->halter_ppr = read_apic_reg(APIC_PROCPRI); + data->hlt_count++; + asm volatile("sti; hlt; cli"); + data->wake_count++; + } +} + +/* + * Runs on halter vCPU when IPI arrives. Write an arbitrary non-zero value to + * enable diagnosing errant writes to the APIC access address backing page in + * case of test failure. + */ +static void guest_ipi_handler(struct ex_regs *regs) +{ + ipis_rcvd++; + write_apic_reg(APIC_EOI, 77); +} + +static void sender_guest_code(struct test_data_page *data) +{ + uint64_t last_wake_count; + uint64_t last_hlt_count; + uint64_t last_ipis_rcvd_count; + uint32_t icr_val; + uint32_t icr2_val; + uint64_t tsc_start; + + verify_apic_base_addr(); + enable_xapic(); + + /* + * Init interrupt command register for sending IPIs + * + * Delivery mode=fixed, per SDM: + * "Delivers the interrupt specified in the vector field to the target + * processor." + * + * Destination mode=physical i.e. specify target by its local APIC + * ID. This vCPU assumes that the halter vCPU has already started and + * set data->halter_apic_id. + */ + icr_val = (APIC_DEST_PHYSICAL | APIC_DM_FIXED | IPI_VECTOR); + icr2_val = SET_APIC_DEST_FIELD(data->halter_apic_id); + data->icr = icr_val; + data->icr2 = icr2_val; + + last_wake_count = data->wake_count; + last_hlt_count = data->hlt_count; + last_ipis_rcvd_count = ipis_rcvd; + for (;;) { + /* + * Send IPI to halter vCPU. + * First IPI can be sent unconditionally because halter vCPU + * starts earlier. + */ + write_apic_reg(APIC_ICR2, icr2_val); + write_apic_reg(APIC_ICR, icr_val); + data->ipis_sent++; + + /* + * Wait up to ~1 sec for halter to indicate that it has: + * 1. Received the IPI + * 2. Woken up from the halt + * 3. Gone back into halt + * Current CPUs typically run at 2.x Ghz which is ~2 + * billion ticks per second. 
+ */ + tsc_start = rdtsc(); + while (rdtsc() - tsc_start < 2000000000) { + if ((ipis_rcvd != last_ipis_rcvd_count) && + (data->wake_count != last_wake_count) && + (data->hlt_count != last_hlt_count)) + break; + } + + GUEST_ASSERT((ipis_rcvd != last_ipis_rcvd_count) && + (data->wake_count != last_wake_count) && + (data->hlt_count != last_hlt_count)); + + last_wake_count = data->wake_count; + last_hlt_count = data->hlt_count; + last_ipis_rcvd_count = ipis_rcvd; + } +} + +static void *vcpu_thread(void *arg) +{ + struct thread_params *params = (struct thread_params *)arg; + struct ucall uc; + int old; + int r; + unsigned int exit_reason; + + r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old); + TEST_ASSERT(r == 0, + "pthread_setcanceltype failed on vcpu_id=%u with errno=%d", + params->vcpu_id, r); + + fprintf(stderr, "vCPU thread running vCPU %u\n", params->vcpu_id); + vcpu_run(params->vm, params->vcpu_id); + exit_reason = vcpu_state(params->vm, params->vcpu_id)->exit_reason; + + TEST_ASSERT(exit_reason == KVM_EXIT_IO, + "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO", + params->vcpu_id, exit_reason, exit_reason_str(exit_reason)); + + if (get_ucall(params->vm, params->vcpu_id, &uc) == UCALL_ABORT) { + TEST_ASSERT(false, + "vCPU %u exited with error: %s.\n" + "Sending vCPU sent %lu IPIs to halting vCPU\n" + "Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n" + "Halter TPR=%#x PPR=%#x LVR=%#x\n" + "Migrations attempted: %lu\n" + "Migrations completed: %lu\n", + params->vcpu_id, (const char *)uc.args[0], + params->data->ipis_sent, params->data->hlt_count, + params->data->wake_count, + *params->pipis_rcvd, params->data->halter_tpr, + params->data->halter_ppr, params->data->halter_lvr, + params->data->migrations_attempted, + params->data->migrations_completed); + } + + return NULL; +} + +static void cancel_join_vcpu_thread(pthread_t thread, uint32_t vcpu_id) +{ + void *retval; + int r; + + r = pthread_cancel(thread); + TEST_ASSERT(r == 0, + "pthread_cancel on vcpu_id=%d failed with errno=%d", + vcpu_id, r); + + r = pthread_join(thread, &retval); + TEST_ASSERT(r == 0, + "pthread_join on vcpu_id=%d failed with errno=%d", + vcpu_id, r); + TEST_ASSERT(retval == PTHREAD_CANCELED, + "expected retval=%p, got %p", PTHREAD_CANCELED, + retval); +} + +void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs, + uint64_t *pipis_rcvd) +{ + long pages_not_moved; + unsigned long nodemask = 0; + unsigned long nodemasks[sizeof(nodemask) * 8]; + int nodes = 0; + time_t start_time, last_update, now; + time_t interval_secs = 1; + int i, r; + int from, to; + unsigned long bit; + uint64_t hlt_count; + uint64_t wake_count; + uint64_t ipis_sent; + + fprintf(stderr, "Calling migrate_pages every %d microseconds\n", + delay_usecs); + + /* Get set of first 64 numa nodes available */ + r = get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8, + 0, MPOL_F_MEMS_ALLOWED); + TEST_ASSERT(r == 0, "get_mempolicy failed errno=%d", errno); + + fprintf(stderr, "Numa nodes found amongst first %lu possible nodes " + "(each 1-bit indicates node is present): %#lx\n", + sizeof(nodemask) * 8, nodemask); + + /* Init array of masks containing a single-bit in each, one for each + * available node. migrate_pages called below requires specifying nodes + * as bit masks. 
+ */ + for (i = 0, bit = 1; i < sizeof(nodemask) * 8; i++, bit <<= 1) { + if (nodemask & bit) { + nodemasks[nodes] = nodemask & bit; + nodes++; + } + } + + TEST_ASSERT(nodes > 1, + "Did not find at least 2 numa nodes. Can't do migration\n"); + + fprintf(stderr, "Migrating amongst %d nodes found\n", nodes); + + from = 0; + to = 1; + start_time = time(NULL); + last_update = start_time; + + ipis_sent = data->ipis_sent; + hlt_count = data->hlt_count; + wake_count = data->wake_count; + + while ((int)(time(NULL) - start_time) < run_secs) { + data->migrations_attempted++; + + /* + * migrate_pages with PID=0 will migrate all pages of this + * process between the nodes specified as bitmasks. The page + * backing the APIC access address belongs to this process + * because it is allocated by KVM in the context of the + * KVM_CREATE_VCPU ioctl. If that assumption ever changes this + * test may break or give a false positive signal. + */ + pages_not_moved = migrate_pages(0, sizeof(nodemasks[from]), + &nodemasks[from], + &nodemasks[to]); + if (pages_not_moved < 0) + fprintf(stderr, + "migrate_pages failed, errno=%d\n", errno); + else if (pages_not_moved > 0) + fprintf(stderr, + "migrate_pages could not move %ld pages\n", + pages_not_moved); + else + data->migrations_completed++; + + from = to; + to++; + if (to == nodes) + to = 0; + + now = time(NULL); + if (((now - start_time) % interval_secs == 0) && + (now != last_update)) { + last_update = now; + fprintf(stderr, + "%lu seconds: Migrations attempted=%lu completed=%lu, " + "IPIs sent=%lu received=%lu, HLTs=%lu wakes=%lu\n", + now - start_time, data->migrations_attempted, + data->migrations_completed, + data->ipis_sent, *pipis_rcvd, + data->hlt_count, data->wake_count); + + TEST_ASSERT(ipis_sent != data->ipis_sent && + hlt_count != data->hlt_count && + wake_count != data->wake_count, + "IPI, HLT and wake count have not increased " + "in the last %lu seconds. " + "HLTer is likely hung.\n", interval_secs); + + ipis_sent = data->ipis_sent; + hlt_count = data->hlt_count; + wake_count = data->wake_count; + } + usleep(delay_usecs); + } +} + +void get_cmdline_args(int argc, char *argv[], int *run_secs, + bool *migrate, int *delay_usecs) +{ + for (;;) { + int opt = getopt(argc, argv, "s:d:m"); + + if (opt == -1) + break; + switch (opt) { + case 's': + *run_secs = parse_size(optarg); + break; + case 'm': + *migrate = true; + break; + case 'd': + *delay_usecs = parse_size(optarg); + break; + default: + TEST_ASSERT(false, + "Usage: -s . Default is %d seconds.\n" + "-m adds calls to migrate_pages while vCPUs are running." + " Default is no migrations.\n" + "-d - delay between migrate_pages() calls." 
+ " Default is %d microseconds.\n", + DEFAULT_RUN_SECS, DEFAULT_DELAY_USECS); + } + } +} + +int main(int argc, char *argv[]) +{ + int r; + int wait_secs; + const int max_halter_wait = 10; + int run_secs = 0; + int delay_usecs = 0; + struct test_data_page *data; + vm_vaddr_t test_data_page_vaddr; + bool migrate = false; + pthread_t threads[2]; + struct thread_params params[2]; + struct kvm_vm *vm; + uint64_t *pipis_rcvd; + + get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs); + if (run_secs <= 0) + run_secs = DEFAULT_RUN_SECS; + if (delay_usecs <= 0) + delay_usecs = DEFAULT_DELAY_USECS; + + vm = vm_create_default(HALTER_VCPU_ID, 0, halter_guest_code); + params[0].vm = vm; + params[1].vm = vm; + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, HALTER_VCPU_ID); + vm_handle_exception(vm, IPI_VECTOR, guest_ipi_handler); + + virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA, 0); + + vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code); + + test_data_page_vaddr = vm_vaddr_alloc(vm, 0x1000, 0x1000, 0, 0); + data = + (struct test_data_page *)addr_gva2hva(vm, test_data_page_vaddr); + memset(data, 0, sizeof(*data)); + params[0].data = data; + params[1].data = data; + + vcpu_args_set(vm, HALTER_VCPU_ID, 1, test_data_page_vaddr); + vcpu_args_set(vm, SENDER_VCPU_ID, 1, test_data_page_vaddr); + + pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd); + params[0].pipis_rcvd = pipis_rcvd; + params[1].pipis_rcvd = pipis_rcvd; + + /* Start halter vCPU thread and wait for it to execute first HLT. */ + params[0].vcpu_id = HALTER_VCPU_ID; + r = pthread_create(&threads[0], NULL, vcpu_thread, ¶ms[0]); + TEST_ASSERT(r == 0, + "pthread_create halter failed errno=%d", errno); + fprintf(stderr, "Halter vCPU thread started\n"); + + wait_secs = 0; + while ((wait_secs < max_halter_wait) && !data->hlt_count) { + sleep(1); + wait_secs++; + } + + TEST_ASSERT(data->hlt_count, + "Halter vCPU did not execute first HLT within %d seconds", + max_halter_wait); + + fprintf(stderr, + "Halter vCPU thread reported its APIC ID: %u after %d seconds.\n", + data->halter_apic_id, wait_secs); + + params[1].vcpu_id = SENDER_VCPU_ID; + r = pthread_create(&threads[1], NULL, vcpu_thread, ¶ms[1]); + TEST_ASSERT(r == 0, "pthread_create sender failed errno=%d", errno); + + fprintf(stderr, + "IPI sender vCPU thread started. Letting vCPUs run for %d seconds.\n", + run_secs); + + if (!migrate) + sleep(run_secs); + else + do_migrations(data, run_secs, delay_usecs, pipis_rcvd); + + /* + * Cancel threads and wait for them to stop. 
+ */ + cancel_join_vcpu_thread(threads[0], HALTER_VCPU_ID); + cancel_join_vcpu_thread(threads[1], SENDER_VCPU_ID); + + fprintf(stderr, + "Test successful after running for %d seconds.\n" + "Sending vCPU sent %lu IPIs to halting vCPU\n" + "Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n" + "Halter APIC ID=%#x\n" + "Sender ICR value=%#x ICR2 value=%#x\n" + "Halter TPR=%#x PPR=%#x LVR=%#x\n" + "Migrations attempted: %lu\n" + "Migrations completed: %lu\n", + run_secs, data->ipis_sent, + data->hlt_count, data->wake_count, *pipis_rcvd, + data->halter_apic_id, + data->icr, data->icr2, + data->halter_tpr, data->halter_ppr, data->halter_lvr, + data->migrations_attempted, data->migrations_completed); + + kvm_vm_free(vm); + + return 0; +} -- cgit v1.2.3 From c5e2184d1544f9e56140791eff1a351bea2e63b9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 14 Jan 2021 16:40:51 -0800 Subject: KVM: x86/mmu: Remove the defunct update_pte() paging hook Remove the update_pte() shadow paging logic, which was obsoleted by commit 4731d4c7a077 ("KVM: MMU: out of sync shadow core"), but never removed. As pointed out by Yu, KVM never write protects leaf page tables for the purposes of shadow paging, and instead marks their associated shadow page as unsync so that the guest can write PTEs at will. The update_pte() path, which predates the unsync logic, optimizes COW scenarios by refreshing leaf SPTEs when they are written, as opposed to zapping the SPTE, restarting the guest, and installing the new SPTE on the subsequent fault. Since KVM no longer write-protects leaf page tables, update_pte() is unreachable and can be dropped. Reported-by: Yu Zhang Signed-off-by: Sean Christopherson Message-Id: <20210115004051.4099250-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 --- arch/x86/kvm/mmu/mmu.c | 49 ++--------------------------------------- arch/x86/kvm/x86.c | 1 - 3 files changed, 2 insertions(+), 51 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3d6616f6f6ef..ed575c5655dd 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -358,8 +358,6 @@ struct kvm_mmu { int (*sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp); void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa); - void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - u64 *spte, const void *pte); hpa_t root_hpa; gpa_t root_pgd; union kvm_mmu_role mmu_role; @@ -1031,7 +1029,6 @@ struct kvm_arch { struct kvm_vm_stat { ulong mmu_shadow_zapped; ulong mmu_pte_write; - ulong mmu_pte_updated; ulong mmu_pde_zapped; ulong mmu_flooded; ulong mmu_recycled; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ed861245ecf0..8740ac1a48cb 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1723,13 +1723,6 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu, return 0; } -static void nonpaging_update_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - const void *pte) -{ - WARN_ON(1); -} - #define KVM_PAGE_ARRAY_NR 16 struct kvm_mmu_pages { @@ -3813,7 +3806,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu, context->gva_to_gpa = nonpaging_gva_to_gpa; context->sync_page = nonpaging_sync_page; context->invlpg = NULL; - context->update_pte = nonpaging_update_pte; context->root_level = 0; context->shadow_root_level = PT32E_ROOT_LEVEL; context->direct_map = true; @@ -4395,7 +4387,6 @@ static void paging64_init_context_common(struct 
kvm_vcpu *vcpu, context->gva_to_gpa = paging64_gva_to_gpa; context->sync_page = paging64_sync_page; context->invlpg = paging64_invlpg; - context->update_pte = paging64_update_pte; context->shadow_root_level = level; context->direct_map = false; } @@ -4424,7 +4415,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu, context->gva_to_gpa = paging32_gva_to_gpa; context->sync_page = paging32_sync_page; context->invlpg = paging32_invlpg; - context->update_pte = paging32_update_pte; context->shadow_root_level = PT32E_ROOT_LEVEL; context->direct_map = false; } @@ -4506,7 +4496,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->page_fault = kvm_tdp_page_fault; context->sync_page = nonpaging_sync_page; context->invlpg = NULL; - context->update_pte = nonpaging_update_pte; context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu); context->direct_map = true; context->get_guest_pgd = get_cr3; @@ -4678,7 +4667,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->gva_to_gpa = ept_gva_to_gpa; context->sync_page = ept_sync_page; context->invlpg = ept_invlpg; - context->update_pte = ept_update_pte; context->root_level = level; context->direct_map = false; context->mmu_role.as_u64 = new_role.as_u64; @@ -4826,19 +4814,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_mmu_unload); -static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, u64 *spte, - const void *new) -{ - if (sp->role.level != PG_LEVEL_4K) { - ++vcpu->kvm->stat.mmu_pde_zapped; - return; - } - - ++vcpu->kvm->stat.mmu_pte_updated; - vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); -} - static bool need_remote_flush(u64 old, u64 new) { if (!is_shadow_present_pte(old)) @@ -4954,22 +4929,6 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) return spte; } -/* - * Ignore various flags when determining if a SPTE can be immediately - * overwritten for the current MMU. - * - level: explicitly checked in mmu_pte_write_new_pte(), and will never - * match the current MMU role, as MMU's level tracks the root level. 
- * - access: updated based on the new guest PTE - * - quadrant: handled by get_written_sptes() - * - invalid: always false (loop only walks valid shadow pages) - */ -static const union kvm_mmu_page_role role_ign = { - .level = 0xf, - .access = 0x7, - .quadrant = 0x3, - .invalid = 0x1, -}; - static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes, struct kvm_page_track_notifier_node *node) @@ -5020,14 +4979,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, local_flush = true; while (npte--) { - u32 base_role = vcpu->arch.mmu->mmu_role.base.word; - entry = *spte; mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); - if (gentry && - !((sp->role.word ^ base_role) & ~role_ign.word) && - rmap_can_add(vcpu)) - mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); + if (gentry && sp->role.level != PG_LEVEL_4K) + ++vcpu->kvm->stat.mmu_pde_zapped; if (need_remote_flush(entry, *spte)) remote_flush = true; ++spte; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8b8f5ee839c2..3905ca46b313 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -234,7 +234,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns), VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped), VM_STAT("mmu_pte_write", mmu_pte_write), - VM_STAT("mmu_pte_updated", mmu_pte_updated), VM_STAT("mmu_pde_zapped", mmu_pde_zapped), VM_STAT("mmu_flooded", mmu_flooded), VM_STAT("mmu_recycled", mmu_recycled), -- cgit v1.2.3 From f9224a5235912fbfaa9f642e61e3f943ae0628ad Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 12 Jan 2021 13:42:48 -0800 Subject: KVM: selftests: Rename timespec_diff_now to timespec_elapsed In response to some earlier comments from Peter Xu, rename timespec_diff_now to the much more sensible timespec_elapsed. No functional change intended. Reviewed-by: Jacob Xu Reviewed-by: Makarand Sonare Signed-off-by: Ben Gardon Message-Id: <20210112214253.463999-2-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 8 ++++---- tools/testing/selftests/kvm/dirty_log_perf_test.c | 14 +++++++------- tools/testing/selftests/kvm/include/test_util.h | 2 +- tools/testing/selftests/kvm/lib/test_util.c | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index cdad1eca72f7..a1cd234e6f5e 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -64,7 +64,7 @@ static void *vcpu_worker(void *data) exit_reason_str(run->exit_reason)); } - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id, ts_diff.tv_sec, ts_diff.tv_nsec); @@ -95,7 +95,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr) return r; } - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid, timespec_to_ns(ts_diff)); @@ -190,7 +190,7 @@ static void *uffd_handler_thread_fn(void *arg) pages++; } - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. 
(%f/sec)\n", pages, ts_diff.tv_sec, ts_diff.tv_nsec, pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0)); @@ -339,7 +339,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id); } - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); pr_info("All vCPU threads joined\n"); diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 2283a0ec74a9..16efe6589b43 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -52,7 +52,7 @@ static void *vcpu_worker(void *data) clock_gettime(CLOCK_MONOTONIC, &start); ret = _vcpu_run(vm, vcpu_id); - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC, @@ -149,7 +149,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) pr_debug("Waiting for vcpu_last_completed_iteration == %lu\n", iteration); - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); pr_info("Populate memory time: %ld.%.9lds\n", ts_diff.tv_sec, ts_diff.tv_nsec); @@ -157,7 +157,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) clock_gettime(CLOCK_MONOTONIC, &start); vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, KVM_MEM_LOG_DIRTY_PAGES); - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); pr_info("Enabling dirty logging time: %ld.%.9lds\n\n", ts_diff.tv_sec, ts_diff.tv_nsec); @@ -176,7 +176,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) vcpu_id, iteration); } - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff); pr_info("Iteration %lu dirty memory time: %ld.%.9lds\n", iteration, ts_diff.tv_sec, ts_diff.tv_nsec); @@ -184,7 +184,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) clock_gettime(CLOCK_MONOTONIC, &start); kvm_vm_get_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap); - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); get_dirty_log_total = timespec_add(get_dirty_log_total, ts_diff); pr_info("Iteration %lu get dirty log time: %ld.%.9lds\n", @@ -195,7 +195,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) kvm_vm_clear_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap, 0, host_num_pages); - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); clear_dirty_log_total = timespec_add(clear_dirty_log_total, ts_diff); pr_info("Iteration %lu clear dirty log time: %ld.%.9lds\n", @@ -211,7 +211,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) /* Disable dirty logging */ clock_gettime(CLOCK_MONOTONIC, &start); vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0); - ts_diff = timespec_diff_now(start); + ts_diff = timespec_elapsed(start); pr_info("Disabling dirty logging time: %ld.%.9lds\n", ts_diff.tv_sec, ts_diff.tv_nsec); diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index ffffa560436b..b86090ef82da 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -64,7 +64,7 @@ int64_t timespec_to_ns(struct timespec ts); struct timespec timespec_add_ns(struct timespec ts, int64_t ns); struct timespec timespec_add(struct timespec ts1, struct timespec ts2); struct timespec timespec_sub(struct 
timespec ts1, struct timespec ts2); -struct timespec timespec_diff_now(struct timespec start); +struct timespec timespec_elapsed(struct timespec start); struct timespec timespec_div(struct timespec ts, int divisor); #endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 8e04c0b1608e..5f87ed32caf5 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -84,7 +84,7 @@ struct timespec timespec_sub(struct timespec ts1, struct timespec ts2) return timespec_add_ns((struct timespec){0}, ns1 - ns2); } -struct timespec timespec_diff_now(struct timespec start) +struct timespec timespec_elapsed(struct timespec start) { struct timespec end;
-- cgit v1.2.3 From 89dc52946a165b5396d4b84717d36ded8c5783c7 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 12 Jan 2021 13:42:49 -0800 Subject: KVM: selftests: Avoid flooding debug log while populating memory Peter Xu pointed out that a log message printed while waiting for the memory population phase of the dirty_log_perf_test will flood the debug logs as there is no delay after printing the message. Since the message does not provide much value anyway, remove it. Reviewed-by: Jacob Xu Signed-off-by: Ben Gardon Message-Id: <20210112214253.463999-3-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/dirty_log_perf_test.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 16efe6589b43..15a9c45bdb5f 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -146,8 +146,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) /* Allow the vCPU to populate memory */ pr_debug("Starting iteration %lu - Populating\n", iteration); while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration) - pr_debug("Waiting for vcpu_last_completed_iteration == %lu\n", - iteration); + ; ts_diff = timespec_elapsed(start); pr_info("Populate memory time: %ld.%.9lds\n", @@ -171,9 +170,9 @@ static void run_test(enum vm_guest_mode mode, void *arg) pr_debug("Starting iteration %lu\n", iteration); for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { - while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration) - pr_debug("Waiting for vCPU %d vcpu_last_completed_iteration == %lu\n", - vcpu_id, iteration); + while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) + != iteration) + ; } ts_diff = timespec_elapsed(start);
-- cgit v1.2.3 From 2d501238bc257ae86b345cb23fae3fd1af14687e Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 12 Jan 2021 13:42:50 -0800 Subject: KVM: selftests: Convert iterations to int in dirty_log_perf_test In order to add an iteration -1 to indicate that the memory population phase has not yet completed, convert the iteration counters to ints. No functional change intended.
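For illustration only (not part of the patch), the signed counters are what let a later change use -1 as a natural "population not finished yet" sentinel; a minimal sketch, reusing the test's names:

	/* Sketch: with plain ints, -1 can mean "no iteration completed yet". */
	static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

	static void wait_for_vcpu_iteration(int vcpu_id, int iteration)
	{
		while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration)
			;
	}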
Reviewed-by: Jacob Xu Signed-off-by: Ben Gardon Message-Id: <20210112214253.463999-4-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/dirty_log_perf_test.c | 26 +++++++++++------------ 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 15a9c45bdb5f..3875f22d7283 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -28,8 +28,8 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; /* Host variables */ static u64 dirty_log_manual_caps; static bool host_quit; -static uint64_t iteration; -static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS]; +static int iteration; +static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; static void *vcpu_worker(void *data) { @@ -48,7 +48,7 @@ static void *vcpu_worker(void *data) run = vcpu_state(vm, vcpu_id); while (!READ_ONCE(host_quit)) { - uint64_t current_iteration = READ_ONCE(iteration); + int current_iteration = READ_ONCE(iteration); clock_gettime(CLOCK_MONOTONIC, &start); ret = _vcpu_run(vm, vcpu_id); @@ -61,17 +61,17 @@ static void *vcpu_worker(void *data) pr_debug("Got sync event from vCPU %d\n", vcpu_id); vcpu_last_completed_iteration[vcpu_id] = current_iteration; - pr_debug("vCPU %d updated last completed iteration to %lu\n", + pr_debug("vCPU %d updated last completed iteration to %d\n", vcpu_id, vcpu_last_completed_iteration[vcpu_id]); if (current_iteration) { pages_count += vcpu_args->pages; total = timespec_add(total, ts_diff); - pr_debug("vCPU %d iteration %lu dirty memory time: %ld.%.9lds\n", + pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n", vcpu_id, current_iteration, ts_diff.tv_sec, ts_diff.tv_nsec); } else { - pr_debug("vCPU %d iteration %lu populate memory time: %ld.%.9lds\n", + pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n", vcpu_id, current_iteration, ts_diff.tv_sec, ts_diff.tv_nsec); } @@ -81,7 +81,7 @@ static void *vcpu_worker(void *data) } avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]); - pr_debug("\nvCPU %d dirtied 0x%lx pages over %lu iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n", + pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. 
(Avg %ld.%.9lds/iteration)\n", vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id], total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec); @@ -144,7 +144,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) } /* Allow the vCPU to populate memory */ - pr_debug("Starting iteration %lu - Populating\n", iteration); + pr_debug("Starting iteration %d - Populating\n", iteration); while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration) ; @@ -168,7 +168,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) clock_gettime(CLOCK_MONOTONIC, &start); iteration++; - pr_debug("Starting iteration %lu\n", iteration); + pr_debug("Starting iteration %d\n", iteration); for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration) @@ -177,7 +177,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) ts_diff = timespec_elapsed(start); vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff); - pr_info("Iteration %lu dirty memory time: %ld.%.9lds\n", + pr_info("Iteration %d dirty memory time: %ld.%.9lds\n", iteration, ts_diff.tv_sec, ts_diff.tv_nsec); clock_gettime(CLOCK_MONOTONIC, &start); @@ -186,7 +186,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) ts_diff = timespec_elapsed(start); get_dirty_log_total = timespec_add(get_dirty_log_total, ts_diff); - pr_info("Iteration %lu get dirty log time: %ld.%.9lds\n", + pr_info("Iteration %d get dirty log time: %ld.%.9lds\n", iteration, ts_diff.tv_sec, ts_diff.tv_nsec); if (dirty_log_manual_caps) { @@ -197,7 +197,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) ts_diff = timespec_elapsed(start); clear_dirty_log_total = timespec_add(clear_dirty_log_total, ts_diff); - pr_info("Iteration %lu clear dirty log time: %ld.%.9lds\n", + pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n", iteration, ts_diff.tv_sec, ts_diff.tv_nsec); } } @@ -273,7 +273,7 @@ int main(int argc, char *argv[]) while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) { switch (opt) { case 'i': - p.iterations = strtol(optarg, NULL, 10); + p.iterations = atoi(optarg); break; case 'p': p.phys_offset = strtoull(optarg, NULL, 0); -- cgit v1.2.3 From 86753bd04c7ca6b551b83c5395dd25e95de99aa4 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 12 Jan 2021 13:42:51 -0800 Subject: KVM: selftests: Fix population stage in dirty_log_perf_test Currently the population stage in the dirty_log_perf_test does nothing as the per-vCPU iteration counters are not initialized and the loop does not wait for each vCPU. Remedy those errors. 
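For clarity, a sketch of the pre-patch logic (not code from this patch): the populate wait reused vcpu_id after the pthread_create() loop, so it polled a slot no vCPU ever updates and, with the counters still zero-initialized and iteration == 0, fell through immediately:

	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
		pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
			       &perf_test_args.vcpu_args[vcpu_id]);

	/*
	 * Here vcpu_id == nr_vcpus, the entry it names stays 0, and 0 equals
	 * iteration during population, so this "wait" exits at once.
	 */
	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration)
		;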
Reviewed-by: Jacob Xu Reviewed-by: Makarand Sonare Signed-off-by: Ben Gardon Message-Id: <20210112214253.463999-5-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/dirty_log_perf_test.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 3875f22d7283..fb6eb7fa0b45 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -139,14 +139,19 @@ static void run_test(enum vm_guest_mode mode, void *arg) clock_gettime(CLOCK_MONOTONIC, &start); for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { + vcpu_last_completed_iteration[vcpu_id] = -1; + pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, &perf_test_args.vcpu_args[vcpu_id]); } - /* Allow the vCPU to populate memory */ + /* Allow the vCPUs to populate memory */ pr_debug("Starting iteration %d - Populating\n", iteration); - while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration) - ; + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { + while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != + iteration) + ; + } ts_diff = timespec_elapsed(start); pr_info("Populate memory time: %ld.%.9lds\n", -- cgit v1.2.3 From 82f91337ddde22eaa2e9e0aca248f5e6f336fa91 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 12 Jan 2021 13:42:52 -0800 Subject: KVM: selftests: Add option to overlap vCPU memory access Add an option to overlap the ranges of memory each vCPU accesses instead of partitioning them. This option will increase the probability of multiple vCPUs faulting on the same page at the same time, and causing interesting races, if there are bugs in the page fault handler or elsewhere in the kernel. 
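For reference, a condensed sketch of the two layouts the new option selects between (names follow perf_test_setup_vcpus(); see the diff below for the actual change):

	if (partition_vcpu_memory_access) {
		/* Default: each vCPU faults on its own slice of guest memory. */
		gva   = guest_test_virt_mem + vcpu_id * vcpu_memory_bytes;
		pages = vcpu_memory_bytes / guest_page_size;
	} else {
		/*
		 * -o: every vCPU walks the same region, making concurrent
		 * faults on the same page far more likely.
		 */
		gva   = guest_test_virt_mem;
		pages = (vcpus * vcpu_memory_bytes) / guest_page_size;
	}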
Reviewed-by: Jacob Xu Reviewed-by: Makarand Sonare Signed-off-by: Ben Gardon Message-Id: <20210112214253.463999-6-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 32 +++++++++++++++++----- tools/testing/selftests/kvm/dirty_log_perf_test.c | 14 ++++++++-- .../testing/selftests/kvm/include/perf_test_util.h | 4 ++- tools/testing/selftests/kvm/lib/perf_test_util.c | 25 ++++++++++++----- 4 files changed, 57 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index a1cd234e6f5e..e8fda95f8389 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -250,6 +250,7 @@ static int setup_demand_paging(struct kvm_vm *vm, struct test_params { bool use_uffd; useconds_t uffd_delay; + bool partition_vcpu_memory_access; }; static void run_test(enum vm_guest_mode mode, void *arg) @@ -277,7 +278,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); TEST_ASSERT(vcpu_threads, "Memory allocation failed"); - perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size); + perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, + p->partition_vcpu_memory_access); if (p->use_uffd) { uffd_handler_threads = @@ -293,10 +295,19 @@ static void run_test(enum vm_guest_mode mode, void *arg) for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { vm_paddr_t vcpu_gpa; void *vcpu_hva; + uint64_t vcpu_mem_size; - vcpu_gpa = guest_test_phys_mem + (vcpu_id * guest_percpu_mem_size); + + if (p->partition_vcpu_memory_access) { + vcpu_gpa = guest_test_phys_mem + + (vcpu_id * guest_percpu_mem_size); + vcpu_mem_size = guest_percpu_mem_size; + } else { + vcpu_gpa = guest_test_phys_mem; + vcpu_mem_size = guest_percpu_mem_size * nr_vcpus; + } PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n", - vcpu_id, vcpu_gpa, vcpu_gpa + guest_percpu_mem_size); + vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size); /* Cache the HVA pointer of the region */ vcpu_hva = addr_gpa2hva(vm, vcpu_gpa); @@ -313,7 +324,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) &uffd_handler_threads[vcpu_id], pipefds[vcpu_id * 2], p->uffd_delay, &uffd_args[vcpu_id], - vcpu_hva, guest_percpu_mem_size); + vcpu_hva, vcpu_mem_size); if (r < 0) exit(-r); } @@ -376,7 +387,7 @@ static void help(char *name) { puts(""); printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n" - " [-b memory] [-v vcpus]\n", name); + " [-b memory] [-v vcpus] [-o]\n", name); guest_modes_help(); printf(" -u: use User Fault FD to handle vCPU page\n" " faults.\n"); @@ -387,6 +398,8 @@ static void help(char *name) " demand paged by each vCPU. e.g. 
10M or 3G.\n" " Default: 1G\n"); printf(" -v: specify the number of vCPUs to run.\n"); + printf(" -o: Overlap guest memory accesses instead of partitioning\n" + " them into a separate region of memory for each vCPU.\n"); puts(""); exit(0); } @@ -394,12 +407,14 @@ static void help(char *name) int main(int argc, char *argv[]) { int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); - struct test_params p = {}; + struct test_params p = { + .partition_vcpu_memory_access = true, + }; int opt; guest_modes_append_default(); - while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) { + while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) { switch (opt) { case 'm': guest_modes_cmdline(optarg); @@ -419,6 +434,9 @@ int main(int argc, char *argv[]) TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, "Invalid number of vcpus, must be between 1 and %d", max_vcpus); break; + case 'o': + p.partition_vcpu_memory_access = false; + break; case 'h': default: help(argv[0]); diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index fb6eb7fa0b45..a0231be3984d 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -92,6 +92,7 @@ struct test_params { unsigned long iterations; uint64_t phys_offset; int wr_fract; + bool partition_vcpu_memory_access; }; static void run_test(enum vm_guest_mode mode, void *arg) @@ -129,7 +130,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); TEST_ASSERT(vcpu_threads, "Memory allocation failed"); - perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size); + perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, + p->partition_vcpu_memory_access); sync_global_to_guest(vm, perf_test_args); @@ -240,7 +242,7 @@ static void help(char *name) { puts(""); printf("usage: %s [-h] [-i iterations] [-p offset] " - "[-m mode] [-b vcpu bytes] [-v vcpus]\n", name); + "[-m mode] [-b vcpu bytes] [-v vcpus] [-o]\n", name); puts(""); printf(" -i: specify iteration counts (default: %"PRIu64")\n", TEST_HOST_LOOP_N); @@ -255,6 +257,8 @@ static void help(char *name) " 1/.\n" " (default: 1 i.e. 
all pages are written to.)\n"); printf(" -v: specify the number of vCPUs to run.\n"); + printf(" -o: Overlap guest memory accesses instead of partitioning\n" + " them into a separate region of memory for each vCPU.\n"); puts(""); exit(0); } @@ -265,6 +269,7 @@ int main(int argc, char *argv[]) struct test_params p = { .iterations = TEST_HOST_LOOP_N, .wr_fract = 1, + .partition_vcpu_memory_access = true, }; int opt; @@ -275,7 +280,7 @@ int main(int argc, char *argv[]) guest_modes_append_default(); - while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) { + while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:o")) != -1) { switch (opt) { case 'i': p.iterations = atoi(optarg); @@ -299,6 +304,9 @@ int main(int argc, char *argv[]) TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, "Invalid number of vcpus, must be between 1 and %d", max_vcpus); break; + case 'o': + p.partition_vcpu_memory_access = false; + break; case 'h': default: help(argv[0]); diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h index b1188823c31b..f406534f0487 100644 --- a/tools/testing/selftests/kvm/include/perf_test_util.h +++ b/tools/testing/selftests/kvm/include/perf_test_util.h @@ -46,6 +46,8 @@ extern uint64_t guest_test_phys_mem; struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, uint64_t vcpu_memory_bytes); void perf_test_destroy_vm(struct kvm_vm *vm); -void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes); +void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, + uint64_t vcpu_memory_bytes, + bool partition_vcpu_memory_access); #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c index 9be1944c2d1c..f5fed2fbe964 100644 --- a/tools/testing/selftests/kvm/lib/perf_test_util.c +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c @@ -112,7 +112,9 @@ void perf_test_destroy_vm(struct kvm_vm *vm) kvm_vm_free(vm); } -void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes) +void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, + uint64_t vcpu_memory_bytes, + bool partition_vcpu_memory_access) { vm_paddr_t vcpu_gpa; struct perf_test_vcpu_args *vcpu_args; @@ -122,13 +124,22 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_by vcpu_args = &perf_test_args.vcpu_args[vcpu_id]; vcpu_args->vcpu_id = vcpu_id; - vcpu_args->gva = guest_test_virt_mem + - (vcpu_id * vcpu_memory_bytes); - vcpu_args->pages = vcpu_memory_bytes / - perf_test_args.guest_page_size; + if (partition_vcpu_memory_access) { + vcpu_args->gva = guest_test_virt_mem + + (vcpu_id * vcpu_memory_bytes); + vcpu_args->pages = vcpu_memory_bytes / + perf_test_args.guest_page_size; + vcpu_gpa = guest_test_phys_mem + + (vcpu_id * vcpu_memory_bytes); + } else { + vcpu_args->gva = guest_test_virt_mem; + vcpu_args->pages = (vcpus * vcpu_memory_bytes) / + perf_test_args.guest_page_size; + vcpu_gpa = guest_test_phys_mem; + } - vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes); pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n", - vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes); + vcpu_id, vcpu_gpa, vcpu_gpa + + (vcpu_args->pages * perf_test_args.guest_page_size)); } } -- cgit v1.2.3 From f73a3446252e6c6d84d6b80b89fc3fe810a348c2 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 12 Jan 2021 13:42:53 -0800 Subject: KVM: selftests: Add memslot modification stress test Add a memslot 
modification stress test in which a memslot is repeatedly created and removed while vCPUs access memory in another memslot. Most userspaces do not create or remove memslots on running VMs which makes it hard to test races in adding and removing memslots without a dedicated test. Adding and removing a memslot also has the effect of tearing down the entire paging structure, which leads to more page faults and pressure on the page fault handling path than a one-and-done memory population test. Reviewed-by: Jacob Xu Signed-off-by: Ben Gardon Message-Id: <20210112214253.463999-7-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../kvm/memslot_modification_stress_test.c | 211 +++++++++++++++++++++ 3 files changed, 213 insertions(+) create mode 100644 tools/testing/selftests/kvm/memslot_modification_stress_test.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index eaba522068c7..a747260662b6 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -30,5 +30,6 @@ /dirty_log_test /dirty_log_perf_test /kvm_create_max_vcpus +/memslot_modification_stress_test /set_memory_region_test /steal_time diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b06419813f7f..fcc46355d7e1 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -64,6 +64,7 @@ TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus +TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test TEST_GEN_PROGS_x86_64 += set_memory_region_test TEST_GEN_PROGS_x86_64 += steal_time diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c new file mode 100644 index 000000000000..cae1b90cb63f --- /dev/null +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KVM memslot modification stress test + * Adapted from demand_paging_test.c + * + * Copyright (C) 2018, Red Hat, Inc. + * Copyright (C) 2020, Google, Inc. 
+ */ + +#define _GNU_SOURCE /* for program_invocation_name */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "perf_test_util.h" +#include "processor.h" +#include "test_util.h" +#include "guest_modes.h" + +#define DUMMY_MEMSLOT_INDEX 7 + +#define DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS 10 + + +static int nr_vcpus = 1; +static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; + +static bool run_vcpus = true; + +static void *vcpu_worker(void *data) +{ + int ret; + struct perf_test_vcpu_args *vcpu_args = + (struct perf_test_vcpu_args *)data; + int vcpu_id = vcpu_args->vcpu_id; + struct kvm_vm *vm = perf_test_args.vm; + struct kvm_run *run; + + vcpu_args_set(vm, vcpu_id, 1, vcpu_id); + run = vcpu_state(vm, vcpu_id); + + /* Let the guest access its memory until a stop signal is received */ + while (READ_ONCE(run_vcpus)) { + ret = _vcpu_run(vm, vcpu_id); + TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); + + if (get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC) + continue; + + TEST_ASSERT(false, + "Invalid guest sync status: exit_reason=%s\n", + exit_reason_str(run->exit_reason)); + } + + return NULL; +} + +struct memslot_antagonist_args { + struct kvm_vm *vm; + useconds_t delay; + uint64_t nr_modifications; +}; + +static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, + uint64_t nr_modifications, uint64_t gpa) +{ + int i; + + for (i = 0; i < nr_modifications; i++) { + usleep(delay); + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, + DUMMY_MEMSLOT_INDEX, 1, 0); + + vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX); + } +} + +struct test_params { + useconds_t memslot_modification_delay; + uint64_t nr_memslot_modifications; + bool partition_vcpu_memory_access; +}; + +static void run_test(enum vm_guest_mode mode, void *arg) +{ + struct test_params *p = arg; + pthread_t *vcpu_threads; + struct kvm_vm *vm; + int vcpu_id; + + vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size); + + perf_test_args.wr_fract = 1; + + vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); + TEST_ASSERT(vcpu_threads, "Memory allocation failed"); + + perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, + p->partition_vcpu_memory_access); + + /* Export the shared variables to the guest */ + sync_global_to_guest(vm, perf_test_args); + + pr_info("Finished creating vCPUs\n"); + + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) + pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, + &perf_test_args.vcpu_args[vcpu_id]); + + pr_info("Started all vCPUs\n"); + + add_remove_memslot(vm, p->memslot_modification_delay, + p->nr_memslot_modifications, + guest_test_phys_mem + + (guest_percpu_mem_size * nr_vcpus) + + perf_test_args.host_page_size + + perf_test_args.guest_page_size); + + run_vcpus = false; + + /* Wait for the vcpu threads to quit */ + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) + pthread_join(vcpu_threads[vcpu_id], NULL); + + pr_info("All vCPU threads joined\n"); + + ucall_uninit(vm); + kvm_vm_free(vm); + + free(vcpu_threads); +} + +static void help(char *name) +{ + puts(""); + printf("usage: %s [-h] [-m mode] [-d delay_usec]\n" + " [-b memory] [-v vcpus] [-o] [-i iterations]\n", name); + guest_modes_help(); + printf(" -d: add a delay between each iteration of adding and\n" + " deleting a memslot in usec.\n"); + printf(" -b: specify the size of the memory region which should be\n" + " accessed by each vCPU. e.g. 
10M or 3G.\n" + " Default: 1G\n"); + printf(" -v: specify the number of vCPUs to run.\n"); + printf(" -o: Overlap guest memory accesses instead of partitioning\n" + " them into a separate region of memory for each vCPU.\n"); + printf(" -i: specify the number of iterations of adding and removing\n" + " a memslot.\n" + " Default: %d\n", DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS); + puts(""); + exit(0); +} + +int main(int argc, char *argv[]) +{ + int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); + int opt; + struct test_params p = { + .memslot_modification_delay = 0, + .nr_memslot_modifications = + DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS, + .partition_vcpu_memory_access = true + }; + + guest_modes_append_default(); + + while ((opt = getopt(argc, argv, "hm:d:b:v:oi:")) != -1) { + switch (opt) { + case 'm': + guest_modes_cmdline(optarg); + break; + case 'd': + p.memslot_modification_delay = strtoul(optarg, NULL, 0); + TEST_ASSERT(p.memslot_modification_delay >= 0, + "A negative delay is not supported."); + break; + case 'b': + guest_percpu_mem_size = parse_size(optarg); + break; + case 'v': + nr_vcpus = atoi(optarg); + TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, + "Invalid number of vcpus, must be between 1 and %d", + max_vcpus); + break; + case 'o': + p.partition_vcpu_memory_access = false; + break; + case 'i': + p.nr_memslot_modifications = atoi(optarg); + break; + case 'h': + default: + help(argv[0]); + break; + } + } + + for_each_guest_mode(run_test, &p); + + return 0; +} -- cgit v1.2.3 From 9e965bb75aaec28a9537e35871106367fe88b702 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:33 -0800 Subject: KVM: selftests: Add backing src parameter to dirty_log_perf_test Add a parameter to control the backing memory type for dirty_log_perf_test so that the test can be run with hugepages. 
To: linux-kselftest@vger.kernel.org CC: Peter Xu CC: Andrew Jones CC: Thomas Huth Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-28-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 3 ++- tools/testing/selftests/kvm/dirty_log_perf_test.c | 14 ++++++++--- tools/testing/selftests/kvm/include/kvm_util.h | 6 ----- .../testing/selftests/kvm/include/perf_test_util.h | 3 ++- tools/testing/selftests/kvm/include/test_util.h | 14 +++++++++++ tools/testing/selftests/kvm/lib/perf_test_util.c | 6 ++--- tools/testing/selftests/kvm/lib/test_util.c | 29 ++++++++++++++++++++++ .../kvm/memslot_modification_stress_test.c | 3 ++- 8 files changed, 63 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index e8fda95f8389..5f7a229c3af1 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -266,7 +266,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) int vcpu_id; int r; - vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size); + vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, + VM_MEM_SRC_ANONYMOUS); perf_test_args.wr_fract = 1; diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index a0231be3984d..3b558fba1c88 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -93,6 +93,7 @@ struct test_params { uint64_t phys_offset; int wr_fract; bool partition_vcpu_memory_access; + enum vm_mem_backing_src_type backing_src; }; static void run_test(enum vm_guest_mode mode, void *arg) @@ -112,7 +113,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct kvm_enable_cap cap = {}; struct timespec clear_dirty_log_total = (struct timespec){0}; - vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size); + vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, + p->backing_src); perf_test_args.wr_fract = p->wr_fract; @@ -242,7 +244,7 @@ static void help(char *name) { puts(""); printf("usage: %s [-h] [-i iterations] [-p offset] " - "[-m mode] [-b vcpu bytes] [-v vcpus] [-o]\n", name); + "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]\n", name); puts(""); printf(" -i: specify iteration counts (default: %"PRIu64")\n", TEST_HOST_LOOP_N); @@ -259,6 +261,9 @@ static void help(char *name) printf(" -v: specify the number of vCPUs to run.\n"); printf(" -o: Overlap guest memory accesses instead of partitioning\n" " them into a separate region of memory for each vCPU.\n"); + printf(" -s: specify the type of memory that should be used to\n" + " back the guest data region.\n\n"); + backing_src_help(); puts(""); exit(0); } @@ -270,6 +275,7 @@ int main(int argc, char *argv[]) .iterations = TEST_HOST_LOOP_N, .wr_fract = 1, .partition_vcpu_memory_access = true, + .backing_src = VM_MEM_SRC_ANONYMOUS, }; int opt; @@ -280,7 +286,7 @@ int main(int argc, char *argv[]) guest_modes_append_default(); - while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:o")) != -1) { + while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:os:")) != -1) { switch (opt) { case 'i': p.iterations = atoi(optarg); @@ -306,6 +312,8 @@ int main(int argc, char *argv[]) break; case 'o': p.partition_vcpu_memory_access = false; + case 's': + p.backing_src = parse_backing_src_type(optarg); break; case 'h': default: diff --git a/tools/testing/selftests/kvm/include/kvm_util.h 
b/tools/testing/selftests/kvm/include/kvm_util.h index 5cbb861525ed..2d7eb6989e83 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -79,12 +79,6 @@ struct vm_guest_mode_params { }; extern const struct vm_guest_mode_params vm_guest_mode_params[]; -enum vm_mem_backing_src_type { - VM_MEM_SRC_ANONYMOUS, - VM_MEM_SRC_ANONYMOUS_THP, - VM_MEM_SRC_ANONYMOUS_HUGETLB, -}; - int kvm_check_cap(long cap); int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h index f406534f0487..005f2143adeb 100644 --- a/tools/testing/selftests/kvm/include/perf_test_util.h +++ b/tools/testing/selftests/kvm/include/perf_test_util.h @@ -44,7 +44,8 @@ extern struct perf_test_args perf_test_args; extern uint64_t guest_test_phys_mem; struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, - uint64_t vcpu_memory_bytes); + uint64_t vcpu_memory_bytes, + enum vm_mem_backing_src_type backing_src); void perf_test_destroy_vm(struct kvm_vm *vm); void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes, diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index b86090ef82da..b7f41399f22c 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -67,4 +67,18 @@ struct timespec timespec_sub(struct timespec ts1, struct timespec ts2); struct timespec timespec_elapsed(struct timespec start); struct timespec timespec_div(struct timespec ts, int divisor); +enum vm_mem_backing_src_type { + VM_MEM_SRC_ANONYMOUS, + VM_MEM_SRC_ANONYMOUS_THP, + VM_MEM_SRC_ANONYMOUS_HUGETLB, +}; + +struct vm_mem_backing_src_alias { + const char *name; + enum vm_mem_backing_src_type type; +}; + +void backing_src_help(void); +enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); + #endif /* SELFTEST_KVM_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c index f5fed2fbe964..81490b9b4e32 100644 --- a/tools/testing/selftests/kvm/lib/perf_test_util.c +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c @@ -49,7 +49,8 @@ static void guest_code(uint32_t vcpu_id) } struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, - uint64_t vcpu_memory_bytes) + uint64_t vcpu_memory_bytes, + enum vm_mem_backing_src_type backing_src) { struct kvm_vm *vm; uint64_t guest_num_pages; @@ -93,8 +94,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); /* Add an extra memory slot for testing */ - vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, - guest_test_phys_mem, + vm_userspace_mem_region_add(vm, backing_src, guest_test_phys_mem, PERF_TEST_MEM_SLOT_INDEX, guest_num_pages, 0); diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 5f87ed32caf5..906c955384e2 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -10,6 +10,7 @@ #include #include #include +#include "linux/kernel.h" #include "test_util.h" @@ -109,3 +110,31 @@ void print_skip(const char *fmt, ...) 
va_end(ap); puts(", skipping test"); } + +const struct vm_mem_backing_src_alias backing_src_aliases[] = { + {"anonymous", VM_MEM_SRC_ANONYMOUS,}, + {"anonymous_thp", VM_MEM_SRC_ANONYMOUS_THP,}, + {"anonymous_hugetlb", VM_MEM_SRC_ANONYMOUS_HUGETLB,}, +}; + +void backing_src_help(void) +{ + int i; + + printf("Available backing src types:\n"); + for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++) + printf("\t%s\n", backing_src_aliases[i].name); +} + +enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++) + if (!strcmp(type_name, backing_src_aliases[i].name)) + return backing_src_aliases[i].type; + + backing_src_help(); + TEST_FAIL("Unknown backing src type: %s", type_name); + return -1; +}
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index cae1b90cb63f..6096bf0a5b34 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -97,7 +97,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct kvm_vm *vm; int vcpu_id; - vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size); + vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, + VM_MEM_SRC_ANONYMOUS); perf_test_args.wr_fract = 1;
-- cgit v1.2.3 From c1d1650f55b1d5bca2e42564391c5484a9a3013b Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:34 -0800 Subject: KVM: selftests: Disable dirty logging with vCPUs running Disabling dirty logging is much more interesting from a testing perspective if the vCPUs are still running. This also exercises the code-path in which collapsible SPTEs must be faulted back in at a higher level after disabling dirty logging. To: linux-kselftest@vger.kernel.org CC: Peter Xu CC: Andrew Jones CC: Thomas Huth Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-29-bgardon@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/dirty_log_perf_test.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 3b558fba1c88..04a2641261be 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -211,11 +211,6 @@ static void run_test(enum vm_guest_mode mode, void *arg) } } - /* Tell the vcpu thread to quit */ - host_quit = true; - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) - pthread_join(vcpu_threads[vcpu_id], NULL); - /* Disable dirty logging */ clock_gettime(CLOCK_MONOTONIC, &start); vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0); @@ -223,6 +218,11 @@ static void run_test(enum vm_guest_mode mode, void *arg) pr_info("Disabling dirty logging time: %ld.%.9lds\n", ts_diff.tv_sec, ts_diff.tv_nsec); + /* Tell the vcpu thread to quit */ + host_quit = true; + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) + pthread_join(vcpu_threads[vcpu_id], NULL); + avg = timespec_div(get_dirty_log_total, p->iterations); pr_info("Get dirty log over %lu iterations took %ld.%.9lds.
(Avg %ld.%.9lds/iteration)\n", p->iterations, get_dirty_log_total.tv_sec, -- cgit v1.2.3 From 2c07ded06427dd3339278487a1413d5e478f05f9 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Mon, 4 Jan 2021 09:17:49 -0600 Subject: KVM/SVM: add support for SEV attestation command The SEV FW version >= 0.23 added a new command that can be used to query the attestation report containing the SHA-256 digest of the guest memory encrypted through the KVM_SEV_LAUNCH_UPDATE_{DATA, VMSA} commands and sign the report with the Platform Endorsement Key (PEK). See the SEV FW API spec section 6.8 for more details. Note there already exist a command (KVM_SEV_LAUNCH_MEASURE) that can be used to get the SHA-256 digest. The main difference between the KVM_SEV_LAUNCH_MEASURE and KVM_SEV_ATTESTATION_REPORT is that the latter can be called while the guest is running and the measurement value is signed with PEK. Cc: James Bottomley Cc: Tom Lendacky Cc: David Rientjes Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Borislav Petkov Cc: John Allen Cc: Herbert Xu Cc: linux-crypto@vger.kernel.org Reviewed-by: Tom Lendacky Acked-by: David Rientjes Tested-by: James Bottomley Signed-off-by: Brijesh Singh Message-Id: <20210104151749.30248-1-brijesh.singh@amd.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/amd-memory-encryption.rst | 21 +++++++ arch/x86/kvm/svm/sev.c | 71 ++++++++++++++++++++++++ drivers/crypto/ccp/sev-dev.c | 1 + include/linux/psp-sev.h | 17 ++++++ include/uapi/linux/kvm.h | 8 +++ 5 files changed, 118 insertions(+) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 09a8f2a34e39..469a6308765b 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -263,6 +263,27 @@ Returns: 0 on success, -negative on error __u32 trans_len; }; +10. KVM_SEV_GET_ATTESTATION_REPORT +---------------------------------- + +The KVM_SEV_GET_ATTESTATION_REPORT command can be used by the hypervisor to query the attestation +report containing the SHA-256 digest of the guest memory and VMSA passed through the KVM_SEV_LAUNCH +commands and signed with the PEK. The digest returned by the command should match the digest +used by the guest owner with the KVM_SEV_LAUNCH_MEASURE. 
+ +Parameters (in): struct kvm_sev_attestation + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_attestation_report { + __u8 mnonce[16]; /* A random mnonce that will be placed in the report */ + + __u64 uaddr; /* userspace address where the report should be copied */ + __u32 len; + }; + References ==========
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 48017fef1cd9..8dfe8988be8d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1041,6 +1041,74 @@ e_unpin_memory: return ret; } +static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + void __user *report = (void __user *)(uintptr_t)argp->data; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data; + struct kvm_sev_attestation_report params; + void __user *p; + void *blob = NULL; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (!data) + return -ENOMEM; + + /* User wants to query the blob length */ + if (!params.len) + goto cmd; + + p = (void __user *)(uintptr_t)params.uaddr; + if (p) { + if (params.len > SEV_FW_BLOB_MAX_SIZE) { + ret = -EINVAL; + goto e_free; + } + + ret = -ENOMEM; + blob = kmalloc(params.len, GFP_KERNEL); + if (!blob) + goto e_free; + + data->address = __psp_pa(blob); + data->len = params.len; + memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce)); + } +cmd: + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error); + /* + * If we query the session length, FW responded with expected data. + */ + if (!params.len) + goto done; + + if (ret) + goto e_free_blob; + + if (blob) { + if (copy_to_user(p, blob, params.len)) + ret = -EFAULT; + } + +done: + params.len = data->len; + if (copy_to_user(report, &params, sizeof(params))) + ret = -EFAULT; +e_free_blob: + kfree(blob); +e_free: + kfree(data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1091,6 +1159,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_LAUNCH_SECRET: r = sev_launch_secret(kvm, &sev_cmd); break; + case KVM_SEV_GET_ATTESTATION_REPORT: + r = sev_get_attestation_report(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 476113e12489..cb9b4c4e371e 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -128,6 +128,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); + case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); default: return 0; }
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 49d155cd2dfe..b801ead1e2bb 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -66,6 +66,7 @@ enum sev_cmd { SEV_CMD_LAUNCH_MEASURE = 0x033, SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034, SEV_CMD_LAUNCH_FINISH = 0x035, + SEV_CMD_ATTESTATION_REPORT = 0x036, /* Guest migration commands (outgoing) */ SEV_CMD_SEND_START = 0x040, @@ -483,6 +484,22 @@ struct sev_data_dbg { u32 len; /* In */ } __packed; +/** + * struct sev_data_attestation_report - SEV_ATTESTATION_REPORT command parameters
+ * + * @handle: handle of the VM + * @mnonce: a random nonce that will be included in the report. + * @address: physical address where the report will be copied. + * @len: length of the physical buffer. + */ +struct sev_data_attestation_report { + u32 handle; /* In */ + u32 reserved; + u64 address; /* In */ + u8 mnonce[16]; /* In */ + u32 len; /* In/Out */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP /** diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 374c67875cdb..07c194e2c302 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1593,6 +1593,8 @@ enum sev_cmd_id { KVM_SEV_DBG_ENCRYPT, /* Guest certificates commands */ KVM_SEV_CERT_EXPORT, + /* Attestation report */ + KVM_SEV_GET_ATTESTATION_REPORT, KVM_SEV_NR_MAX, }; @@ -1645,6 +1647,12 @@ struct kvm_sev_dbg { __u32 len; }; +struct kvm_sev_attestation_report { + __u8 mnonce[16]; + __u64 uaddr; + __u32 len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- cgit v1.2.3 From 8e53324021645f820a01bf8aa745711c802c8542 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 6 Nov 2020 17:03:12 +0800 Subject: KVM: VMX: Convert vcpu_vmx.exit_reason to a union Convert vcpu_vmx.exit_reason from a u32 to a union (of size u32). The full VM_EXIT_REASON field is comprised of a 16-bit basic exit reason in bits 15:0, and single-bit modifiers in bits 31:16. Historically, KVM has only had to worry about handling the "failed VM-Entry" modifier, which could only be set in very specific flows and required dedicated handling. I.e. manually stripping the FAILED_VMENTRY bit was a somewhat viable approach. But even with only a single bit to worry about, KVM has had several bugs related to comparing a basic exit reason against the full exit reason store in vcpu_vmx. Upcoming Intel features, e.g. SGX, will add new modifier bits that can be set on more or less any VM-Exit, as opposed to the significantly more restricted FAILED_VMENTRY, i.e. correctly handling everything in one-off flows isn't scalable. Tracking exit reason in a union forces code to explicitly choose between consuming the full exit reason and the basic exit, and is a convenient way to document and access the modifiers. No functional change intended. 
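For illustration only (not part of this series), a tiny standalone sketch of the idea behind the union: the layout below is a simplified stand-in for the in-tree vmx_exit_reason (which spells out every reserved bit), and it shows why comparing the full 32-bit value against a basic exit-reason constant silently fails once a modifier bit such as FAILED_VMENTRY is set.

  #include <stdint.h>
  #include <stdio.h>

  #define EXIT_REASON_INVALID_STATE 33    /* basic exit reason 33 */

  /* Simplified stand-in for union vmx_exit_reason. */
  union exit_reason {
          struct {
                  uint32_t basic          : 16;
                  uint32_t reserved       : 15;
                  uint32_t failed_vmentry : 1;
          };
          uint32_t full;
  };

  int main(void)
  {
          /* Failed VM-entry (bit 31) with basic reason "invalid guest state". */
          union exit_reason er = { .full = 0x80000021 };

          /* Buggy pattern: the full value never matches the basic constant. */
          printf("full == constant: %d\n", er.full == EXIT_REASON_INVALID_STATE);
          /* The union forces callers to pick .basic or .full explicitly. */
          printf("basic=%u failed_vmentry=%u\n", er.basic, er.failed_vmentry);
          return 0;
  }

This prints "full == constant: 0" and "basic=33 failed_vmentry=1", which is exactly the distinction the union makes explicit at every call site.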
Cc: Xiaoyao Li Signed-off-by: Sean Christopherson Signed-off-by: Chenyi Qiang Message-Id: <20201106090315.18606-2-chenyi.qiang@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 42 ++++++++++++++++++----------- arch/x86/kvm/vmx/vmx.c | 68 ++++++++++++++++++++++++----------------------- arch/x86/kvm/vmx/vmx.h | 25 ++++++++++++++++- 3 files changed, 86 insertions(+), 49 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f2b9bfb58206..cb48236cc24d 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3330,7 +3330,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12 = get_vmcs12(vcpu); enum vm_entry_failure_code entry_failure_code; bool evaluate_pending_interrupts; - u32 exit_reason, failed_index; + union vmx_exit_reason exit_reason = { + .basic = EXIT_REASON_INVALID_STATE, + .failed_vmentry = 1, + }; + u32 failed_index; if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) kvm_vcpu_flush_tlb_current(vcpu); @@ -3382,7 +3386,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, if (nested_vmx_check_guest_state(vcpu, vmcs12, &entry_failure_code)) { - exit_reason = EXIT_REASON_INVALID_STATE; + exit_reason.basic = EXIT_REASON_INVALID_STATE; vmcs12->exit_qualification = entry_failure_code; goto vmentry_fail_vmexit; } @@ -3393,7 +3397,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vcpu->arch.tsc_offset += vmcs12->tsc_offset; if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) { - exit_reason = EXIT_REASON_INVALID_STATE; + exit_reason.basic = EXIT_REASON_INVALID_STATE; vmcs12->exit_qualification = entry_failure_code; goto vmentry_fail_vmexit_guest_mode; } @@ -3403,7 +3407,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); if (failed_index) { - exit_reason = EXIT_REASON_MSR_LOAD_FAIL; + exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; vmcs12->exit_qualification = failed_index; goto vmentry_fail_vmexit_guest_mode; } @@ -3471,7 +3475,7 @@ vmentry_fail_vmexit: return NVMX_VMENTRY_VMEXIT; load_vmcs12_host_state(vcpu, vmcs12); - vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; + vmcs12->vm_exit_reason = exit_reason.full; if (enable_shadow_vmcs || vmx->nested.hv_evmcs) vmx->nested.need_vmcs12_to_shadow_sync = true; return NVMX_VMENTRY_VMEXIT; @@ -5559,7 +5563,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); fail: - nested_vmx_vmexit(vcpu, vmx->exit_reason, + /* + * This is effectively a reflected VM-Exit, as opposed to a synthesized + * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode + * EXIT_REASON_VMFUNC as the exit reason. + */ + nested_vmx_vmexit(vcpu, vmx->exit_reason.full, vmx_get_intr_info(vcpu), vmx_get_exit_qual(vcpu)); return 1; @@ -5627,7 +5636,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 
*/ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, u32 exit_reason) + struct vmcs12 *vmcs12, + union vmx_exit_reason exit_reason) { u32 msr_index = kvm_rcx_read(vcpu); gpa_t bitmap; @@ -5641,7 +5651,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, * First we need to figure out which of the four to use: */ bitmap = vmcs12->msr_bitmap; - if (exit_reason == EXIT_REASON_MSR_WRITE) + if (exit_reason.basic == EXIT_REASON_MSR_WRITE) bitmap += 2048; if (msr_index >= 0xc0000000) { msr_index -= 0xc0000000; @@ -5778,11 +5788,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) * Return true if L0 wants to handle an exit from L2 regardless of whether or not * L1 wants the exit. Only call this when in is_guest_mode (L2). */ -static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) +static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, + union vmx_exit_reason exit_reason) { u32 intr_info; - switch ((u16)exit_reason) { + switch ((u16)exit_reason.basic) { case EXIT_REASON_EXCEPTION_NMI: intr_info = vmx_get_intr_info(vcpu); if (is_nmi(intr_info)) @@ -5838,12 +5849,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) * Return 1 if L1 wants to intercept an exit from L2. Only call this when in * is_guest_mode (L2). */ -static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) +static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, + union vmx_exit_reason exit_reason) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 intr_info; - switch ((u16)exit_reason) { + switch ((u16)exit_reason.basic) { case EXIT_REASON_EXCEPTION_NMI: intr_info = vmx_get_intr_info(vcpu); if (is_nmi(intr_info)) @@ -5962,7 +5974,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason) bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 exit_reason = vmx->exit_reason; + union vmx_exit_reason exit_reason = vmx->exit_reason; unsigned long exit_qual; u32 exit_intr_info; @@ -5981,7 +5993,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) goto reflect_vmexit; } - trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX); + trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX); /* If L0 (KVM) wants the exit, it trumps L1's desires. */ if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) @@ -6007,7 +6019,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) exit_qual = vmx_get_exit_qual(vcpu); reflect_vmexit: - nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual); + nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); return true; } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index eb69fef57485..880a2617820c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1577,7 +1577,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) * i.e. we end up advancing IP with some random value. 
*/ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || - to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { + to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { orig_rip = kvm_rip_read(vcpu); rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); #ifdef CONFIG_X86_64 @@ -5667,7 +5667,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, struct vcpu_vmx *vmx = to_vmx(vcpu); *info1 = vmx_get_exit_qual(vcpu); - if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { + if (!(vmx->exit_reason.failed_vmentry)) { *info2 = vmx->idt_vectoring_info; *intr_info = vmx_get_intr_info(vcpu); if (is_exception_with_error_code(*intr_info)) @@ -5911,8 +5911,9 @@ void dump_vmcs(void) static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 exit_reason = vmx->exit_reason; + union vmx_exit_reason exit_reason = vmx->exit_reason; u32 vectoring_info = vmx->idt_vectoring_info; + u16 exit_handler_index; /* * Flush logged GPAs PML buffer, this will make dirty_bitmap more @@ -5954,11 +5955,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) return 1; } - if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { + if (exit_reason.failed_vmentry) { dump_vmcs(); vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason - = exit_reason; + = exit_reason.full; vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; return 0; } @@ -5980,18 +5981,18 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) * will cause infinite loop. */ if ((vectoring_info & VECTORING_INFO_VALID_MASK) && - (exit_reason != EXIT_REASON_EXCEPTION_NMI && - exit_reason != EXIT_REASON_EPT_VIOLATION && - exit_reason != EXIT_REASON_PML_FULL && - exit_reason != EXIT_REASON_APIC_ACCESS && - exit_reason != EXIT_REASON_TASK_SWITCH)) { + (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI && + exit_reason.basic != EXIT_REASON_EPT_VIOLATION && + exit_reason.basic != EXIT_REASON_PML_FULL && + exit_reason.basic != EXIT_REASON_APIC_ACCESS && + exit_reason.basic != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vectoring_info; - vcpu->run->internal.data[1] = exit_reason; + vcpu->run->internal.data[1] = exit_reason.full; vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; - if (exit_reason == EXIT_REASON_EPT_MISCONFIG) { + if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) { vcpu->run->internal.ndata++; vcpu->run->internal.data[3] = vmcs_read64(GUEST_PHYSICAL_ADDRESS); @@ -6023,38 +6024,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) if (exit_fastpath != EXIT_FASTPATH_NONE) return 1; - if (exit_reason >= kvm_vmx_max_exit_handlers) + if (exit_reason.basic >= kvm_vmx_max_exit_handlers) goto unexpected_vmexit; #ifdef CONFIG_RETPOLINE - if (exit_reason == EXIT_REASON_MSR_WRITE) + if (exit_reason.basic == EXIT_REASON_MSR_WRITE) return kvm_emulate_wrmsr(vcpu); - else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER) + else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER) return handle_preemption_timer(vcpu); - else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW) + else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW) return handle_interrupt_window(vcpu); - else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) + else if (exit_reason.basic == 
EXIT_REASON_EXTERNAL_INTERRUPT) return handle_external_interrupt(vcpu); - else if (exit_reason == EXIT_REASON_HLT) + else if (exit_reason.basic == EXIT_REASON_HLT) return kvm_emulate_halt(vcpu); - else if (exit_reason == EXIT_REASON_EPT_MISCONFIG) + else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) return handle_ept_misconfig(vcpu); #endif - exit_reason = array_index_nospec(exit_reason, - kvm_vmx_max_exit_handlers); - if (!kvm_vmx_exit_handlers[exit_reason]) + exit_handler_index = array_index_nospec((u16)exit_reason.basic, + kvm_vmx_max_exit_handlers); + if (!kvm_vmx_exit_handlers[exit_handler_index]) goto unexpected_vmexit; - return kvm_vmx_exit_handlers[exit_reason](vcpu); + return kvm_vmx_exit_handlers[exit_handler_index](vcpu); unexpected_vmexit: - vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason); + vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", + exit_reason.full); dump_vmcs(); vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; vcpu->run->internal.ndata = 2; - vcpu->run->internal.data[0] = exit_reason; + vcpu->run->internal.data[0] = exit_reason.full; vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; return 0; } @@ -6373,9 +6375,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) + if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) handle_external_interrupt_irqoff(vcpu); - else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI) + else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) handle_exception_nmi_irqoff(vmx); } @@ -6567,7 +6569,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { - switch (to_vmx(vcpu)->exit_reason) { + switch (to_vmx(vcpu)->exit_reason.basic) { case EXIT_REASON_MSR_WRITE: return handle_fastpath_set_msr_irqoff(vcpu); case EXIT_REASON_PREEMPTION_TIMER: @@ -6768,17 +6770,17 @@ reenter_guest: vmx->idt_vectoring_info = 0; if (unlikely(vmx->fail)) { - vmx->exit_reason = 0xdead; + vmx->exit_reason.full = 0xdead; return EXIT_FASTPATH_NONE; } - vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); - if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)) + vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); + if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) kvm_machine_check(); - trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); + trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX); - if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) + if (unlikely(vmx->exit_reason.failed_vmentry)) return EXIT_FASTPATH_NONE; vmx->loaded_vmcs->launched = 1; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 9d3a557949ac..4dd71b7494ea 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -70,6 +70,29 @@ struct pt_desc { struct pt_ctx guest; }; +union vmx_exit_reason { + struct { + u32 basic : 16; + u32 reserved16 : 1; + u32 reserved17 : 1; + u32 reserved18 : 1; + u32 reserved19 : 1; + u32 reserved20 : 1; + u32 reserved21 : 1; + u32 reserved22 : 1; + u32 reserved23 : 1; + u32 reserved24 : 1; + u32 reserved25 : 1; + u32 reserved26 : 1; + u32 enclave_mode : 1; + u32 smi_pending_mtf : 1; + u32 smi_from_vmx_root : 1; + u32 reserved30 : 1; + u32 failed_vmentry : 1; + }; + u32 full; +}; + /* * The nested_vmx structure is part of vcpu_vmx, and holds information we 
need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. @@ -244,7 +267,7 @@ struct vcpu_vmx { int vpid; bool emulation_required; - u32 exit_reason; + union vmx_exit_reason exit_reason; /* Posted interrupt descriptor */ struct pi_desc pi_desc; -- cgit v1.2.3 From 15aad3be9adb3fb7fba84190a2ce57d66e8b51da Mon Sep 17 00:00:00 2001 From: Chenyi Qiang Date: Fri, 6 Nov 2020 17:03:13 +0800 Subject: KVM: X86: Reset the vcpu->run->flags at the beginning of vcpu_run Reset the vcpu->run->flags at the beginning of kvm_arch_vcpu_ioctl_run. This avoids requiring every chunk of code that needs to set a flag to also clear it, which increases the odds of missing a case and ending up with a flag in an undefined state. Signed-off-by: Chenyi Qiang Message-Id: <20201106090315.18606-3-chenyi.qiang@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3905ca46b313..90a2335498a2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8205,12 +8205,14 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) kvm_run->if_flag = !vcpu->arch.guest_state_protected && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; - kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); kvm_run->ready_for_interrupt_injection = pic_in_kernel(vcpu->kvm) || kvm_vcpu_ready_for_interrupt_injection(vcpu); + + if (is_smm(vcpu)) + kvm_run->flags |= KVM_RUN_X86_SMM; } static void update_cr8_intercept(struct kvm_vcpu *vcpu) @@ -9320,6 +9322,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu_load(vcpu); kvm_sigset_activate(vcpu); + kvm_run->flags = 0; kvm_load_guest_fpu(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { -- cgit v1.2.3 From fe6b6bc802b40081e8a7a1abe8d32b88d10a03e1 Mon Sep 17 00:00:00 2001 From: Chenyi Qiang Date: Fri, 6 Nov 2020 17:03:14 +0800 Subject: KVM: VMX: Enable bus lock VM exit A virtual machine can exploit bus locks to degrade the performance of the system. A bus lock can be caused by a split locked access to writeback (WB) memory or by using locks on uncacheable (UC) memory. A bus lock is typically >1000 cycles slower than an atomic operation within a cache line. It also disrupts performance on other cores (which must wait for the bus lock to be released before their memory operations can complete). To address the threat, a bus lock VM exit is introduced to notify the VMM when a bus lock was acquired, allowing it to enforce throttling or other policy-based mitigations. A VMM can enable VM exits due to bus locks by setting a new "Bus Lock Detection" VM-execution control (bit 30 of the Secondary Processor-based VM-execution controls). If delivery of this VM exit was preempted by a higher priority VM exit (e.g. EPT misconfiguration, EPT violation, APIC access VM exit, APIC write VM exit, exception bitmap exiting), bit 26 of the exit reason in the VMCS field is set to 1. In the current implementation, KVM exposes this capability through KVM_CAP_X86_BUS_LOCK_EXIT. The user can get the supported mode bitmap (i.e. off and exit) and enable it explicitly (disabled by default). If bus locks in the guest are detected by KVM, KVM exits to user space even when the current exit reason is handled by KVM internally. A new flag, KVM_RUN_X86_BUS_LOCK, is set in vcpu->run->flags to inform the user space that a bus lock was detected in the guest.
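For illustration only (not part of this patch), a minimal sketch of how a VMM's run loop might consume the new flag; vcpu_fd, the mmap'ed kvm_run area and throttle_vcpu() are assumed or hypothetical, and the headers must already carry the KVM_RUN_X86_BUS_LOCK definition.

  #include <linux/kvm.h>
  #include <sys/ioctl.h>
  #include <unistd.h>

  /* Hypothetical policy hook, e.g. sleep briefly to throttle the vCPU. */
  static void throttle_vcpu(void)
  {
          usleep(1000);
  }

  static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
  {
          if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                  return -1;

          /*
           * The flag may accompany KVM_EXIT_X86_BUS_LOCK or any other exit
           * reason if the bus lock VM exit was preempted by a higher
           * priority VM exit, so test the flag rather than the exit reason.
           */
          if (run->flags & KVM_RUN_X86_BUS_LOCK)
                  throttle_vcpu();

          /* ... handle run->exit_reason as usual ... */
          return 0;
  }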
Document for Bus Lock VM exit is now available at the latest "Intel Architecture Instruction Set Extensions Programming Reference". Document Link: https://software.intel.com/content/www/us/en/develop/download/intel-architecture-instruction-set-extensions-programming-reference.html Co-developed-by: Xiaoyao Li Signed-off-by: Xiaoyao Li Signed-off-by: Chenyi Qiang Message-Id: <20201106090315.18606-4-chenyi.qiang@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 7 +++++++ arch/x86/include/asm/vmx.h | 1 + arch/x86/include/asm/vmxfeatures.h | 1 + arch/x86/include/uapi/asm/kvm.h | 1 + arch/x86/include/uapi/asm/vmx.h | 4 +++- arch/x86/kvm/vmx/capabilities.h | 6 ++++++ arch/x86/kvm/vmx/vmx.c | 37 +++++++++++++++++++++++++++++++++++-- arch/x86/kvm/vmx/vmx.h | 2 +- arch/x86/kvm/x86.c | 23 +++++++++++++++++++++++ include/uapi/linux/kvm.h | 5 +++++ 10 files changed, 83 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ed575c5655dd..c0706022d22f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -52,6 +52,9 @@ #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) +#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \ + KVM_BUS_LOCK_DETECTION_EXIT) + /* x86-specific vcpu->requests bit members */ #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0) #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1) @@ -996,6 +999,8 @@ struct kvm_arch { struct msr_bitmap_range ranges[16]; } msr_filter; + bool bus_lock_detection_enabled; + struct kvm_pmu_event_filter *pmu_event_filter; struct task_struct *nx_lpage_recovery_thread; @@ -1418,6 +1423,8 @@ extern u8 kvm_tsc_scaling_ratio_frac_bits; extern u64 kvm_max_tsc_scaling_ratio; /* 1ull << kvm_tsc_scaling_ratio_frac_bits */ extern u64 kvm_default_tsc_scaling_ratio; +/* bus lock detection supported? 
*/ +extern bool kvm_has_bus_lock_exit; extern u64 kvm_mce_cap_supported; diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 38ca445a8429..358707f60d99 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -73,6 +73,7 @@ #define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA) #define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING) #define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE) +#define SECONDARY_EXEC_BUS_LOCK_DETECTION VMCS_CONTROL_BIT(BUS_LOCK_DETECTION) #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index 9915990fd8cf..d9a74681a77d 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -83,5 +83,6 @@ #define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */ #define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */ #define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */ +#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */ #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 8e76d3701db3..5a3022c8af82 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -112,6 +112,7 @@ struct kvm_ioapic_state { #define KVM_NR_IRQCHIPS 3 #define KVM_RUN_X86_SMM (1 << 0) +#define KVM_RUN_X86_BUS_LOCK (1 << 1) /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index ada955c5ebb6..b8e650a985e3 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -89,6 +89,7 @@ #define EXIT_REASON_XRSTORS 64 #define EXIT_REASON_UMWAIT 67 #define EXIT_REASON_TPAUSE 68 +#define EXIT_REASON_BUS_LOCK 74 #define VMX_EXIT_REASONS \ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ @@ -150,7 +151,8 @@ { EXIT_REASON_XSAVES, "XSAVES" }, \ { EXIT_REASON_XRSTORS, "XRSTORS" }, \ { EXIT_REASON_UMWAIT, "UMWAIT" }, \ - { EXIT_REASON_TPAUSE, "TPAUSE" } + { EXIT_REASON_TPAUSE, "TPAUSE" }, \ + { EXIT_REASON_BUS_LOCK, "BUS_LOCK" } #define VMX_EXIT_REASON_FLAGS \ { VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" } diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 3a1861403d73..5366ccdd134c 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -262,6 +262,12 @@ static inline bool cpu_has_vmx_tsc_scaling(void) SECONDARY_EXEC_TSC_SCALING; } +static inline bool cpu_has_vmx_bus_lock_detection(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_BUS_LOCK_DETECTION; +} + static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 880a2617820c..af8d3a2db3d2 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2428,7 +2428,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX | - SECONDARY_EXEC_ENABLE_VMFUNC; + SECONDARY_EXEC_ENABLE_VMFUNC | + SECONDARY_EXEC_BUS_LOCK_DETECTION; if (cpu_has_sgx()) opt2 |= SECONDARY_EXEC_ENCLS_EXITING; if (adjust_vmx_controls(min2, opt2, @@ -4269,6 +4270,9 @@ static void 
vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG, ENABLE_USR_WAIT_PAUSE, false); + if (!vcpu->kvm->arch.bus_lock_detection_enabled) + exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION; + vmx->secondary_exec_control = exec_control; } @@ -5600,6 +5604,13 @@ static int handle_encls(struct kvm_vcpu *vcpu) return 1; } +static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu) +{ + vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; + vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; + return 0; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -5656,6 +5667,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_VMFUNC] = handle_vmx_instruction, [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, [EXIT_REASON_ENCLS] = handle_encls, + [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit, }; static const int kvm_vmx_max_exit_handlers = @@ -5908,7 +5920,7 @@ void dump_vmcs(void) * The guest has exited. See if we can fix it or if we need userspace * assistance. */ -static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) { struct vcpu_vmx *vmx = to_vmx(vcpu); union vmx_exit_reason exit_reason = vmx->exit_reason; @@ -6061,6 +6073,25 @@ unexpected_vmexit: return 0; } +static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + int ret = __vmx_handle_exit(vcpu, exit_fastpath); + + /* + * Even when current exit reason is handled by KVM internally, we + * still need to exit to user space when bus lock detected to inform + * that there is a bus lock in guest. + */ + if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { + if (ret > 0) + vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; + + vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; + return 0; + } + return ret; +} + /* * Software based L1D cache flush which is used when microcode providing * the cache control MSR is not loaded. 
@@ -7812,6 +7843,8 @@ static __init int hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 48; } + kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection(); + set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ if (enable_ept) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 4dd71b7494ea..cf335e94f198 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -83,7 +83,7 @@ union vmx_exit_reason { u32 reserved23 : 1; u32 reserved24 : 1; u32 reserved25 : 1; - u32 reserved26 : 1; + u32 bus_lock_detected : 1; u32 enclave_mode : 1; u32 smi_pending_mtf : 1; u32 smi_from_vmx_root : 1; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 90a2335498a2..8719877fe799 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -136,6 +136,8 @@ u64 __read_mostly kvm_max_tsc_scaling_ratio; EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio); u64 __read_mostly kvm_default_tsc_scaling_ratio; EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio); +bool __read_mostly kvm_has_bus_lock_exit; +EXPORT_SYMBOL_GPL(kvm_has_bus_lock_exit); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 __read_mostly tsc_tolerance_ppm = 250; @@ -3843,6 +3845,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_STEAL_TIME: r = sched_info_on(); break; + case KVM_CAP_X86_BUS_LOCK_EXIT: + if (kvm_has_bus_lock_exit) + r = KVM_BUS_LOCK_DETECTION_OFF | + KVM_BUS_LOCK_DETECTION_EXIT; + else + r = 0; + break; default: break; } @@ -5298,6 +5307,20 @@ split_irqchip_unlock: kvm->arch.user_space_msr_mask = cap->args[0]; r = 0; break; + case KVM_CAP_X86_BUS_LOCK_EXIT: + r = -EINVAL; + if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) + break; + + if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && + (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) + break; + + if (kvm_has_bus_lock_exit && + cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) + kvm->arch.bus_lock_detection_enabled = true; + r = 0; + break; default: r = -EINVAL; break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 07c194e2c302..dfe3ba5cf262 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -252,6 +252,7 @@ struct kvm_hyperv_exit { #define KVM_EXIT_X86_WRMSR 30 #define KVM_EXIT_DIRTY_RING_FULL 31 #define KVM_EXIT_AP_RESET_HOLD 32 +#define KVM_EXIT_X86_BUS_LOCK 33 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -1058,6 +1059,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 #define KVM_CAP_SYS_HYPERV_CPUID 191 #define KVM_CAP_DIRTY_LOG_RING 192 +#define KVM_CAP_X86_BUS_LOCK_EXIT 193 #ifdef KVM_CAP_IRQ_ROUTING @@ -1774,4 +1776,7 @@ struct kvm_dirty_gfn { __u64 offset; }; +#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) +#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) + #endif /* __LINUX_KVM_H */ -- cgit v1.2.3 From c32b1b896d2ab30ac30bc39194bac47a09f7f497 Mon Sep 17 00:00:00 2001 From: Chenyi Qiang Date: Fri, 6 Nov 2020 17:03:15 +0800 Subject: KVM: X86: Add the Document for KVM_CAP_X86_BUS_LOCK_EXIT Introduce a new capability named KVM_CAP_X86_BUS_LOCK_EXIT, which is used to handle bus locks detected in guest. It allows the userspace to do custom throttling policies to mitigate the 'noisy neighbour' problem. 
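For illustration only (not part of this patch), a minimal userspace sketch of probing and enabling the capability on a VM file descriptor, assuming headers that already define KVM_CAP_X86_BUS_LOCK_EXIT and the mode bits.

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  static int enable_bus_lock_exit(int vm_fd)
  {
          struct kvm_enable_cap cap = {
                  .cap = KVM_CAP_X86_BUS_LOCK_EXIT,
                  .args[0] = KVM_BUS_LOCK_DETECTION_EXIT,
          };
          int modes;

          /* Returns the supported mode bitmap, or 0 if unsupported. */
          modes = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_BUS_LOCK_EXIT);
          if (!(modes & KVM_BUS_LOCK_DETECTION_EXIT))
                  return -1;

          /* The two modes are mutually exclusive; request exactly one. */
          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }

A VMM would typically do this once per VM, before any vCPU is run.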
Signed-off-by: Chenyi Qiang Message-Id: <20201106090315.18606-5-chenyi.qiang@intel.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 45 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 99ceb978c8b0..effcc08733fd 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4893,9 +4893,11 @@ local APIC is not used. __u16 flags; More architecture-specific flags detailing state of the VCPU that may -affect the device's behavior. The only currently defined flag is -KVM_RUN_X86_SMM, which is valid on x86 machines and is set if the -VCPU is in system management mode. +affect the device's behavior. Currently defined flags: + /* x86, set if the VCPU is in system management mode */ + #define KVM_RUN_X86_SMM (1 << 0) + /* x86, set if a bus lock was detected in the VM */ + #define KVM_RUN_X86_BUS_LOCK (1 << 1) :: @@ -6038,6 +6040,43 @@ KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR exit notifications which user space can then handle to implement model specific MSR handling and/or user notifications to inform a user that an MSR was not handled. +7.22 KVM_CAP_X86_BUS_LOCK_EXIT +------------------------------- + +:Architectures: x86 +:Target: VM +:Parameters: args[0] defines the policy used when bus locks are detected in the guest +:Returns: 0 on success, -EINVAL when args[0] contains invalid bits + +Valid bits in args[0] are:: + + #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) + #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) + +Enabling this capability on a VM provides userspace with a way to select +a policy to handle the bus locks detected in the guest. Userspace can obtain +the supported modes from the result of KVM_CHECK_EXTENSION and set the chosen mode +through KVM_ENABLE_CAP. + +KVM_BUS_LOCK_DETECTION_OFF and KVM_BUS_LOCK_DETECTION_EXIT are supported +currently and are mutually exclusive with each other. More bits can be added in +the future. + +With KVM_BUS_LOCK_DETECTION_OFF set, bus locks in the guest will not cause VM exits, +so no additional action is needed. This is the default mode. + +With KVM_BUS_LOCK_DETECTION_EXIT set, a VM exit happens when a bus lock is detected +in the guest. KVM simply exits to userspace to handle it. Userspace can enforce +its own throttling or other policy-based mitigations. + +This capability is aimed at addressing the threat that a VM can exploit bus locks to +degrade the performance of the whole system. Once userspace enables this +capability and selects the KVM_BUS_LOCK_DETECTION_EXIT mode, KVM will set the +KVM_RUN_X86_BUS_LOCK flag in the vcpu->run->flags field and exit to userspace. Because +the bus lock VM exit can be preempted by a higher priority VM exit, the exit +notification to userspace can be KVM_EXIT_X86_BUS_LOCK or another reason; the +KVM_RUN_X86_BUS_LOCK flag is used to distinguish between them. + 8. Other capabilities. ====================== -- cgit v1.2.3 From db7d8e476821df85dce0cfd1e654c72c7db879dc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 15 Jan 2021 14:03:53 -0800 Subject: x86/apic: Export x2apic_mode for use by KVM in "warm" path Export x2apic_mode so that KVM can query whether x2APIC is active without having to incur the RDMSR in x2apic_enabled(). When Posted Interrupts are in use for a guest with an assigned device, KVM ends up checking for x2APIC at least once every time a vCPU halts.
KVM could obviously snapshot x2apic_enabled() to avoid the RDMSR, but that's rather silly given that x2apic_mode holds the exact info needed by KVM. Signed-off-by: Sean Christopherson Message-Id: <20210115220354.434807-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kernel/apic/apic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 6bd20c0de8bc..dea2b44966ca 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1743,6 +1743,7 @@ void apic_ap_setup(void) #ifdef CONFIG_X86_X2APIC int x2apic_mode; +EXPORT_SYMBOL_GPL(x2apic_mode); enum { X2APIC_OFF, -- cgit v1.2.3 From 563c54c4d5b1230990e3b21ba5d29d79bd3e3b3d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 15 Jan 2021 14:03:54 -0800 Subject: KVM: VMX: Use x2apic_mode to avoid RDMSR when querying PI state Use x2apic_mode instead of x2apic_enabled() when adjusting the destination ID during Posted Interrupt updates. This avoids the costly RDMSR that is hidden behind x2apic_enabled(). Reported-by: luferry Signed-off-by: Sean Christopherson Message-Id: <20210115220354.434807-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index f02962dcc72c..4831bc44ce66 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -54,7 +54,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) dest = cpu_physical_id(cpu); - if (x2apic_enabled()) + if (x2apic_mode) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; @@ -104,7 +104,7 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) dest = cpu_physical_id(vcpu->cpu); - if (x2apic_enabled()) + if (x2apic_mode) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; @@ -174,7 +174,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu) */ dest = cpu_physical_id(vcpu->pre_pcpu); - if (x2apic_enabled()) + if (x2apic_mode) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; -- cgit v1.2.3 From d855066f81726155caf766e47eea58ae10b1fd57 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Fri, 8 Jan 2021 09:36:55 +0800 Subject: KVM: VMX: read/write MSR_IA32_DEBUGCTLMSR from GUEST_IA32_DEBUGCTL SVM already has specific handlers of MSR_IA32_DEBUGCTLMSR in the svm_get/set_msr, so the x86 common part can be safely moved to VMX. This allows KVM to store the bits it supports in GUEST_IA32_DEBUGCTL. Add vmx_supported_debugctl() to refactor the throwing logic of #GP. Signed-off-by: Like Xu Reviewed-by: Andi Kleen Message-Id: <20210108013704.134985-2-like.xu@linux.intel.com> [Merge parts of Chenyi Qiang's "KVM: X86: Expose bus lock debug exception to guest". 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/capabilities.h | 5 +++++ arch/x86/kvm/vmx/vmx.c | 23 +++++++++++++++++++---- arch/x86/kvm/x86.c | 16 ++-------------- arch/x86/kvm/x86.h | 2 ++ 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 5366ccdd134c..d0f31fcccd2b 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -384,4 +384,9 @@ static inline u64 vmx_get_perf_capabilities(void) return PMU_CAP_FW_WRITES; } +static inline u64 vmx_supported_debugctl(void) +{ + return 0; +} + #endif /* __KVM_X86_VMX_CAPS_H */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index af8d3a2db3d2..d4145c57438c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1924,6 +1924,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; goto find_uret_msr; + case MSR_IA32_DEBUGCTLMSR: + msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); + break; default: find_uret_msr: msr = vmx_find_uret_msr(vmx, msr_info->index); @@ -1997,14 +2000,26 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } vmcs_writel(GUEST_SYSENTER_ESP, data); break; - case MSR_IA32_DEBUGCTLMSR: + case MSR_IA32_DEBUGCTLMSR: { + u64 invalid = data & ~vmx_supported_debugctl(); + if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { + if (report_ignored_msrs) + vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n", + __func__, data); + data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); + invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); + } + + if (invalid) + return 1; + if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) get_vmcs12(vcpu)->guest_ia32_debugctl = data; - ret = kvm_set_msr_common(vcpu, msr_info); - break; - + vmcs_write64(GUEST_IA32_DEBUGCTL, data); + return 0; + } case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || (!msr_info->host_initiated && diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8719877fe799..0ecbb4ecc363 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -117,8 +117,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops); static bool __read_mostly ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); -static bool __read_mostly report_ignored_msrs = true; +bool __read_mostly report_ignored_msrs = true; module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); +EXPORT_SYMBOL_GPL(report_ignored_msrs); unsigned int min_timer_period_us = 200; module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); @@ -3073,18 +3074,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; } break; - case MSR_IA32_DEBUGCTLMSR: - if (!data) { - /* We support the non-activated case already */ - break; - } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { - /* Values other than LBR and BTF are vendor-specific, - thus reserved and should throw a #GP */ - return 1; - } else if (report_ignored_msrs) - vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", - __func__, data); - break; case 0x200 ... 
0x2ff: return kvm_mtrr_set_msr(vcpu, msr, data); case MSR_IA32_APICBASE: @@ -3357,7 +3346,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: - case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 0f727b50bd3d..5214b3be30ad 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -296,6 +296,8 @@ extern int pi_inject_timer; extern struct static_key kvm_no_apic_vcpu; +extern bool report_ignored_msrs; + static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, -- cgit v1.2.3 From 252e365eb28ddf49eb31cec1a5d99e708c73f57b Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:29 +0800 Subject: KVM: x86/vmx: Make vmx_set_intercept_for_msr() non-static To make code responsibilities clear, we may resue and invoke the vmx_set_intercept_for_msr() in other vmx-specific files (e.g. pmu_intel.c), so expose it to passthrough LBR msrs later. Signed-off-by: Like Xu Reviewed-by: Andi Kleen Message-Id: <20210201051039.255478-2-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/vmx/vmx.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d4145c57438c..c4b15f9f69b0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3790,7 +3790,7 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, vmx_set_msr_bitmap_write(msr_bitmap, msr); } -static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool value) { if (value) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index cf335e94f198..94bf7fd4e6ea 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -364,6 +364,8 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu); void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr); void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, + u32 msr, int type, bool value); static inline u8 vmx_get_rvi(void) { -- cgit v1.2.3 From a755753903a40d982f6dd23d65eb96b248a2577a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 2 Feb 2021 09:32:35 -0500 Subject: KVM: x86/pmu: preserve IA32_PERF_CAPABILITIES across CPUID refresh Once MSR_IA32_PERF_CAPABILITIES is changed via vmx_set_msr(), the value should not be changed by cpuid(). To ensure that the new value is kept, the default initialization path is moved to intel_pmu_init(). The effective value of the MSR will be 0 if PDCM is clear, however. 
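For illustration only (not part of this patch), a rough userspace-ordering sketch of what this change is meant to keep stable: the MSR is written first and a later CPUID update no longer resets it. The MSR index constant, the one_msr wrapper and the pre-populated kvm_cpuid2 buffer are assumptions of the sketch.

  #include <linux/kvm.h>
  #include <stdint.h>
  #include <sys/ioctl.h>

  #define MSR_IA32_PERF_CAPABILITIES 0x345    /* architectural MSR index */

  struct one_msr {
          struct kvm_msrs hdr;
          struct kvm_msr_entry entry;
  };

  static int set_perf_caps_then_cpuid(int vcpu_fd, uint64_t perf_caps,
                                      struct kvm_cpuid2 *cpuid)
  {
          struct one_msr msr = {
                  .hdr.nmsrs   = 1,
                  .entry.index = MSR_IA32_PERF_CAPABILITIES,
                  .entry.data  = perf_caps,
          };

          if (ioctl(vcpu_fd, KVM_SET_MSRS, &msr) != 1)
                  return -1;

          /*
           * With this change the CPUID update below no longer resets the
           * value written above; the guest simply sees 0 if PDCM is not
           * exposed in the CPUID data.
           */
          return ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
  }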
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/pmu_intel.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index cdf5f34518f4..7403d46998d6 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -152,12 +152,17 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, return &counters[array_index_nospec(idx, num_counters)]; } -static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu) +static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu) { if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) - return false; + return 0; - return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES; + return vcpu->arch.perf_capabilities; +} + +static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu) +{ + return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0; } static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) @@ -327,7 +332,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; pmu->reserved_bits = 0xffffffff00200000ull; - vcpu->arch.perf_capabilities = 0; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry) @@ -340,8 +344,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) return; perf_get_x86_pmu_capability(&x86_pmu); - if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) - vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, x86_pmu.num_counters_gp); @@ -405,6 +407,8 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; pmu->fixed_counters[i].current_config = 0; } + + vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); } static void intel_pmu_reset(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 9c9520ce883386dc3794c7d60204487ff1db09cb Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 2 Feb 2021 09:36:08 -0500 Subject: KVM: vmx/pmu: Add PMU_CAP_LBR_FMT check when guest LBR is enabled Usespace could set the bits [0, 5] of the IA32_PERF_CAPABILITIES MSR which tells about the record format stored in the LBR records. The LBR will be enabled on the guest if host perf supports LBR (checked via x86_perf_get_lbr()) and the vcpu model is compatible with the host one. 
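For illustration only (not part of this patch), a small sketch of the acceptance rule described above, using the PMU_CAP_LBR_FMT mask from capabilities.h; lbr_fmt_write_allowed() is a hypothetical helper, not a KVM function.

  #include <stdbool.h>
  #include <stdint.h>

  #define PMU_CAP_LBR_FMT 0x3fULL    /* bits 5:0, LBR record format */

  /*
   * Hypothetical helper mirroring the rule above: a write that requests any
   * LBR format is only accepted when the format matches the host's exactly
   * and the vCPU model is the same as the host model.
   */
  static bool lbr_fmt_write_allowed(uint64_t data, uint64_t host_caps,
                                    bool vcpu_model_matches_host)
  {
          if (!(data & PMU_CAP_LBR_FMT))
                  return true;    /* no LBR format requested */

          if ((data & PMU_CAP_LBR_FMT) != (host_caps & PMU_CAP_LBR_FMT))
                  return false;   /* guest format must equal host format */

          return vcpu_model_matches_host;
  }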
Signed-off-by: Like Xu Message-Id: <20210201051039.255478-4-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/capabilities.h | 1 + arch/x86/kvm/vmx/pmu_intel.c | 19 +++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 12 ++++++++++++ arch/x86/kvm/vmx/vmx.h | 11 +++++++++++ 4 files changed, 43 insertions(+) diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index d0f31fcccd2b..da3db1a37b34 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -19,6 +19,7 @@ extern int __read_mostly pt_mode; #define PT_MODE_HOST_GUEST 1 #define PMU_CAP_FW_WRITES (1ULL << 13) +#define PMU_CAP_LBR_FMT 0x3f struct nested_vmx_msrs { /* diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 7403d46998d6..d21104e6f9ec 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -173,6 +173,16 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) return get_gp_pmc(pmu, msr, MSR_IA32_PMC0); } +bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu) +{ + /* + * As a first step, a guest could only enable LBR feature if its + * cpu model is the same as the host because the LBR registers + * would be pass-through to the guest and they're model specific. + */ + return boot_cpu_data.x86_model == guest_cpuid_model(vcpu); +} + static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -321,6 +331,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) static void intel_pmu_refresh(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + struct x86_pmu_capability x86_pmu; struct kvm_cpuid_entry2 *entry; union cpuid10_eax eax; @@ -387,12 +399,18 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); nested_vmx_pmu_entry_exit_ctls_update(vcpu); + + if (intel_pmu_lbr_is_compatible(vcpu)) + x86_perf_get_lbr(&lbr_desc->records); + else + lbr_desc->records.nr = 0; } static void intel_pmu_init(struct kvm_vcpu *vcpu) { int i; struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { pmu->gp_counters[i].type = KVM_PMC_GP; @@ -409,6 +427,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) } vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); + lbr_desc->records.nr = 0; } static void intel_pmu_reset(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c4b15f9f69b0..9a978a49721b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2211,6 +2211,18 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if ((data >> 32) != 0) return 1; goto find_uret_msr; + case MSR_IA32_PERF_CAPABILITIES: + if (data && !vcpu_to_pmu(vcpu)->version) + return 1; + if (data & PMU_CAP_LBR_FMT) { + if ((data & PMU_CAP_LBR_FMT) != + (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)) + return 1; + if (!intel_pmu_lbr_is_compatible(vcpu)) + return 1; + } + ret = kvm_set_msr_common(vcpu, msr_info); + break; default: find_uret_msr: diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 94bf7fd4e6ea..df61b0d09eb7 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -93,6 +93,16 @@ union vmx_exit_reason { u32 full; }; +#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc) +#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records) + 
+bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu); + +struct lbr_desc { + /* Basic info about guest LBR records. */ + struct x86_pmu_lbr records; +}; + /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. @@ -302,6 +312,7 @@ struct vcpu_vmx { u64 ept_pointer; struct pt_desc pt_desc; + struct lbr_desc lbr_desc; /* Save desired MSR intercept (read: pass-through) state */ #define MAX_POSSIBLE_PASSTHROUGH_MSRS 13 -- cgit v1.2.3 From c646236344e9054cc84cd5a9f763163b9654cf7e Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:31 +0800 Subject: KVM: vmx/pmu: Add PMU_CAP_LBR_FMT check when guest LBR is enabled Usespace could set the bits [0, 5] of the IA32_PERF_CAPABILITIES MSR which tells about the record format stored in the LBR records. The LBR will be enabled on the guest if host perf supports LBR (checked via x86_perf_get_lbr()) and the vcpu model is compatible with the host one. Signed-off-by: Like Xu Message-Id: <20210201051039.255478-4-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/capabilities.h | 7 ++++++- arch/x86/kvm/vmx/pmu_intel.c | 7 +++++++ arch/x86/kvm/vmx/vmx.c | 12 +++++++++++- arch/x86/kvm/vmx/vmx.h | 1 + 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index da3db1a37b34..787edd8c1fc7 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -387,7 +387,12 @@ static inline u64 vmx_get_perf_capabilities(void) static inline u64 vmx_supported_debugctl(void) { - return 0; + u64 debugctl = 0; + + if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) + debugctl |= DEBUGCTLMSR_LBR; + + return debugctl; } #endif /* __KVM_X86_VMX_CAPS_H */ diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index d21104e6f9ec..48529dd127bb 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -183,6 +183,13 @@ bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu) return boot_cpu_data.x86_model == guest_cpuid_model(vcpu); } +bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu) +{ + struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu); + + return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT); +} + static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9a978a49721b..c5cbef2402b8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1950,6 +1950,16 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, return (unsigned long)data; } +static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu) +{ + u64 debugctl = vmx_supported_debugctl(); + + if (!intel_pmu_lbr_is_enabled(vcpu)) + debugctl &= ~DEBUGCTLMSR_LBR; + + return debugctl; +} + /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. 
@@ -2001,7 +2011,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_DEBUGCTLMSR: { - u64 invalid = data & ~vmx_supported_debugctl(); + u64 invalid = data & ~vcpu_supported_debugctl(vcpu); if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { if (report_ignored_msrs) vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n", diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index df61b0d09eb7..3836daf7d489 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -97,6 +97,7 @@ union vmx_exit_reason { #define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records) bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu); +bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu); struct lbr_desc { /* Basic info about guest LBR records. */ -- cgit v1.2.3 From 8e12911b243e485f5e4c7c5fbc79cdf185728700 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:33 +0800 Subject: KVM: vmx/pmu: Create a guest LBR event when vcpu sets DEBUGCTLMSR_LBR When vcpu sets DEBUGCTLMSR_LBR in the MSR_IA32_DEBUGCTLMSR, the KVM handler would create a guest LBR event which enables the callstack mode and none of hardware counter is assigned. The host perf would schedule and enable this event as usual but in an exclusive way. The guest LBR event will be released when the vPMU is reset but soon, the lazy release mechanism would be applied to this event like a vPMC. Suggested-by: Andi Kleen Co-developed-by: Wei Wang Signed-off-by: Wei Wang Signed-off-by: Like Xu Reviewed-by: Andi Kleen Message-Id: <20210201051039.255478-6-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/pmu_intel.c | 63 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 3 +++ arch/x86/kvm/vmx/vmx.h | 10 +++++++ 3 files changed, 76 insertions(+) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 48529dd127bb..dbaa175a3c2e 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -224,6 +224,66 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr) return pmc; } +static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (lbr_desc->event) { + perf_event_release_kernel(lbr_desc->event); + lbr_desc->event = NULL; + vcpu_to_pmu(vcpu)->event_count--; + } +} + +int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct perf_event *event; + + /* + * The perf_event_attr is constructed in the minimum efficient way: + * - set 'pinned = true' to make it task pinned so that if another + * cpu pinned event reclaims LBR, the event->oncpu will be set to -1; + * - set '.exclude_host = true' to record guest branches behavior; + * + * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicates host perf + * schedule the event without a real HW counter but a fake one; + * check is_guest_lbr_event() and __intel_get_event_constraints(); + * + * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and + * 'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK | + * PERF_SAMPLE_BRANCH_USER' to configure it as a LBR callstack + * event, which helps KVM to save/restore guest LBR records + * during host context switches and reduces quite a lot overhead, + * check branch_user_callstack() and intel_pmu_lbr_sched_task(); + */ + struct perf_event_attr attr = { + .type = 
PERF_TYPE_RAW, + .size = sizeof(attr), + .config = INTEL_FIXED_VLBR_EVENT, + .sample_type = PERF_SAMPLE_BRANCH_STACK, + .pinned = true, + .exclude_host = true, + .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK | + PERF_SAMPLE_BRANCH_USER, + }; + + if (unlikely(lbr_desc->event)) + return 0; + + event = perf_event_create_kernel_counter(&attr, -1, + current, NULL, NULL); + if (IS_ERR(event)) { + pr_debug_ratelimited("%s: failed %ld\n", + __func__, PTR_ERR(event)); + return -ENOENT; + } + lbr_desc->event = event; + pmu->event_count++; + return 0; +} + static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -435,6 +495,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); lbr_desc->records.nr = 0; + lbr_desc->event = NULL; } static void intel_pmu_reset(struct kvm_vcpu *vcpu) @@ -459,6 +520,8 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = pmu->global_ovf_ctrl = 0; + + intel_pmu_release_guest_lbr_event(vcpu); } struct kvm_pmu_ops intel_pmu_ops = { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c5cbef2402b8..194458077dcb 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2028,6 +2028,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) get_vmcs12(vcpu)->guest_ia32_debugctl = data; vmcs_write64(GUEST_IA32_DEBUGCTL, data); + if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && + (data & DEBUGCTLMSR_LBR)) + intel_pmu_create_guest_lbr_event(vcpu); return 0; } case MSR_IA32_BNDCFGS: diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 3836daf7d489..8f33832db044 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -99,9 +99,19 @@ union vmx_exit_reason { bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu); bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu); +int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu); + struct lbr_desc { /* Basic info about guest LBR records. */ struct x86_pmu_lbr records; + + /* + * Emulate LBR feature via passthrough LBR registers when the + * per-vcpu guest LBR event is scheduled on the current pcpu. + * + * The records may be inaccurate if the host reclaims the LBR. + */ + struct perf_event *event; }; /* -- cgit v1.2.3 From 1b5ac3226a1aa071135fe0ee5d1055d9e88b717c Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:34 +0800 Subject: KVM: vmx/pmu: Pass-through LBR msrs when the guest LBR event is ACTIVE In addition to DEBUGCTLMSR_LBR, any KVM trap caused by LBR msrs access will result in a creation of guest LBR event per-vcpu. If the guest LBR event is scheduled on with the corresponding vcpu context, KVM will pass-through all LBR records msrs to the guest. The LBR callstack mechanism implemented in the host could help save/restore the guest LBR records during the event context switches, which reduces a lot of overhead if we save/restore tens of LBR msrs (e.g. 32 LBR records entries) in the much more frequent VMX transitions. To avoid reclaiming LBR resources from any higher priority event on host, KVM would always check the exist of guest LBR event and its state before vm-entry as late as possible. A negative result would cancel the pass-through state, and it also prevents real registers accesses and potential data leakage. 
If the host reclaims the LBR between the two checks, the interception state and LBR records can be safely preserved thanks to the native save/restore support of the guest LBR event. KVM emits a pr_warn() when the LBR hardware is unavailable to the guest LBR event. The administrator is supposed to remind users that the guest results may be inaccurate if someone is using the LBR to record the hypervisor on the host side.
Suggested-by: Andi Kleen Co-developed-by: Wei Wang Signed-off-by: Wei Wang Signed-off-by: Like Xu Reviewed-by: Andi Kleen Message-Id: <20210201051039.255478-7-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/pmu_intel.c | 127 ++++++++++++++++++++++++++++++++++++++++++- arch/x86/kvm/vmx/vmx.c | 10 ++++ arch/x86/kvm/vmx/vmx.h | 1 + 3 files changed, 135 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index dbaa175a3c2e..c7dbaaccbcaa 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -190,6 +190,24 @@ bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu) return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT); } +static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index) +{ + struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu); + bool ret = false; + + if (!intel_pmu_lbr_is_enabled(vcpu)) + return ret; + + ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) || + (index >= records->from && index < records->from + records->nr) || + (index >= records->to && index < records->to + records->nr); + + if (!ret && records->info) + ret = (index >= records->info && index < records->info + records->nr); + + return ret; +} + static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -205,7 +223,8 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) default: ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || - get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr); + get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) || + intel_pmu_is_valid_lbr_msr(vcpu, msr); break; } @@ -284,6 +303,46 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu) return 0; } +/* + * It's safe to access LBR msrs from guest when they have not + * been passthrough since the host would help restore or reset + * the LBR msrs records when the guest LBR event is scheduled in. + */ +static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu, + struct msr_data *msr_info, bool read) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + u32 index = msr_info->index; + + if (!intel_pmu_is_valid_lbr_msr(vcpu, index)) + return false; + + if (!lbr_desc->event && !intel_pmu_create_guest_lbr_event(vcpu)) + goto dummy; + + /* + * Disable irq to ensure the LBR feature doesn't get reclaimed by the + * host at the time the value is read from the msr, and this avoids the + * host LBR value to be leaked to the guest. If LBR has been reclaimed, + * return 0 on guest reads.
+ */ + local_irq_disable(); + if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) { + if (read) + rdmsrl(index, msr_info->data); + else + wrmsrl(index, msr_info->data); + local_irq_enable(); + return true; + } + local_irq_enable(); + +dummy: + if (read) + msr_info->data = 0; + return true; +} + static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -318,7 +377,8 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { msr_info->data = pmc->eventsel; return 0; - } + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) + return 0; } return 1; @@ -389,7 +449,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) reprogram_gp_counter(pmc, data); return 0; } - } + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) + return 0; } return 1; @@ -524,6 +585,66 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) intel_pmu_release_guest_lbr_event(vcpu); } +static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set) +{ + struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu); + int i; + + for (i = 0; i < lbr->nr; i++) { + vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set); + vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set); + if (lbr->info) + vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set); + } + + vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set); + vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set); +} + +static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) +{ + vmx_update_intercept_for_lbr_msrs(vcpu, true); +} + +static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) +{ + vmx_update_intercept_for_lbr_msrs(vcpu, false); +} + +/* + * Higher priority host perf events (e.g. cpu pinned) could reclaim the + * pmu resources (e.g. LBR) that were assigned to the guest. This is + * usually done via ipi calls (more details in perf_install_in_context). + * + * Before entering the non-root mode (with irq disabled here), double + * confirm that the pmu features enabled to the guest are not reclaimed + * by higher priority host events. Otherwise, disallow vcpu's access to + * the reclaimed features. + */ +void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu) +{ + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (!lbr_desc->event) { + vmx_disable_lbr_msrs_passthrough(vcpu); + if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR) + goto warn; + return; + } + + if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) { + vmx_disable_lbr_msrs_passthrough(vcpu); + goto warn; + } else + vmx_enable_lbr_msrs_passthrough(vcpu); + + return; + +warn: + pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n", + vcpu->vcpu_id); +} + struct kvm_pmu_ops intel_pmu_ops = { .find_arch_event = intel_find_arch_event, .find_fixed_event = intel_find_fixed_event, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 194458077dcb..e2f8c3c68f02 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -658,6 +658,14 @@ static bool is_valid_passthrough_msr(u32 msr) case MSR_IA32_RTIT_CR3_MATCH: case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: /* PT MSRs. These are handled in pt_update_intercept_for_msr() */ + case MSR_LBR_SELECT: + case MSR_LBR_TOS: + case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31: + case MSR_LBR_NHM_FROM ... 
MSR_LBR_NHM_FROM + 31: + case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31: + case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8: + case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8: + /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */ return true; } @@ -6769,6 +6777,8 @@ reenter_guest: pt_guest_enter(vmx); atomic_switch_perf_msrs(vmx); + if (intel_pmu_lbr_is_enabled(vcpu)) + vmx_passthrough_lbr_msrs(vcpu); if (enable_preemption_timer) vmx_update_hv_timer(vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 8f33832db044..41bf9adfcb79 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -100,6 +100,7 @@ bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu); bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu); int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu); +void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu); struct lbr_desc { /* Basic info about guest LBR records. */
-- cgit v1.2.3
From 9254beaafd12e27d48149fab3b16db372bc90ad7 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:35 +0800 Subject: KVM: vmx/pmu: Reduce the overhead of LBR pass-through or cancellation
When the LBR records msrs have already been passed through, there is no need to call vmx_update_intercept_for_lbr_msrs() again and again, and vice versa.
Signed-off-by: Like Xu Reviewed-by: Andi Kleen Message-Id: <20210201051039.255478-8-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/pmu_intel.c | 13 +++++++++++++ arch/x86/kvm/vmx/vmx.h | 3 +++ 2 files changed, 16 insertions(+)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index c7dbaaccbcaa..254d9fc09863 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -557,6 +557,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); lbr_desc->records.nr = 0; lbr_desc->event = NULL; + lbr_desc->msr_passthrough = false; } static void intel_pmu_reset(struct kvm_vcpu *vcpu) @@ -603,12 +604,24 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set) static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) { + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (!lbr_desc->msr_passthrough) + return; + vmx_update_intercept_for_lbr_msrs(vcpu, true); + lbr_desc->msr_passthrough = false; } static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) { + struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); + + if (lbr_desc->msr_passthrough) + return; + vmx_update_intercept_for_lbr_msrs(vcpu, false); + lbr_desc->msr_passthrough = true; } /*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 41bf9adfcb79..b5679d1e02c4 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -113,6 +113,9 @@ struct lbr_desc { * The records may be inaccurate if the host reclaims the LBR. */ struct perf_event *event; + + /* True if LBRs are marked as not intercepted in the MSR bitmap */ + bool msr_passthrough; }; /*
-- cgit v1.2.3
From e6209a3bef793e8fe29c873a7612023916eaa611 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:36 +0800 Subject: KVM: vmx/pmu: Emulate legacy freezing LBRs on virtual PMI
The current vPMU only supports Architecture Version 2. According to Intel SDM "17.4.7 Freezing LBR and Performance Counters on PMI", if IA32_DEBUGCTL.Freeze_LBR_On_PMI = 1, the LBR is frozen on the virtual PMI and KVM emulates this by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
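From the guest's side (a sketch for illustration only, not part of this patch, assuming a Linux guest using the kernel's rdmsrl()/wrmsrl() helpers), resuming branch recording after such a frozen PMI boils down to setting the bit again:

	/* Re-arm the LBR after handling the PMI: the freeze cleared
	 * DEBUGCTLMSR_LBR (bit 0), so set it again to resume recording.
	 */
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= DEBUGCTLMSR_LBR;
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);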
Also, guest needs to re-enable IA32_DEBUGCTL.LBR to resume recording branches. Signed-off-by: Like Xu Reviewed-by: Andi Kleen Message-Id: <20210201051039.255478-9-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 5 ++++- arch/x86/kvm/pmu.h | 1 + arch/x86/kvm/vmx/capabilities.h | 4 +++- arch/x86/kvm/vmx/pmu_intel.c | 30 ++++++++++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 2 +- 5 files changed, 39 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 67741d2a0308..405890c723a1 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -383,8 +383,11 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu) { - if (lapic_in_kernel(vcpu)) + if (lapic_in_kernel(vcpu)) { + if (kvm_x86_ops.pmu_ops->deliver_pmi) + kvm_x86_ops.pmu_ops->deliver_pmi(vcpu); kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); + } } bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 067fef51760c..742a4e98df8c 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -39,6 +39,7 @@ struct kvm_pmu_ops { void (*refresh)(struct kvm_vcpu *vcpu); void (*init)(struct kvm_vcpu *vcpu); void (*reset)(struct kvm_vcpu *vcpu); + void (*deliver_pmi)(struct kvm_vcpu *vcpu); }; static inline u64 pmc_bitmask(struct kvm_pmc *pmc) diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 787edd8c1fc7..8e6179586e27 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -21,6 +21,8 @@ extern int __read_mostly pt_mode; #define PMU_CAP_FW_WRITES (1ULL << 13) #define PMU_CAP_LBR_FMT 0x3f +#define DEBUGCTLMSR_LBR_MASK (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) + struct nested_vmx_msrs { /* * We only store the "true" versions of the VMX capability MSRs. We @@ -390,7 +392,7 @@ static inline u64 vmx_supported_debugctl(void) u64 debugctl = 0; if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) - debugctl |= DEBUGCTLMSR_LBR; + debugctl |= DEBUGCTLMSR_LBR_MASK; return debugctl; } diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 254d9fc09863..7928d6dee0a2 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -586,6 +586,35 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) intel_pmu_release_guest_lbr_event(vcpu); } +/* + * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4. + * + * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and + * the KVM emulates to clear the LBR bit (bit 0) in IA32_DEBUGCTL. + * + * Guest needs to re-enable LBR to resume branches recording. 
+ */ +static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu) +{ + u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL); + + if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) { + data &= ~DEBUGCTLMSR_LBR; + vmcs_write64(GUEST_IA32_DEBUGCTL, data); + } +} + +static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu) +{ + u8 version = vcpu_to_pmu(vcpu)->version; + + if (!intel_pmu_lbr_is_enabled(vcpu)) + return; + + if (version > 1 && version < 4) + intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu); +} + static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set) { struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu); @@ -672,4 +701,5 @@ struct kvm_pmu_ops intel_pmu_ops = { .refresh = intel_pmu_refresh, .init = intel_pmu_init, .reset = intel_pmu_reset, + .deliver_pmi = intel_pmu_deliver_pmi, }; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e2f8c3c68f02..9169c700874e 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1963,7 +1963,7 @@ static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu) u64 debugctl = vmx_supported_debugctl(); if (!intel_pmu_lbr_is_enabled(vcpu)) - debugctl &= ~DEBUGCTLMSR_LBR; + debugctl &= ~DEBUGCTLMSR_LBR_MASK; return debugctl; } -- cgit v1.2.3 From 9aa4f622460f9287e57804dbeb219bfef29f04a1 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:37 +0800 Subject: KVM: vmx/pmu: Release guest LBR event via lazy release mechanism The vPMU uses GUEST_LBR_IN_USE_IDX (bit 58) in 'pmu->pmc_in_use' to indicate whether a guest LBR event is still needed by the vcpu. If the vcpu no longer accesses LBR related registers within a scheduling time slice, and the enable bit of LBR has been unset, vPMU will treat the guest LBR event as a bland event of a vPMC counter and release it as usual. Also, the pass-through state of LBR records msrs is cancelled. 
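Putting the hunks below together, the lifecycle is roughly as follows (a sketch stitched from two separate hunks of this patch; INTEL_PMC_IDX_FIXED_VLBR is the bit 58 mentioned above):

	/* Every emulated LBR MSR access that finds the event ACTIVE refreshes
	 * the in-use marker, exactly like a vPMC access would:
	 */
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);

	/* A scheduling time slice later, kvm_pmu_cleanup() runs the new
	 * ->cleanup() hook, which drops the event only if the guest has also
	 * cleared DEBUGCTLMSR_LBR:
	 */
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);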
Signed-off-by: Like Xu Message-Id: <20210201051039.255478-10-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 3 +++ arch/x86/kvm/pmu.h | 1 + arch/x86/kvm/vmx/pmu_intel.c | 21 ++++++++++++++++++++- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 405890c723a1..136dc2f3c5d3 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -476,6 +476,9 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu) pmc_stop_counter(pmc); } + if (kvm_x86_ops.pmu_ops->cleanup) + kvm_x86_ops.pmu_ops->cleanup(vcpu); + bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX); } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 742a4e98df8c..7b30bc967af3 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -40,6 +40,7 @@ struct kvm_pmu_ops { void (*init)(struct kvm_vcpu *vcpu); void (*reset)(struct kvm_vcpu *vcpu); void (*deliver_pmi)(struct kvm_vcpu *vcpu); + void (*cleanup)(struct kvm_vcpu *vcpu); }; static inline u64 pmc_bitmask(struct kvm_pmc *pmc) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 7928d6dee0a2..d1df618cb7de 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -288,8 +288,10 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu) PERF_SAMPLE_BRANCH_USER, }; - if (unlikely(lbr_desc->event)) + if (unlikely(lbr_desc->event)) { + __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); return 0; + } event = perf_event_create_kernel_counter(&attr, -1, current, NULL, NULL); @@ -300,6 +302,7 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu) } lbr_desc->event = event; pmu->event_count++; + __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); return 0; } @@ -332,9 +335,11 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu, rdmsrl(index, msr_info->data); else wrmsrl(index, msr_info->data); + __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); local_irq_enable(); return true; } + clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); local_irq_enable(); dummy: @@ -532,6 +537,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) x86_perf_get_lbr(&lbr_desc->records); else lbr_desc->records.nr = 0; + + if (lbr_desc->records.nr) + bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1); } static void intel_pmu_init(struct kvm_vcpu *vcpu) @@ -665,17 +673,21 @@ static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) */ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu) { + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); if (!lbr_desc->event) { vmx_disable_lbr_msrs_passthrough(vcpu); if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR) goto warn; + if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) + goto warn; return; } if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) { vmx_disable_lbr_msrs_passthrough(vcpu); + __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); goto warn; } else vmx_enable_lbr_msrs_passthrough(vcpu); @@ -687,6 +699,12 @@ warn: vcpu->vcpu_id); } +static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) +{ + if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)) + intel_pmu_release_guest_lbr_event(vcpu); +} + struct kvm_pmu_ops intel_pmu_ops = { .find_arch_event = intel_find_arch_event, .find_fixed_event = intel_find_fixed_event, @@ -702,4 +720,5 @@ struct kvm_pmu_ops intel_pmu_ops = { .init = intel_pmu_init, .reset = intel_pmu_reset, .deliver_pmi = intel_pmu_deliver_pmi, + .cleanup = 
intel_pmu_cleanup, };
-- cgit v1.2.3
From be635e34c284d08b1da7f93ddd6a2110617d15e7 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:38 +0800 Subject: KVM: vmx/pmu: Expose LBR_FMT in the MSR_IA32_PERF_CAPABILITIES
Userspace can enable the guest LBR feature only if it writes the exact LBR format value supported by the host into MSR_IA32_PERF_CAPABILITIES, and only if the LBR is also compatible with the vPMU version and the host CPU model. The LBR can be enabled on the guest if host perf supports LBR (checked via x86_perf_get_lbr()) and the vcpu model is compatible with the host one.
Signed-off-by: Like Xu Message-Id: <20210201051039.255478-11-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/capabilities.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 8e6179586e27..d1d77985e889 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -380,11 +380,18 @@ static inline bool vmx_pt_mode_is_host_guest(void) static inline u64 vmx_get_perf_capabilities(void) { + u64 perf_cap = 0; + + if (boot_cpu_has(X86_FEATURE_PDCM)) + rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap); + + perf_cap &= PMU_CAP_LBR_FMT; + /* * Since counters are virtualized, KVM would support full * width counting unconditionally, even if the host lacks it. */ - return PMU_CAP_FW_WRITES; + return PMU_CAP_FW_WRITES | perf_cap; } static inline u64 vmx_supported_debugctl(void)
-- cgit v1.2.3
From f88d4f2f287ec062e985b60cbe60f04bd5a8e659 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Mon, 1 Feb 2021 13:10:39 +0800 Subject: selftests: kvm/x86: add test for pmu msr MSR_IA32_PERF_CAPABILITIES
This test will check the effect of various CPUID settings on the MSR_IA32_PERF_CAPABILITIES MSR, check that whatever user space writes with KVM_SET_MSR is _not_ modified by the guest and can be retrieved with KVM_GET_MSR, and check that invalid LBR formats are rejected.
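Condensed from the test below, the probing step that derives the only LBR format value a vcpu may set is simply:

	/* Sketch: read the host's PERF_CAPABILITIES through the KVM feature
	 * MSR interface and keep only the bits this test exercises;
	 * host_cap.lbr_format (the low 6 bits) is then the sole LBR format
	 * value that KVM_SET_MSR must accept.
	 */
	union perf_capabilities host_cap;

	host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
	host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT);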
Signed-off-by: Like Xu Message-Id: <20210201051039.255478-12-like.xu@linux.intel.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/include/x86_64/processor.h | 3 +- tools/testing/selftests/kvm/lib/x86_64/processor.c | 34 ++++++ .../selftests/kvm/x86_64/vmx_pmu_msrs_test.c | 131 +++++++++++++++++++++ 5 files changed, 169 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index a747260662b6..c08f26d0aec4 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -26,6 +26,7 @@ /x86_64/vmx_tsc_adjust_test /x86_64/xapic_ipi_test /x86_64/xss_msr_test +/x86_64/vmx_pmu_msrs_test /demand_paging_test /dirty_log_test /dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index fcc46355d7e1..60410e9ebd41 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -60,6 +60,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += x86_64/debug_regs TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test +TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 74ce0fe420ab..fce4cd4d4895 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -338,8 +338,9 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state); struct kvm_msr_list *kvm_get_msr_index_list(void); - +uint64_t kvm_get_feature_msr(uint64_t msr_index); struct kvm_cpuid2 *kvm_get_supported_cpuid(void); + void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 95e1a757c629..66926a7f29cc 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -669,6 +669,40 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void) return cpuid; } +/* + * KVM Get MSR + * + * Input Args: + * msr_index - Index of MSR + * + * Output Args: None + * + * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced. + * + * Get value of MSR for VCPU. + */ +uint64_t kvm_get_feature_msr(uint64_t msr_index) +{ + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r, kvm_fd; + + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + kvm_fd = open(KVM_DEV_PATH, O_RDONLY); + if (kvm_fd < 0) + exit(KSFT_SKIP); + + r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" + " rc: %i errno: %i", r, errno); + + close(kvm_fd); + return buffer.entry.data; +} + /* * Locate a cpuid entry. 
* diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c new file mode 100644 index 000000000000..23051d84b907 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * VMX-pmu related msrs test + * + * Copyright (C) 2021 Intel Corporation + * + * Test to check the effect of various CPUID settings + * on the MSR_IA32_PERF_CAPABILITIES MSR, and check that + * whatever we write with KVM_SET_MSR is _not_ modified + * in the guest and test it can be retrieved with KVM_GET_MSR. + * + * Test to check that invalid LBR formats are rejected. + */ + +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include + +#include "kvm_util.h" +#include "vmx.h" + +#define VCPU_ID 0 + +#define X86_FEATURE_PDCM (1<<15) +#define PMU_CAP_FW_WRITES (1ULL << 13) +#define PMU_CAP_LBR_FMT 0x3f + +union cpuid10_eax { + struct { + unsigned int version_id:8; + unsigned int num_counters:8; + unsigned int bit_width:8; + unsigned int mask_length:8; + } split; + unsigned int full; +}; + +union perf_capabilities { + struct { + u64 lbr_format:6; + u64 pebs_trap:1; + u64 pebs_arch_reg:1; + u64 pebs_format:4; + u64 smm_freeze:1; + u64 full_width_write:1; + u64 pebs_baseline:1; + u64 perf_metrics:1; + u64 pebs_output_pt_available:1; + u64 anythread_deprecated:1; + }; + u64 capabilities; +}; + +static void guest_code(void) +{ + wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); +} + +int main(int argc, char *argv[]) +{ + struct kvm_cpuid2 *cpuid; + struct kvm_cpuid_entry2 *entry_1_0; + struct kvm_cpuid_entry2 *entry_a_0; + bool pdcm_supported = false; + struct kvm_vm *vm; + int ret; + union cpuid10_eax eax; + union perf_capabilities host_cap; + + host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES); + host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT); + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, guest_code); + cpuid = kvm_get_supported_cpuid(); + + if (kvm_get_cpuid_max_basic() >= 0xa) { + entry_1_0 = kvm_get_supported_cpuid_index(1, 0); + entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0); + pdcm_supported = entry_1_0 && !!(entry_1_0->ecx & X86_FEATURE_PDCM); + eax.full = entry_a_0->eax; + } + if (!pdcm_supported) { + print_skip("MSR_IA32_PERF_CAPABILITIES is not supported by the vCPU"); + exit(KSFT_SKIP); + } + if (!eax.split.version_id) { + print_skip("PMU is not supported by the vCPU"); + exit(KSFT_SKIP); + } + + /* testcase 1, set capabilities when we have PDCM bit */ + vcpu_set_cpuid(vm, VCPU_ID, cpuid); + vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); + + /* check capabilities can be retrieved with KVM_GET_MSR */ + ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); + + /* check whatever we write with KVM_SET_MSR is _not_ modified */ + vcpu_run(vm, VCPU_ID); + ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); + + /* testcase 2, check valid LBR formats are accepted */ + vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0); + ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0); + + vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format); + ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format); + + /* testcase 3, check invalid LBR format is rejected */ + ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); + TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES 
didn't fail."); + + /* testcase 4, set capabilities when we don't have PDCM bit */ + entry_1_0->ecx &= ~X86_FEATURE_PDCM; + vcpu_set_cpuid(vm, VCPU_ID, cpuid); + ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); + TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); + + /* testcase 5, set capabilities when we don't have PMU version bits */ + entry_1_0->ecx |= X86_FEATURE_PDCM; + eax.split.version_id = 0; + entry_1_0->ecx = eax.full; + vcpu_set_cpuid(vm, VCPU_ID, cpuid); + ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); + TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); + + vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0); + ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0); + + kvm_vm_free(vm); +} -- cgit v1.2.3 From 9a3ecd5e2aa10af18d0d5a055122d6cc0b0944c7 Mon Sep 17 00:00:00 2001 From: Chenyi Qiang Date: Tue, 2 Feb 2021 17:04:31 +0800 Subject: KVM: X86: Rename DR6_INIT to DR6_ACTIVE_LOW DR6_INIT contains the 1-reserved bits as well as the bit that is cleared to 0 when the condition (e.g. RTM) happens. The value can be used to initialize dr6 and also be the XOR mask between the #DB exit qualification (or payload) and DR6. Concerning that DR6_INIT is used as initial value only once, rename it to DR6_ACTIVE_LOW and apply it in other places, which would make the incoming changes for bus lock debug exception more simple. Signed-off-by: Chenyi Qiang Message-Id: <20210202090433.13441-2-chenyi.qiang@intel.com> [Define DR6_FIXED_1 from DR6_ACTIVE_LOW and DR6_VOLATILE. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 12 ++++++++++-- arch/x86/kvm/emulate.c | 2 +- arch/x86/kvm/svm/nested.c | 2 +- arch/x86/kvm/svm/svm.c | 6 +++--- arch/x86/kvm/vmx/nested.c | 4 ++-- arch/x86/kvm/vmx/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 33 +++++++++++++++++++-------------- 7 files changed, 38 insertions(+), 25 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c0706022d22f..876ff1447337 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -203,9 +203,17 @@ enum x86_intercept_stage; #define DR6_BS (1 << 14) #define DR6_BT (1 << 15) #define DR6_RTM (1 << 16) -#define DR6_FIXED_1 0xfffe0ff0 -#define DR6_INIT 0xffff0ff0 +/* + * DR6_ACTIVE_LOW combines fixed-1 and active-low bits. + * We can regard all the bits in DR6_FIXED_1 as active_low bits; + * they will never be 0 for now, but when they are defined + * in the future it will require no code change. + * + * DR6_ACTIVE_LOW is also used as the init/reset value for DR6. 
+ */ +#define DR6_ACTIVE_LOW 0xffff0ff0 #define DR6_VOLATILE 0x0001e00f +#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE) #define DR7_BP_EN_MASK 0x000000ff #define DR7_GE (1 << 9) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 66a08322988f..612ee691065f 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4329,7 +4329,7 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt) ctxt->ops->get_dr(ctxt, 6, &dr6); dr6 &= ~DR_TRAP_BITS; - dr6 |= DR6_BD | DR6_RTM; + dr6 |= DR6_BD | DR6_ACTIVE_LOW; ctxt->ops->set_dr(ctxt, 6, dr6); return emulate_db(ctxt); } diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index db30670dd8c4..d679130d209b 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -392,7 +392,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12) svm->vmcb->save.rsp = vmcb12->save.rsp; svm->vmcb->save.rip = vmcb12->save.rip; svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1; - svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_FIXED_1 | DR6_RTM; + svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW; svm->vmcb->save.cpl = vmcb12->save.cpl; } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 3442d44ca53b..c9f72f87deba 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1865,7 +1865,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); /* - * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here, + * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, * because db_interception might need it. We can do it before vmentry. */ vcpu->arch.dr6 = svm->vmcb->save.dr6; @@ -1916,7 +1916,7 @@ static int db_interception(struct vcpu_svm *svm) if (!(svm->vcpu.guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && !svm->nmi_singlestep) { - u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1; + u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload); return 1; } @@ -3783,7 +3783,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) svm_set_dr6(svm, vcpu->arch.dr6); else - svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM); + svm_set_dr6(svm, DR6_ACTIVE_LOW); clgi(); kvm_load_guest_xsave_state(vcpu); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index cb48236cc24d..94cd3f8dd428 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -411,8 +411,8 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit if (nr == DB_VECTOR) { if (!has_payload) { payload = vcpu->arch.dr6; - payload &= ~(DR6_FIXED_1 | DR6_BT); - payload ^= DR6_RTM; + payload &= ~DR6_BT; + payload ^= DR6_ACTIVE_LOW; } *exit_qual = payload; } else diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9169c700874e..043a9bbd5ff1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4876,7 +4876,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); return 1; } - kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); fallthrough; case BP_VECTOR: @@ -5120,7 +5120,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) * guest debugging itself. 
*/ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { - vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1; + vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW; vcpu->run->debug.arch.dr7 = dr7; vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); vcpu->run->debug.arch.exception = DB_VECTOR; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0ecbb4ecc363..fae4ccbd767b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -486,19 +486,24 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) */ vcpu->arch.dr6 &= ~DR_TRAP_BITS; /* - * DR6.RTM is set by all #DB exceptions that don't clear it. + * In order to reflect the #DB exception payload in guest + * dr6, three components need to be considered: active low + * bit, FIXED_1 bits and active high bits (e.g. DR6_BD, + * DR6_BS and DR6_BT) + * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits. + * In the target guest dr6: + * FIXED_1 bits should always be set. + * Active low bits should be cleared if 1-setting in payload. + * Active high bits should be set if 1-setting in payload. + * + * Note, the payload is compatible with the pending debug + * exceptions/exit qualification under VMX, that active_low bits + * are active high in payload. + * So they need to be flipped for DR6. */ - vcpu->arch.dr6 |= DR6_RTM; + vcpu->arch.dr6 |= DR6_ACTIVE_LOW; vcpu->arch.dr6 |= payload; - /* - * Bit 16 should be set in the payload whenever the #DB - * exception should clear DR6.RTM. This makes the payload - * compatible with the pending debug exceptions under VMX. - * Though not currently documented in the SDM, this also - * makes the payload compatible with the exit qualification - * for #DB exceptions under VMX. - */ - vcpu->arch.dr6 ^= payload & DR6_RTM; + vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW; /* * The #DB payload is defined as compatible with the 'pending @@ -7211,7 +7216,7 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) struct kvm_run *kvm_run = vcpu->run; if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; @@ -7255,7 +7260,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) vcpu->arch.eff_db); if (dr6 != 0) { - kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; kvm_run->debug.arch.pc = eip; kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; @@ -10118,7 +10123,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); kvm_update_dr0123(vcpu); - vcpu->arch.dr6 = DR6_INIT; + vcpu->arch.dr6 = DR6_ACTIVE_LOW; vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); -- cgit v1.2.3 From 4aa2691dcbd38ce1c461188799d863398dd2865d Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Tue, 26 Jan 2021 03:18:28 -0500 Subject: KVM: x86: Factor out x86 instruction emulation with decoding Move the instruction decode part out of x86_emulate_instruction() for it to be used in other places. Also kvm_clear_exception_queue() is moved inside the if-statement as it doesn't apply when KVM are coming back from userspace. 
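The intended calling pattern (a sketch condensed from the SVM #GP intercept added later in this series; svm_instr_opcode(), emulate_svm_instr() and NONE_SVM_INSTR come from that later patch) is roughly:

	/* Decode once, inspect the decoded instruction, then either handle
	 * it directly or re-enter the emulator without decoding again by
	 * passing EMULTYPE_NO_DECODE.
	 */
	int opcode;

	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
		goto reinject;

	opcode = svm_instr_opcode(vcpu);	/* looks at ctxt->b and ctxt->modrm */
	if (opcode == NONE_SVM_INSTR)
		return kvm_emulate_instruction(vcpu,
				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
	return emulate_svm_instr(vcpu, opcode);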
Co-developed-by: Bandan Das Signed-off-by: Bandan Das Signed-off-by: Wei Huang Message-Id: <20210126081831.570253-2-wei.huang2@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 62 ++++++++++++++++++++++++++++++++++-------------------- arch/x86/kvm/x86.h | 2 ++ 2 files changed, 41 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fae4ccbd767b..ae8efb0442cf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7317,6 +7317,42 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) return false; } +/* + * Decode to be emulated instruction. Return EMULATION_OK if success. + */ +int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, + void *insn, int insn_len) +{ + int r = EMULATION_OK; + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + + init_emulate_ctxt(vcpu); + + /* + * We will reenter on the same instruction since we do not set + * complete_userspace_io. This does not handle watchpoints yet, + * those would be handled in the emulate_ops. + */ + if (!(emulation_type & EMULTYPE_SKIP) && + kvm_vcpu_check_breakpoint(vcpu, &r)) + return r; + + ctxt->interruptibility = 0; + ctxt->have_exception = false; + ctxt->exception.vector = -1; + ctxt->perm_ok = false; + + ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; + + r = x86_decode_insn(ctxt, insn, insn_len); + + trace_kvm_emulate_insn_start(vcpu); + ++vcpu->stat.insn_emulation; + + return r; +} +EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); + int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int emulation_type, void *insn, int insn_len) { @@ -7336,32 +7372,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, */ write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; vcpu->arch.write_fault_to_shadow_pgtable = false; - kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { - init_emulate_ctxt(vcpu); - - /* - * We will reenter on the same instruction since - * we do not set complete_userspace_io. This does not - * handle watchpoints yet, those would be handled in - * the emulate_ops. 
- */ - if (!(emulation_type & EMULTYPE_SKIP) && - kvm_vcpu_check_breakpoint(vcpu, &r)) - return r; - - ctxt->interruptibility = 0; - ctxt->have_exception = false; - ctxt->exception.vector = -1; - ctxt->perm_ok = false; - - ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; - - r = x86_decode_insn(ctxt, insn, insn_len); + kvm_clear_exception_queue(vcpu); - trace_kvm_emulate_insn_start(vcpu); - ++vcpu->stat.insn_emulation; + r = x86_decode_emulated_instruction(vcpu, emulation_type, + insn, insn_len); if (r != EMULATION_OK) { if ((emulation_type & EMULTYPE_TRAP_UD) || (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 5214b3be30ad..f88045a8af53 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -273,6 +273,8 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num); bool kvm_vector_hashing_enabled(void); void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code); +int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, + void *insn, int insn_len); int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int emulation_type, void *insn, int insn_len); fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 82a11e9c6fa2b50a7fff542fb782359dc1eab6bf Mon Sep 17 00:00:00 2001 From: Bandan Das Date: Tue, 26 Jan 2021 03:18:29 -0500 Subject: KVM: SVM: Add emulation support for #GP triggered by SVM instructions While running SVM related instructions (VMRUN/VMSAVE/VMLOAD), some AMD CPUs check EAX against reserved memory regions (e.g. SMM memory on host) before checking VMCB's instruction intercept. If EAX falls into such memory areas, #GP is triggered before VMEXIT. This causes problem under nested virtualization. To solve this problem, KVM needs to trap #GP and check the instructions triggering #GP. For VM execution instructions, KVM emulates these instructions. Co-developed-by: Wei Huang Signed-off-by: Wei Huang Signed-off-by: Bandan Das Message-Id: <20210126081831.570253-3-wei.huang2@amd.com> [Conditionally enable #GP intercept. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 109 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 91 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c9f72f87deba..073a364f1d1d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -200,6 +200,8 @@ module_param(sev_es, int, 0444); bool __read_mostly dump_invalid_vmcb; module_param(dump_invalid_vmcb, bool, 0644); +bool svm_gp_erratum_intercept = true; + static u8 rsm_ins_bytes[] = "\x0f\xaa"; static void svm_complete_interrupts(struct vcpu_svm *svm); @@ -288,6 +290,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) if (!(efer & EFER_SVME)) { svm_leave_nested(svm); svm_set_gif(svm, true); + /* #GP intercept is still needed for vmware backdoor */ + if (!enable_vmware_backdoor) + clr_exception_intercept(svm, GP_VECTOR); /* * Free the nested guest state, unless we are in SMM. 
@@ -304,6 +309,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) vcpu->arch.efer = old_efer; return ret; } + + if (svm_gp_erratum_intercept) + set_exception_intercept(svm, GP_VECTOR); } } @@ -1962,24 +1970,6 @@ static int ac_interception(struct vcpu_svm *svm) return 1; } -static int gp_interception(struct vcpu_svm *svm) -{ - struct kvm_vcpu *vcpu = &svm->vcpu; - u32 error_code = svm->vmcb->control.exit_info_1; - - WARN_ON_ONCE(!enable_vmware_backdoor); - - /* - * VMware backdoor emulation on #GP interception only handles IN{S}, - * OUT{S}, and RDPMC, none of which generate a non-zero error code. - */ - if (error_code) { - kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); - return 1; - } - return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); -} - static bool is_erratum_383(void) { int err, i; @@ -2178,6 +2168,89 @@ static int vmrun_interception(struct vcpu_svm *svm) return nested_svm_vmrun(svm); } +enum { + NONE_SVM_INSTR, + SVM_INSTR_VMRUN, + SVM_INSTR_VMLOAD, + SVM_INSTR_VMSAVE, +}; + +/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */ +static int svm_instr_opcode(struct kvm_vcpu *vcpu) +{ + struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + + if (ctxt->b != 0x1 || ctxt->opcode_len != 2) + return NONE_SVM_INSTR; + + switch (ctxt->modrm) { + case 0xd8: /* VMRUN */ + return SVM_INSTR_VMRUN; + case 0xda: /* VMLOAD */ + return SVM_INSTR_VMLOAD; + case 0xdb: /* VMSAVE */ + return SVM_INSTR_VMSAVE; + default: + break; + } + + return NONE_SVM_INSTR; +} + +static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) +{ + int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = { + [SVM_INSTR_VMRUN] = vmrun_interception, + [SVM_INSTR_VMLOAD] = vmload_interception, + [SVM_INSTR_VMSAVE] = vmsave_interception, + }; + struct vcpu_svm *svm = to_svm(vcpu); + + return svm_instr_handlers[opcode](svm); +} + +/* + * #GP handling code. Note that #GP can be triggered under the following two + * cases: + * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on + * some AMD CPUs when EAX of these instructions are in the reserved memory + * regions (e.g. SMM memory on host). + * 2) VMware backdoor + */ +static int gp_interception(struct vcpu_svm *svm) +{ + struct kvm_vcpu *vcpu = &svm->vcpu; + u32 error_code = svm->vmcb->control.exit_info_1; + int opcode; + + /* Both #GP cases have zero error_code */ + if (error_code) + goto reinject; + + /* Decode the instruction for usage later */ + if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) + goto reinject; + + opcode = svm_instr_opcode(vcpu); + + if (opcode == NONE_SVM_INSTR) { + if (!enable_vmware_backdoor) + goto reinject; + + /* + * VMware backdoor emulation on #GP interception only handles + * IN{S}, OUT{S}, and RDPMC. + */ + return kvm_emulate_instruction(vcpu, + EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); + } else + return emulate_svm_instr(vcpu, opcode); + +reinject: + kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); + return 1; +} + void svm_set_gif(struct vcpu_svm *svm, bool value) { if (value) { -- cgit v1.2.3 From 3b9c723ed7cfa4e1eef338afaa57e94be2a60d9c Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Tue, 26 Jan 2021 03:18:30 -0500 Subject: KVM: SVM: Add support for SVM instruction address check change New AMD CPUs have a change that checks #VMEXIT intercept on special SVM instructions before checking their EAX against reserved memory region. This change is indicated by CPUID_0x8000000A_EDX[28]. If it is 1, #VMEXIT is triggered before #GP. 
KVM doesn't need to intercept and emulate #GP faults in that case, as the #VMEXIT is triggered first.
Co-developed-by: Bandan Das Signed-off-by: Bandan Das Signed-off-by: Wei Huang Reviewed-by: Maxim Levitsky Message-Id: <20210126081831.570253-4-wei.huang2@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kvm/svm/svm.c | 3 +++ 2 files changed, 4 insertions(+)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index bc33e319db56..f1957b3c8e4e 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -338,6 +338,7 @@ #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ +#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 073a364f1d1d..0378d423044f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1040,6 +1040,9 @@ static __init int svm_hardware_setup(void) } } + if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) + svm_gp_erratum_intercept = false; + if (vgif) { if (!boot_cpu_has(X86_FEATURE_VGIF)) vgif = false;
-- cgit v1.2.3
From 14c2bf81fcd2226ca7fb9b179320ca1ca7cb581a Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Tue, 26 Jan 2021 03:18:31 -0500 Subject: KVM: SVM: Fix #GP handling for doubly-nested virtualization
In the case of nested on nested (L0, L1, L2 are all hypervisors), we do not support emulation of the vVMLOAD/VMSAVE feature; the L0 hypervisor can inject the proper #VMEXIT to inform L1 of what is happening, and L1 can avoid invoking the #GP workaround. For this reason we turn on the guest VM's X86_FEATURE_SVME_ADDR_CHK bit, so that a KVM running inside the VM receives the notification and changes its behavior. Similarly, we check whether the vcpu is in guest mode before emulating the vmware-backdoor instructions. For the case of nested on nested, we let the guest handle it.
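In code terms (a sketch condensed from the emulate_svm_instr() hunk below), reflecting the intercepted instruction to L1 instead of emulating it looks like this:

	/* While L2 is running, turn the spurious #GP into the #VMEXIT that
	 * L1 asked for and let L1 handle the instruction itself.
	 */
	if (is_guest_mode(vcpu)) {
		svm->vmcb->control.exit_code = guest_mode_exit_codes[opcode];
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;
		return nested_svm_vmexit(svm);
	}
	return svm_instr_handlers[opcode](svm);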
Co-developed-by: Bandan Das Signed-off-by: Bandan Das Signed-off-by: Wei Huang Tested-by: Maxim Levitsky Reviewed-by: Maxim Levitsky Message-Id: <20210126081831.570253-5-wei.huang2@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 0378d423044f..8d18f01b29c3 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -933,6 +933,9 @@ static __init void svm_set_cpu_caps(void) if (npt_enabled) kvm_cpu_cap_set(X86_FEATURE_NPT); + + /* Nested VM can receive #VMEXIT instead of triggering #GP */ + kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); } /* CPUID 0x80000008 */ @@ -2202,6 +2205,11 @@ static int svm_instr_opcode(struct kvm_vcpu *vcpu) static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) { + const int guest_mode_exit_codes[] = { + [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, + [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, + [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, + }; int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = { [SVM_INSTR_VMRUN] = vmrun_interception, [SVM_INSTR_VMLOAD] = vmload_interception, @@ -2209,7 +2217,14 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) }; struct vcpu_svm *svm = to_svm(vcpu); - return svm_instr_handlers[opcode](svm); + if (is_guest_mode(vcpu)) { + svm->vmcb->control.exit_code = guest_mode_exit_codes[opcode]; + svm->vmcb->control.exit_info_1 = 0; + svm->vmcb->control.exit_info_2 = 0; + + return nested_svm_vmexit(svm); + } else + return svm_instr_handlers[opcode](svm); } /* @@ -2244,7 +2259,8 @@ static int gp_interception(struct vcpu_svm *svm) * VMware backdoor emulation on #GP interception only handles * IN{S}, OUT{S}, and RDPMC. */ - return kvm_emulate_instruction(vcpu, + if (!is_guest_mode(vcpu)) + return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); } else return emulate_svm_instr(vcpu, opcode); -- cgit v1.2.3 From 6e4e3b4df4e31ed679de994540f308306f12234b Mon Sep 17 00:00:00 2001 From: Cun Li Date: Mon, 11 Jan 2021 23:24:35 +0800 Subject: KVM: Stop using deprecated jump label APIs The use of 'struct static_key' and 'static_key_false' is deprecated. Use the new API. Signed-off-by: Cun Li Message-Id: <20210111152435.50275-1-cun.jia.li@gmail.com> [Make it compile. While at it, rename kvm_no_apic_vcpu to kvm_has_noapic_vcpu; the former reads too much like "true if no vCPU has an APIC". 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 25 +++++++++---------------- arch/x86/kvm/lapic.h | 13 ++++++------- arch/x86/kvm/mmu/mmu_audit.c | 8 ++++---- arch/x86/kvm/x86.c | 9 ++++----- 4 files changed, 23 insertions(+), 32 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 43cceadd073e..87f6b5a5a272 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -91,8 +91,8 @@ static inline int __apic_test_and_clear_vector(int vec, void *bitmap) return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } -struct static_key_deferred apic_hw_disabled __read_mostly; -struct static_key_deferred apic_sw_disabled __read_mostly; +__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ); +__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ); static inline int apic_enabled(struct kvm_lapic *apic) { @@ -290,9 +290,9 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) if (enabled != apic->sw_enabled) { apic->sw_enabled = enabled; if (enabled) - static_key_slow_dec_deferred(&apic_sw_disabled); + static_branch_slow_dec_deferred(&apic_sw_disabled); else - static_key_slow_inc(&apic_sw_disabled.key); + static_branch_inc(&apic_sw_disabled.key); atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } @@ -2175,10 +2175,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu) hrtimer_cancel(&apic->lapic_timer.timer); if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) - static_key_slow_dec_deferred(&apic_hw_disabled); + static_branch_slow_dec_deferred(&apic_hw_disabled); if (!apic->sw_enabled) - static_key_slow_dec_deferred(&apic_sw_disabled); + static_branch_slow_dec_deferred(&apic_sw_disabled); if (apic->regs) free_page((unsigned long)apic->regs); @@ -2250,9 +2250,9 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { if (value & MSR_IA32_APICBASE_ENABLE) { kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); - static_key_slow_dec_deferred(&apic_hw_disabled); + static_branch_slow_dec_deferred(&apic_hw_disabled); } else { - static_key_slow_inc(&apic_hw_disabled.key); + static_branch_inc(&apic_hw_disabled.key); atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } } @@ -2449,7 +2449,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) * thinking that APIC state has changed. 
*/ vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; - static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ + static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */ kvm_iodevice_init(&apic->dev, &apic_mmio_ops); return 0; @@ -2904,13 +2904,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) } } -void kvm_lapic_init(void) -{ - /* do not patch jump label more than once per second */ - jump_label_rate_limit(&apic_hw_disabled, HZ); - jump_label_rate_limit(&apic_sw_disabled, HZ); -} - void kvm_lapic_exit(void) { static_key_deferred_flush(&apic_hw_disabled); diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 4fb86e3a9dd3..ff4a7cc7d694 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -131,7 +131,6 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) } int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len); -void kvm_lapic_init(void); void kvm_lapic_exit(void); #define VEC_POS(v) ((v) & (32 - 1)) @@ -172,29 +171,29 @@ static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 va __kvm_lapic_set_reg(apic->regs, reg_off, val); } -extern struct static_key kvm_no_apic_vcpu; +DECLARE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu) { - if (static_key_false(&kvm_no_apic_vcpu)) + if (static_branch_unlikely(&kvm_has_noapic_vcpu)) return vcpu->arch.apic; return true; } -extern struct static_key_deferred apic_hw_disabled; +extern struct static_key_false_deferred apic_hw_disabled; static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic) { - if (static_key_false(&apic_hw_disabled.key)) + if (static_branch_unlikely(&apic_hw_disabled.key)) return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE; return MSR_IA32_APICBASE_ENABLE; } -extern struct static_key_deferred apic_sw_disabled; +extern struct static_key_false_deferred apic_sw_disabled; static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic) { - if (static_key_false(&apic_sw_disabled.key)) + if (static_branch_unlikely(&apic_sw_disabled.key)) return apic->sw_enabled; return true; } diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c index c8d51a37e2ce..ced15fd58fde 100644 --- a/arch/x86/kvm/mmu/mmu_audit.c +++ b/arch/x86/kvm/mmu/mmu_audit.c @@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu) } static bool mmu_audit; -static struct static_key mmu_audit_key; +static DEFINE_STATIC_KEY_FALSE(mmu_audit_key); static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { @@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { - if (static_key_false((&mmu_audit_key))) + if (static_branch_unlikely((&mmu_audit_key))) __kvm_mmu_audit(vcpu, point); } @@ -259,7 +259,7 @@ static void mmu_audit_enable(void) if (mmu_audit) return; - static_key_slow_inc(&mmu_audit_key); + static_branch_inc(&mmu_audit_key); mmu_audit = true; } @@ -268,7 +268,7 @@ static void mmu_audit_disable(void) if (!mmu_audit) return; - static_key_slow_dec(&mmu_audit_key); + static_branch_dec(&mmu_audit_key); mmu_audit = false; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ae8efb0442cf..6d3bee0ea958 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7967,7 +7967,6 @@ int kvm_arch_init(void *opaque) supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; } - kvm_lapic_init(); if (pi_inject_timer == -1) pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER); #ifdef 
CONFIG_X86_64 @@ -9991,7 +9990,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (kvm_apicv_activated(vcpu->kvm)) vcpu->arch.apicv_active = true; } else - static_key_slow_inc(&kvm_no_apic_vcpu); + static_branch_inc(&kvm_has_noapic_vcpu); r = -ENOMEM; @@ -10120,7 +10119,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->arch.pio_data); kvfree(vcpu->arch.cpuid_entries); if (!lapic_in_kernel(vcpu)) - static_key_slow_dec(&kvm_no_apic_vcpu); + static_branch_dec(&kvm_has_noapic_vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) @@ -10375,8 +10374,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; } -struct static_key kvm_no_apic_vcpu __read_mostly; -EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu); +__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); +EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { -- cgit v1.2.3 From b6a7cc35440e997a42fa23ad006d5d3ba768007c Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 14 Jan 2021 22:27:54 -0500 Subject: KVM: X86: prepend vmx/svm prefix to additional kvm_x86_ops functions A subsequent patch introduces macros in preparation for simplifying the definition for vmx_x86_ops and svm_x86_ops. Making the naming more uniform expands the coverage of the macros. Add vmx/svm prefix to the following functions: update_exception_bitmap(), enable_nmi_window(), enable_irq_window(), update_cr8_intercept and enable_smi_window(). Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Andrea Arcangeli Signed-off-by: Jason Baron Message-Id: Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 20 ++++++++++---------- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 30 +++++++++++++++--------------- arch/x86/kvm/vmx/vmx.h | 2 +- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8d18f01b29c3..504e9474547b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1829,7 +1829,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, vmcb_mark_dirty(svm->vmcb, VMCB_SEG); } -static void update_exception_bitmap(struct kvm_vcpu *vcpu) +static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3446,7 +3446,7 @@ static void svm_set_irq(struct kvm_vcpu *vcpu) SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; } -static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) +static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3571,7 +3571,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) return !svm_interrupt_blocked(vcpu); } -static void enable_irq_window(struct kvm_vcpu *vcpu) +static void svm_enable_irq_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3595,7 +3595,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) } } -static void enable_nmi_window(struct kvm_vcpu *vcpu) +static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4377,7 +4377,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) return ret; } -static void enable_smi_window(struct kvm_vcpu *vcpu) +static void svm_enable_smi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4531,7 +4531,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { 
.vcpu_blocking = svm_vcpu_blocking, .vcpu_unblocking = svm_vcpu_unblocking, - .update_exception_bitmap = update_exception_bitmap, + .update_exception_bitmap = svm_update_exception_bitmap, .get_msr_feature = svm_get_msr_feature, .get_msr = svm_get_msr, .set_msr = svm_set_msr, @@ -4574,9 +4574,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .nmi_allowed = svm_nmi_allowed, .get_nmi_mask = svm_get_nmi_mask, .set_nmi_mask = svm_set_nmi_mask, - .enable_nmi_window = enable_nmi_window, - .enable_irq_window = enable_irq_window, - .update_cr8_intercept = update_cr8_intercept, + .enable_nmi_window = svm_enable_nmi_window, + .enable_irq_window = svm_enable_irq_window, + .update_cr8_intercept = svm_update_cr8_intercept, .set_virtual_apic_mode = svm_set_virtual_apic_mode, .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, @@ -4619,7 +4619,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .smi_allowed = svm_smi_allowed, .pre_enter_smm = svm_pre_enter_smm, .pre_leave_smm = svm_pre_leave_smm, - .enable_smi_window = enable_smi_window, + .enable_smi_window = svm_enable_smi_window, .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 94cd3f8dd428..459254c4a5f3 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2532,7 +2532,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 043a9bbd5ff1..3b8ada67e852 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -814,7 +814,7 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) return *p; } -void update_exception_bitmap(struct kvm_vcpu *vcpu) +void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; @@ -2788,7 +2788,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); @@ -2868,7 +2868,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); @@ -4519,23 +4519,23 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx_set_cr4(vcpu, 0); vmx_set_efer(vcpu, 0); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); vpid_sync_context(vmx->vpid); if (init_event) vmx_clear_hlt(vcpu); } -static void enable_irq_window(struct kvm_vcpu *vcpu) +static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) { exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); } -static void enable_nmi_window(struct kvm_vcpu *vcpu) +static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu) { if (!enable_vnmi 
|| vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { - enable_irq_window(vcpu); + vmx_enable_irq_window(vcpu); return; } @@ -6210,7 +6210,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) : "eax", "ebx", "ecx", "edx"); } -static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) +static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); int tpr_threshold; @@ -7339,7 +7339,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) set_cr4_guest_host_mask(vmx); /* Refresh #PF interception to account for MAXPHYADDR changes. */ - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); } static __init void vmx_set_cpu_caps(void) @@ -7629,7 +7629,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) return 0; } -static void enable_smi_window(struct kvm_vcpu *vcpu) +static void vmx_enable_smi_window(struct kvm_vcpu *vcpu) { /* RSM will cause a vmexit anyway. */ } @@ -7689,7 +7689,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, - .update_exception_bitmap = update_exception_bitmap, + .update_exception_bitmap = vmx_update_exception_bitmap, .get_msr_feature = vmx_get_msr_feature, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, @@ -7732,9 +7732,9 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, - .enable_nmi_window = enable_nmi_window, - .enable_irq_window = enable_irq_window, - .update_cr8_intercept = update_cr8_intercept, + .enable_nmi_window = vmx_enable_nmi_window, + .enable_irq_window = vmx_enable_irq_window, + .update_cr8_intercept = vmx_update_cr8_intercept, .set_virtual_apic_mode = vmx_set_virtual_apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, @@ -7792,7 +7792,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .smi_allowed = vmx_smi_allowed, .pre_enter_smm = vmx_pre_enter_smm, .pre_leave_smm = vmx_pre_leave_smm, - .enable_smi_window = enable_smi_window, + .enable_smi_window = vmx_enable_smi_window, .can_emulate_instruction = vmx_can_emulate_instruction, .apic_init_signal_blocked = vmx_apic_init_signal_blocked, diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index b5679d1e02c4..f64c9b57ed00 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -378,7 +378,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa, int root_level); -void update_exception_bitmap(struct kvm_vcpu *vcpu); +void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); bool vmx_nmi_blocked(struct kvm_vcpu *vcpu); bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 9af5471bdbb2a26a1a46cd834e9fda6db6a9670e Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 14 Jan 2021 22:27:55 -0500 Subject: KVM: x86: introduce definitions to support static calls for kvm_x86_ops Use static calls to improve kvm_x86_ops performance. Introduce the definitions that will be used by a subsequent patch to actualize the savings. Add a new kvm-x86-ops.h header that can be used for the definition of static calls. This header is also intended to be used to simplify the defition of svm_kvm_ops and vmx_x86_ops. 
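(As an aside on the mechanism: kvm-x86-ops.h is an X-macro "listing header" -- one canonical list of op names that is expanded several times under different definitions of KVM_X86_OP()/KVM_X86_OP_NULL() to emit declarations, definitions and updates without duplicating the list. The toy program below only illustrates that expansion trick in isolation; the names my_ops, MY_OPS_LIST, call_*() and demo_*() are invented for this sketch, and plain function pointers stand in for the kernel's static_call() machinery, so it is not the actual KVM code.)

/* Stand-alone illustration of the X-macro pattern behind kvm-x86-ops.h. */
#include <stdio.h>

/* One central list of ops; the real header plays this role and is
 * #included multiple times rather than being a macro. */
#define MY_OPS_LIST \
	MY_OP(vcpu_load) \
	MY_OP(vcpu_put)

/* Expansion 1: generate the members of the ops structure. */
struct my_ops {
#define MY_OP(func) void (*func)(int cpu);
	MY_OPS_LIST
#undef MY_OP
};

static struct my_ops my_ops;

/* Expansion 2: generate one trampoline per op; in the kernel this is
 * where DECLARE_STATIC_CALL()/DEFINE_STATIC_CALL_NULL() are emitted. */
#define MY_OP(func) \
	static void call_##func(int cpu) { my_ops.func(cpu); }
MY_OPS_LIST
#undef MY_OP

/* A vendor implementation, analogous to vmx_x86_ops/svm_x86_ops. */
static void demo_vcpu_load(int cpu) { printf("load on cpu %d\n", cpu); }
static void demo_vcpu_put(int cpu)  { printf("put on cpu %d\n", cpu); }

int main(void)
{
	my_ops.vcpu_load = demo_vcpu_load;
	my_ops.vcpu_put  = demo_vcpu_put;

	/* Expansion 3: walk every op without spelling the names out,
	 * the way kvm_ops_static_call_update() does. */
#define MY_OP(func) call_##func(0);
	MY_OPS_LIST
#undef MY_OP
	return 0;
}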
Note that all functions in kvm_x86_ops are covered here except for 'pmu_ops' and 'nested ops'. I think they can be covered by static calls in a simlilar manner, but were omitted from this series to reduce scope and because I don't think they have as large of a performance impact. Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Andrea Arcangeli Signed-off-by: Jason Baron Message-Id: Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm-x86-ops.h | 127 +++++++++++++++++++++++++++++++++++++ arch/x86/include/asm/kvm_host.h | 13 ++++ arch/x86/kvm/x86.c | 9 +++ 3 files changed, 149 insertions(+) create mode 100644 arch/x86/include/asm/kvm-x86-ops.h diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h new file mode 100644 index 000000000000..355a2ab8fc09 --- /dev/null +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_NULL) +BUILD_BUG_ON(1) +#endif + +/* + * KVM_X86_OP() and KVM_X86_OP_NULL() are used to help generate + * "static_call()"s. They are also intended for use when defining + * the vmx/svm kvm_x86_ops. KVM_X86_OP() can be used for those + * functions that follow the [svm|vmx]_func_name convention. + * KVM_X86_OP_NULL() can leave a NULL definition for the + * case where there is no definition or a function name that + * doesn't match the typical naming convention is supplied. + */ +KVM_X86_OP_NULL(hardware_enable) +KVM_X86_OP_NULL(hardware_disable) +KVM_X86_OP_NULL(hardware_unsetup) +KVM_X86_OP_NULL(cpu_has_accelerated_tpr) +KVM_X86_OP(has_emulated_msr) +KVM_X86_OP(vcpu_after_set_cpuid) +KVM_X86_OP(vm_init) +KVM_X86_OP_NULL(vm_destroy) +KVM_X86_OP(vcpu_create) +KVM_X86_OP(vcpu_free) +KVM_X86_OP(vcpu_reset) +KVM_X86_OP(prepare_guest_switch) +KVM_X86_OP(vcpu_load) +KVM_X86_OP(vcpu_put) +KVM_X86_OP(update_exception_bitmap) +KVM_X86_OP(get_msr) +KVM_X86_OP(set_msr) +KVM_X86_OP(get_segment_base) +KVM_X86_OP(get_segment) +KVM_X86_OP(get_cpl) +KVM_X86_OP(set_segment) +KVM_X86_OP_NULL(get_cs_db_l_bits) +KVM_X86_OP(set_cr0) +KVM_X86_OP(is_valid_cr4) +KVM_X86_OP(set_cr4) +KVM_X86_OP(set_efer) +KVM_X86_OP(get_idt) +KVM_X86_OP(set_idt) +KVM_X86_OP(get_gdt) +KVM_X86_OP(set_gdt) +KVM_X86_OP(sync_dirty_debug_regs) +KVM_X86_OP(set_dr7) +KVM_X86_OP(cache_reg) +KVM_X86_OP(get_rflags) +KVM_X86_OP(set_rflags) +KVM_X86_OP(tlb_flush_all) +KVM_X86_OP(tlb_flush_current) +KVM_X86_OP_NULL(tlb_remote_flush) +KVM_X86_OP_NULL(tlb_remote_flush_with_range) +KVM_X86_OP(tlb_flush_gva) +KVM_X86_OP(tlb_flush_guest) +KVM_X86_OP(run) +KVM_X86_OP_NULL(handle_exit) +KVM_X86_OP_NULL(skip_emulated_instruction) +KVM_X86_OP_NULL(update_emulated_instruction) +KVM_X86_OP(set_interrupt_shadow) +KVM_X86_OP(get_interrupt_shadow) +KVM_X86_OP(patch_hypercall) +KVM_X86_OP(set_irq) +KVM_X86_OP(set_nmi) +KVM_X86_OP(queue_exception) +KVM_X86_OP(cancel_injection) +KVM_X86_OP(interrupt_allowed) +KVM_X86_OP(nmi_allowed) +KVM_X86_OP(get_nmi_mask) +KVM_X86_OP(set_nmi_mask) +KVM_X86_OP(enable_nmi_window) +KVM_X86_OP(enable_irq_window) +KVM_X86_OP(update_cr8_intercept) +KVM_X86_OP(check_apicv_inhibit_reasons) +KVM_X86_OP_NULL(pre_update_apicv_exec_ctrl) +KVM_X86_OP(refresh_apicv_exec_ctrl) +KVM_X86_OP(hwapic_irr_update) +KVM_X86_OP(hwapic_isr_update) +KVM_X86_OP_NULL(guest_apic_has_interrupt) +KVM_X86_OP(load_eoi_exitmap) +KVM_X86_OP(set_virtual_apic_mode) +KVM_X86_OP_NULL(set_apic_access_page_addr) +KVM_X86_OP(deliver_posted_interrupt) 
+KVM_X86_OP_NULL(sync_pir_to_irr) +KVM_X86_OP(set_tss_addr) +KVM_X86_OP(set_identity_map_addr) +KVM_X86_OP(get_mt_mask) +KVM_X86_OP(load_mmu_pgd) +KVM_X86_OP_NULL(has_wbinvd_exit) +KVM_X86_OP(write_l1_tsc_offset) +KVM_X86_OP(get_exit_info) +KVM_X86_OP(check_intercept) +KVM_X86_OP(handle_exit_irqoff) +KVM_X86_OP_NULL(request_immediate_exit) +KVM_X86_OP(sched_in) +KVM_X86_OP_NULL(slot_enable_log_dirty) +KVM_X86_OP_NULL(slot_disable_log_dirty) +KVM_X86_OP_NULL(flush_log_dirty) +KVM_X86_OP_NULL(enable_log_dirty_pt_masked) +KVM_X86_OP_NULL(cpu_dirty_log_size) +KVM_X86_OP_NULL(pre_block) +KVM_X86_OP_NULL(post_block) +KVM_X86_OP_NULL(vcpu_blocking) +KVM_X86_OP_NULL(vcpu_unblocking) +KVM_X86_OP_NULL(update_pi_irte) +KVM_X86_OP_NULL(apicv_post_state_restore) +KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt) +KVM_X86_OP_NULL(set_hv_timer) +KVM_X86_OP_NULL(cancel_hv_timer) +KVM_X86_OP(setup_mce) +KVM_X86_OP(smi_allowed) +KVM_X86_OP(pre_enter_smm) +KVM_X86_OP(pre_leave_smm) +KVM_X86_OP(enable_smi_window) +KVM_X86_OP_NULL(mem_enc_op) +KVM_X86_OP_NULL(mem_enc_reg_region) +KVM_X86_OP_NULL(mem_enc_unreg_region) +KVM_X86_OP(get_msr_feature) +KVM_X86_OP(can_emulate_instruction) +KVM_X86_OP(apic_init_signal_blocked) +KVM_X86_OP_NULL(enable_direct_tlbflush) +KVM_X86_OP_NULL(migrate_timers) +KVM_X86_OP(msr_filter_changed) +KVM_X86_OP_NULL(complete_emulated_msr) + +#undef KVM_X86_OP +#undef KVM_X86_OP_NULL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 876ff1447337..25502e6a6bc0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1350,6 +1350,19 @@ extern u64 __read_mostly host_efer; extern bool __read_mostly allow_smaller_maxphyaddr; extern struct kvm_x86_ops kvm_x86_ops; +#define KVM_X86_OP(func) \ + DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); +#define KVM_X86_OP_NULL KVM_X86_OP +#include + +static inline void kvm_ops_static_call_update(void) +{ +#define KVM_X86_OP(func) \ + static_call_update(kvm_x86_##func, kvm_x86_ops.func); +#define KVM_X86_OP_NULL KVM_X86_OP +#include +} + #define __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6d3bee0ea958..a8b2aa9ed572 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -114,6 +114,15 @@ static int sync_regs(struct kvm_vcpu *vcpu); struct kvm_x86_ops kvm_x86_ops __read_mostly; EXPORT_SYMBOL_GPL(kvm_x86_ops); +#define KVM_X86_OP(func) \ + DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \ + *(((struct kvm_x86_ops *)0)->func)); +#define KVM_X86_OP_NULL KVM_X86_OP +#include +EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits); +EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg); +EXPORT_STATIC_CALL_GPL(kvm_x86_tlb_flush_current); + static bool __read_mostly ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); -- cgit v1.2.3 From b3646477d458fbe7694a15b9c78fbe2fa426b703 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 14 Jan 2021 22:27:56 -0500 Subject: KVM: x86: use static calls to reduce kvm_x86_ops overhead Convert kvm_x86_ops to use static calls. Note that all kvm_x86_ops are covered here except for 'pmu_ops and 'nested ops'. Here are some numbers running cpuid in a loop of 1 million calls averaged over 5 runs, measured in the vm (lower is better). 
Intel Xeon 3000MHz: |default |mitigations=off ------------------------------------- vanilla |.671s |.486s static call|.573s(-15%)|.458s(-6%) AMD EPYC 2500MHz: |default |mitigations=off ------------------------------------- vanilla |.710s |.609s static call|.664s(-6%) |.609s(0%) Cc: Paolo Bonzini Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: Sean Christopherson Signed-off-by: Jason Baron Message-Id: Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 8 +- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/hyperv.c | 4 +- arch/x86/kvm/irq.c | 3 +- arch/x86/kvm/kvm_cache_regs.h | 10 +- arch/x86/kvm/lapic.c | 30 ++-- arch/x86/kvm/mmu.h | 6 +- arch/x86/kvm/mmu/mmu.c | 15 +- arch/x86/kvm/mmu/spte.c | 2 +- arch/x86/kvm/pmu.c | 2 +- arch/x86/kvm/trace.h | 4 +- arch/x86/kvm/x86.c | 298 ++++++++++++++++++++-------------------- arch/x86/kvm/x86.h | 6 +- 13 files changed, 193 insertions(+), 197 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 25502e6a6bc0..fa7b2df6422b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1374,7 +1374,7 @@ void kvm_arch_free_vm(struct kvm *kvm); static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) { if (kvm_x86_ops.tlb_remote_flush && - !kvm_x86_ops.tlb_remote_flush(kvm)) + !static_call(kvm_x86_tlb_remote_flush)(kvm)) return 0; else return -ENOTSUPP; @@ -1767,14 +1767,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) { - if (kvm_x86_ops.vcpu_blocking) - kvm_x86_ops.vcpu_blocking(vcpu); + static_call_cond(kvm_x86_vcpu_blocking)(vcpu); } static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) { - if (kvm_x86_ops.vcpu_unblocking) - kvm_x86_ops.vcpu_unblocking(vcpu); + static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); } static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ce658c9fa002..ff16734e6b6a 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -182,7 +182,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); /* Invoke the vendor callback only after the above state is updated. 
*/ - kvm_x86_ops.vcpu_after_set_cpuid(vcpu); + static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); } static int is_efer_nx(void) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 922c69dcca4d..5c45d80a7ce3 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1154,7 +1154,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; - kvm_x86_ops.patch_hypercall(vcpu, instructions); + static_call(kvm_x86_patch_hypercall)(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; @@ -1745,7 +1745,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ - if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 814698e5b152..a035cca82f8f 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -143,8 +143,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu) { __kvm_migrate_apic_timer(vcpu); __kvm_migrate_pit_timer(vcpu); - if (kvm_x86_ops.migrate_timers) - kvm_x86_ops.migrate_timers(vcpu); + static_call_cond(kvm_x86_migrate_timers)(vcpu); } bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index a889563ad02d..2e11da2f5621 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) return 0; if (!kvm_register_is_available(vcpu, reg)) - kvm_x86_ops.cache_reg(vcpu, reg); + static_call(kvm_x86_cache_reg)(vcpu, reg); return vcpu->arch.regs[reg]; } @@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) might_sleep(); /* on svm */ if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR); return vcpu->arch.walk_mmu->pdptrs[index]; } @@ -118,7 +118,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; if ((tmask & vcpu->arch.cr0_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR0)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0); return vcpu->arch.cr0 & mask; } @@ -132,14 +132,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; if ((tmask & vcpu->arch.cr4_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR4)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4); return vcpu->arch.cr4 & mask; } static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) { if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3); return vcpu->arch.cr3; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 87f6b5a5a272..9e0f78c0a256 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -484,7 +484,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) if (unlikely(vcpu->arch.apicv_active)) { /* need to update RVI */ 
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); - kvm_x86_ops.hwapic_irr_update(vcpu, + static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); } else { apic->irr_pending = false; @@ -515,7 +515,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic) * just set SVI. */ if (unlikely(vcpu->arch.apicv_active)) - kvm_x86_ops.hwapic_isr_update(vcpu, vec); + static_call(kvm_x86_hwapic_isr_update)(vcpu, vec); else { ++apic->isr_count; BUG_ON(apic->isr_count > MAX_APIC_VECTOR); @@ -563,8 +563,8 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) * and must be left alone. */ if (unlikely(vcpu->arch.apicv_active)) - kvm_x86_ops.hwapic_isr_update(vcpu, - apic_find_highest_isr(apic)); + static_call(kvm_x86_hwapic_isr_update)(vcpu, + apic_find_highest_isr(apic)); else { --apic->isr_count; BUG_ON(apic->isr_count < 0); @@ -701,7 +701,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr) { int highest_irr; if (apic->vcpu->arch.apicv_active) - highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu); + highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu); else highest_irr = apic_find_highest_irr(apic); if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr) @@ -1090,7 +1090,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, apic->regs + APIC_TMR); } - if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) { + if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) { kvm_lapic_set_irr(vector, apic); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); @@ -1814,7 +1814,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic) { WARN_ON(preemptible()); WARN_ON(!apic->lapic_timer.hv_timer_in_use); - kvm_x86_ops.cancel_hv_timer(apic->vcpu); + static_call(kvm_x86_cancel_hv_timer)(apic->vcpu); apic->lapic_timer.hv_timer_in_use = false; } @@ -1831,7 +1831,7 @@ static bool start_hv_timer(struct kvm_lapic *apic) if (!ktimer->tscdeadline) return false; - if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired)) + if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired)) return false; ktimer->hv_timer_in_use = true; @@ -2261,7 +2261,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) - kvm_x86_ops.set_virtual_apic_mode(vcpu); + static_call(kvm_x86_set_virtual_apic_mode)(vcpu); apic->base_address = apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_BASE; @@ -2338,9 +2338,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.pv_eoi.msr_val = 0; apic_update_ppr(apic); if (vcpu->arch.apicv_active) { - kvm_x86_ops.apicv_post_state_restore(vcpu); - kvm_x86_ops.hwapic_irr_update(vcpu, -1); - kvm_x86_ops.hwapic_isr_update(vcpu, -1); + static_call(kvm_x86_apicv_post_state_restore)(vcpu); + static_call(kvm_x86_hwapic_irr_update)(vcpu, -1); + static_call(kvm_x86_hwapic_isr_update)(vcpu, -1); } vcpu->arch.apic_arb_prio = 0; @@ -2601,10 +2601,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) kvm_apic_update_apicv(vcpu); apic->highest_isr_cache = -1; if (vcpu->arch.apicv_active) { - kvm_x86_ops.apicv_post_state_restore(vcpu); - kvm_x86_ops.hwapic_irr_update(vcpu, + static_call(kvm_x86_apicv_post_state_restore)(vcpu); + static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); - kvm_x86_ops.hwapic_isr_update(vcpu, + static_call(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); } 
kvm_make_request(KVM_REQ_EVENT, vcpu); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 2bb42eee5d5d..c68bfc3e2402 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -102,7 +102,7 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) if (!VALID_PAGE(root_hpa)) return; - kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu), + static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu), vcpu->arch.mmu->shadow_root_level); } @@ -174,8 +174,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned pte_access, unsigned pte_pkey, unsigned pfec) { - int cpl = kvm_x86_ops.get_cpl(vcpu); - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + int cpl = static_call(kvm_x86_get_cpl)(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); /* * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1. diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 8740ac1a48cb..bc63406850c9 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -190,7 +190,7 @@ static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm, int ret = -ENOTSUPP; if (range && kvm_x86_ops.tlb_remote_flush_with_range) - ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range); + ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range); if (ret) kvm_flush_remote_tlbs(kvm); @@ -1283,8 +1283,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, gfn_t gfn_offset, unsigned long mask) { if (kvm_x86_ops.enable_log_dirty_pt_masked) - kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset, - mask); + static_call(kvm_x86_enable_log_dirty_pt_masked)(kvm, slot, + gfn_offset, + mask); else kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); } @@ -1292,7 +1293,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, int kvm_cpu_dirty_log_size(void) { if (kvm_x86_ops.cpu_dirty_log_size) - return kvm_x86_ops.cpu_dirty_log_size(); + return static_call(kvm_x86_cpu_dirty_log_size)(); return 0; } @@ -4799,7 +4800,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) if (r) goto out; kvm_mmu_load_pgd(vcpu); - kvm_x86_ops.tlb_flush_current(vcpu); + static_call(kvm_x86_tlb_flush_current)(vcpu); out: return r; } @@ -5080,7 +5081,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, if (is_noncanonical_address(gva, vcpu)) return; - kvm_x86_ops.tlb_flush_gva(vcpu, gva); + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); } if (!mmu->invlpg) @@ -5137,7 +5138,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) } if (tlb_flush) - kvm_x86_ops.tlb_flush_gva(vcpu, gva); + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); ++vcpu->stat.invlpg; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index c51ad544f25b..ef55f0bc4ccf 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -120,7 +120,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, if (level > PG_LEVEL_4K) spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) - spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn, + spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn)); if (host_writable) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 136dc2f3c5d3..827886c12c16 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -373,7 +373,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) return 1; if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) && - (kvm_x86_ops.get_cpl(vcpu) != 0) && + 
(static_call(kvm_x86_get_cpl)(vcpu) != 0) && (kvm_read_cr0(vcpu) & X86_CR0_PE)) return 1; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 2de30c20bc26..5ef238621881 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -256,7 +256,7 @@ TRACE_EVENT(name, \ __entry->guest_rip = kvm_rip_read(vcpu); \ __entry->isa = isa; \ __entry->vcpu_id = vcpu->vcpu_id; \ - kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, \ + static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1, \ &__entry->info2, \ &__entry->intr_info, \ &__entry->error_code); \ @@ -738,7 +738,7 @@ TRACE_EVENT(kvm_emulate_insn, ), TP_fast_assign( - __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS); + __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr - vcpu->arch.emulate_ctxt->fetch.data; __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a8b2aa9ed572..4758afe59795 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -708,7 +708,7 @@ EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { - if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl) + if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; @@ -868,7 +868,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!is_pae(vcpu)) return 1; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } @@ -881,7 +881,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) return 1; - kvm_x86_ops.set_cr0(vcpu, cr0); + static_call(kvm_x86_set_cr0)(vcpu, cr0); kvm_post_set_cr0(vcpu, old_cr0, cr0); @@ -986,7 +986,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (kvm_x86_ops.get_cpl(vcpu) != 0 || + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || __kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; @@ -1003,7 +1003,7 @@ bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) return false; - return kvm_x86_ops.is_valid_cr4(vcpu, cr4); + return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); } EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); @@ -1047,7 +1047,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; } - kvm_x86_ops.set_cr4(vcpu, cr4); + static_call(kvm_x86_set_cr4)(vcpu, cr4); kvm_post_set_cr4(vcpu, old_cr4, cr4); @@ -1130,7 +1130,7 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; - kvm_x86_ops.set_dr7(vcpu, dr7); + static_call(kvm_x86_set_dr7)(vcpu, dr7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; if (dr7 & DR7_BP_EN_MASK) vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; @@ -1442,7 +1442,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) rdmsrl_safe(msr->index, &msr->data); break; default: - return kvm_x86_ops.get_msr_feature(msr); + return static_call(kvm_x86_get_msr_feature)(msr); } return 0; } @@ -1518,7 +1518,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; - r = kvm_x86_ops.set_efer(vcpu, efer); + r = static_call(kvm_x86_set_efer)(vcpu, efer); if (r) { WARN_ON(r > 0); return r; @@ -1615,7 
+1615,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, msr.index = index; msr.host_initiated = host_initiated; - return kvm_x86_ops.set_msr(vcpu, &msr); + return static_call(kvm_x86_set_msr)(vcpu, &msr); } static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, @@ -1648,7 +1648,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, msr.index = index; msr.host_initiated = host_initiated; - ret = kvm_x86_ops.get_msr(vcpu, &msr); + ret = static_call(kvm_x86_get_msr)(vcpu, &msr); if (!ret) *data = msr.data; return ret; @@ -1689,12 +1689,12 @@ static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); } - return kvm_x86_ops.complete_emulated_msr(vcpu, err); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, err); } static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) { - return kvm_x86_ops.complete_emulated_msr(vcpu, vcpu->run->msr.error); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); } static u64 kvm_msr_reason(int r) @@ -1766,7 +1766,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) trace_kvm_msr_read_ex(ecx); } - return kvm_x86_ops.complete_emulated_msr(vcpu, r); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); } EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); @@ -1792,7 +1792,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) else trace_kvm_msr_write_ex(ecx, data); - return kvm_x86_ops.complete_emulated_msr(vcpu, r); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); } EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); @@ -2224,7 +2224,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { vcpu->arch.l1_tsc_offset = offset; - vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); + vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset); } static inline bool kvm_check_tsc_unstable(void) @@ -2970,13 +2970,13 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu) static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_all(vcpu); + static_call(kvm_x86_tlb_flush_all)(vcpu); } static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_guest(vcpu); + static_call(kvm_x86_tlb_flush_guest)(vcpu); } static void record_steal_time(struct kvm_vcpu *vcpu) @@ -3802,10 +3802,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * fringe case that is not enabled except via specific settings * of the module parameters. 
*/ - r = kvm_x86_ops.has_emulated_msr(kvm, MSR_IA32_SMBASE); + r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); break; case KVM_CAP_VAPIC: - r = !kvm_x86_ops.cpu_has_accelerated_tpr(); + r = !static_call(kvm_x86_cpu_has_accelerated_tpr)(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; @@ -3971,14 +3971,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { - if (kvm_x86_ops.has_wbinvd_exit()) + if (static_call(kvm_x86_has_wbinvd_exit)()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } - kvm_x86_ops.vcpu_load(vcpu, cpu); + static_call(kvm_x86_vcpu_load)(vcpu, cpu); /* Save host pkru register if supported */ vcpu->arch.host_pkru = read_pkru(); @@ -4056,10 +4056,10 @@ out: void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { if (vcpu->preempted && !vcpu->arch.guest_state_protected) - vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); + vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); kvm_steal_time_set_preempted(vcpu); - kvm_x86_ops.vcpu_put(vcpu); + static_call(kvm_x86_vcpu_put)(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* * If userspace has set any breakpoints or watchpoints, dr6 is restored @@ -4073,7 +4073,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { if (vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); return kvm_apic_get_state(vcpu, s); } @@ -4183,7 +4183,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; - kvm_x86_ops.setup_mce(vcpu); + static_call(kvm_x86_setup_mce)(vcpu); out: return r; } @@ -4290,11 +4290,11 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; - events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); + events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; - events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); + events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ @@ -4361,13 +4361,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) - kvm_x86_ops.set_interrupt_shadow(vcpu, - events->interrupt.shadow); + static_call(kvm_x86_set_interrupt_shadow)(vcpu, + events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; - kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); + static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && lapic_in_kernel(vcpu)) @@ -4662,7 +4662,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, if (!kvm_x86_ops.enable_direct_tlbflush) return -ENOTTY; - return kvm_x86_ops.enable_direct_tlbflush(vcpu); + return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); case 
KVM_CAP_ENFORCE_PV_FEATURE_CPUID: vcpu->arch.pv_cpuid.enforce = cap->args[0]; @@ -5054,14 +5054,14 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -EINVAL; - ret = kvm_x86_ops.set_tss_addr(kvm, addr); + ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { - return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr); + return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, @@ -5218,8 +5218,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) /* * Flush potentially hardware-cached dirty pages to dirty_bitmap. */ - if (kvm_x86_ops.flush_log_dirty) - kvm_x86_ops.flush_log_dirty(kvm); + static_call_cond(kvm_x86_flush_log_dirty)(kvm); } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, @@ -5701,7 +5700,7 @@ set_pit2_out: case KVM_MEMORY_ENCRYPT_OP: { r = -ENOTTY; if (kvm_x86_ops.mem_enc_op) - r = kvm_x86_ops.mem_enc_op(kvm, argp); + r = static_call(kvm_x86_mem_enc_op)(kvm, argp); break; } case KVM_MEMORY_ENCRYPT_REG_REGION: { @@ -5713,7 +5712,7 @@ set_pit2_out: r = -ENOTTY; if (kvm_x86_ops.mem_enc_reg_region) - r = kvm_x86_ops.mem_enc_reg_region(kvm, ®ion); + r = static_call(kvm_x86_mem_enc_reg_region)(kvm, ®ion); break; } case KVM_MEMORY_ENCRYPT_UNREG_REGION: { @@ -5725,7 +5724,7 @@ set_pit2_out: r = -ENOTTY; if (kvm_x86_ops.mem_enc_unreg_region) - r = kvm_x86_ops.mem_enc_unreg_region(kvm, ®ion); + r = static_call(kvm_x86_mem_enc_unreg_region)(kvm, ®ion); break; } case KVM_HYPERV_EVENTFD: { @@ -5827,7 +5826,7 @@ static void kvm_init_msr_list(void) } for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { - if (!kvm_x86_ops.has_emulated_msr(NULL, emulated_msrs_all[i])) + if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) continue; emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; @@ -5890,13 +5889,13 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { - kvm_x86_ops.set_segment(vcpu, var, seg); + static_call(kvm_x86_set_segment)(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { - kvm_x86_ops.get_segment(vcpu, var, seg); + static_call(kvm_x86_get_segment)(vcpu, var, seg); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, @@ -5916,14 +5915,14 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } @@ -5931,7 +5930,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } @@ -5980,7 +5979,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; unsigned offset; int ret; @@ -6005,7 +6004,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; /* * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED @@ -6026,7 +6025,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt, struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = 0; - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); @@ -6079,7 +6078,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = PFERR_WRITE_MASK; - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, @@ -6104,7 +6103,7 @@ int handle_ud(struct kvm_vcpu *vcpu) char sig[5]; /* ud2; .ascii "kvm" */ struct x86_exception e; - if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0))) + if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0))) return 1; if (force_emulation_prefix && @@ -6138,7 +6137,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { - u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) + u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? 
PFERR_WRITE_MASK : 0); /* @@ -6546,7 +6545,7 @@ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { - return kvm_x86_ops.get_segment_base(vcpu, seg); + return static_call(kvm_x86_get_segment_base)(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) @@ -6559,7 +6558,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; - if (kvm_x86_ops.has_wbinvd_exit()) { + if (static_call(kvm_x86_has_wbinvd_exit)()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); @@ -6664,27 +6663,27 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { - return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt)); + return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); } static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); } static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); } static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); } static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); } static unsigned long emulator_get_cached_segment_base( @@ -6826,7 +6825,7 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage) { - return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage, + return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, &ctxt->exception); } @@ -6864,7 +6863,7 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulon static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) { - kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked); + static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); } static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) @@ -6880,7 +6879,7 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, const char *smstate) { - return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate); + return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate); } static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt) @@ -6942,7 +6941,7 @@ static const struct x86_emulate_ops emulate_ops = { static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) { - u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); + u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); /* * an sti; sti; sequence only disable interrupts for the first * instruction. 
So, if the last instruction, be it emulated or @@ -6953,7 +6952,7 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) if (int_shadow & mask) mask = 0; if (unlikely(int_shadow || mask)) { - kvm_x86_ops.set_interrupt_shadow(vcpu, mask); + static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); if (!mask) kvm_make_request(KVM_REQ_EVENT, vcpu); } @@ -6995,7 +6994,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; int cs_db, cs_l; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); ctxt->gpa_available = false; ctxt->eflags = kvm_get_rflags(vcpu); @@ -7056,7 +7055,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) kvm_queue_exception(vcpu, UD_VECTOR); - if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) { + if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; @@ -7237,10 +7236,10 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) { - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); int r; - r = kvm_x86_ops.skip_emulated_instruction(vcpu); + r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); if (unlikely(!r)) return 0; @@ -7370,7 +7369,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, bool writeback = true; bool write_fault_to_spt; - if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len))) + if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len))) return 1; vcpu->arch.l1tf_flush_l1d = true; @@ -7493,7 +7492,7 @@ restart: r = 1; if (writeback) { - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; if (!ctxt->have_exception || @@ -7502,7 +7501,7 @@ restart: if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) r = kvm_vcpu_do_singlestep(vcpu); if (kvm_x86_ops.update_emulated_instruction) - kvm_x86_ops.update_emulated_instruction(vcpu); + static_call(kvm_x86_update_emulated_instruction)(vcpu); __kvm_set_rflags(vcpu, ctxt->eflags); } @@ -7831,7 +7830,7 @@ static int kvm_is_user_mode(void) int user_mode = 3; if (__this_cpu_read(current_vcpu)) - user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu)); + user_mode = static_call(kvm_x86_get_cpl)(__this_cpu_read(current_vcpu)); return user_mode != 0; } @@ -8164,7 +8163,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) a3 &= 0xFFFFFFFF; } - if (kvm_x86_ops.get_cpl(vcpu) != 0) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { ret = -KVM_EPERM; goto out; } @@ -8221,7 +8220,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); - kvm_x86_ops.patch_hypercall(vcpu, instruction); + static_call(kvm_x86_patch_hypercall)(vcpu, instruction); return emulator_write_emulated(ctxt, rip, instruction, 3, &ctxt->exception); @@ -8278,7 +8277,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) tpr = kvm_lapic_get_cr8(vcpu); - kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr); + static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); } 
static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) @@ -8289,7 +8288,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit /* try to reinject previous events if any */ if (vcpu->arch.exception.injected) { - kvm_x86_ops.queue_exception(vcpu); + static_call(kvm_x86_queue_exception)(vcpu); can_inject = false; } /* @@ -8308,10 +8307,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit */ else if (!vcpu->arch.exception.pending) { if (vcpu->arch.nmi_injected) { - kvm_x86_ops.set_nmi(vcpu); + static_call(kvm_x86_set_nmi)(vcpu); can_inject = false; } else if (vcpu->arch.interrupt.injected) { - kvm_x86_ops.set_irq(vcpu); + static_call(kvm_x86_set_irq)(vcpu); can_inject = false; } } @@ -8352,7 +8351,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit } } - kvm_x86_ops.queue_exception(vcpu); + static_call(kvm_x86_queue_exception)(vcpu); can_inject = false; } @@ -8368,7 +8367,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit * The kvm_x86_ops hooks communicate this by returning -EBUSY. */ if (vcpu->arch.smi_pending) { - r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { @@ -8377,35 +8376,35 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit enter_smm(vcpu); can_inject = false; } else - kvm_x86_ops.enable_smi_window(vcpu); + static_call(kvm_x86_enable_smi_window)(vcpu); } if (vcpu->arch.nmi_pending) { - r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; - kvm_x86_ops.set_nmi(vcpu); + static_call(kvm_x86_set_nmi)(vcpu); can_inject = false; - WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0); + WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); } if (vcpu->arch.nmi_pending) - kvm_x86_ops.enable_nmi_window(vcpu); + static_call(kvm_x86_enable_nmi_window)(vcpu); } if (kvm_cpu_has_injectable_intr(vcpu)) { - r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); - kvm_x86_ops.set_irq(vcpu); - WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0); + static_call(kvm_x86_set_irq)(vcpu); + WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); } if (kvm_cpu_has_injectable_intr(vcpu)) - kvm_x86_ops.enable_irq_window(vcpu); + static_call(kvm_x86_enable_irq_window)(vcpu); } if (is_guest_mode(vcpu) && @@ -8430,7 +8429,7 @@ static void process_nmi(struct kvm_vcpu *vcpu) * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). 
*/ - if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) + if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); @@ -8520,11 +8519,11 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7f7c, seg.limit); put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); put_smstate(u32, buf, 0x7f74, dt.address); put_smstate(u32, buf, 0x7f70, dt.size); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); put_smstate(u32, buf, 0x7f58, dt.address); put_smstate(u32, buf, 0x7f54, dt.size); @@ -8574,7 +8573,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7e94, seg.limit); put_smstate(u64, buf, 0x7e98, seg.base); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); put_smstate(u32, buf, 0x7e84, dt.size); put_smstate(u64, buf, 0x7e88, dt.address); @@ -8584,7 +8583,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7e74, seg.limit); put_smstate(u64, buf, 0x7e78, seg.base); - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); put_smstate(u32, buf, 0x7e64, dt.size); put_smstate(u64, buf, 0x7e68, dt.address); @@ -8614,28 +8613,28 @@ static void enter_smm(struct kvm_vcpu *vcpu) * vCPU state (e.g. leave guest mode) after we've saved the state into * the SMM state-save area. */ - kvm_x86_ops.pre_enter_smm(vcpu, buf); + static_call(kvm_x86_pre_enter_smm)(vcpu, buf); vcpu->arch.hflags |= HF_SMM_MASK; kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); - if (kvm_x86_ops.get_nmi_mask(vcpu)) + if (static_call(kvm_x86_get_nmi_mask)(vcpu)) vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; else - kvm_x86_ops.set_nmi_mask(vcpu, true); + static_call(kvm_x86_set_nmi_mask)(vcpu, true); kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_rip_write(vcpu, 0x8000); cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); - kvm_x86_ops.set_cr0(vcpu, cr0); + static_call(kvm_x86_set_cr0)(vcpu, cr0); vcpu->arch.cr0 = cr0; - kvm_x86_ops.set_cr4(vcpu, 0); + static_call(kvm_x86_set_cr4)(vcpu, 0); /* Undocumented: IDT limit is set to zero on entry to SMM. 
*/ dt.address = dt.size = 0; - kvm_x86_ops.set_idt(vcpu, &dt); + static_call(kvm_x86_set_idt)(vcpu, &dt); __kvm_set_dr(vcpu, 7, DR7_FIXED_1); @@ -8666,7 +8665,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) - kvm_x86_ops.set_efer(vcpu, 0); + static_call(kvm_x86_set_efer)(vcpu, 0); #endif kvm_update_cpuid_runtime(vcpu); @@ -8704,7 +8703,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); kvm_apic_update_apicv(vcpu); - kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu); + static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); } EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); @@ -8721,7 +8720,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) unsigned long old, new, expected; if (!kvm_x86_ops.check_apicv_inhibit_reasons || - !kvm_x86_ops.check_apicv_inhibit_reasons(bit)) + !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit)) return; old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); @@ -8741,7 +8740,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) trace_kvm_apicv_update_request(activate, bit); if (kvm_x86_ops.pre_update_apicv_exec_ctrl) - kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate); + static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate); /* * Sending request to update APICV for all other vcpus, @@ -8767,7 +8766,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); else { if (vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); if (ioapic_in_kernel(vcpu->kvm)) kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); } @@ -8787,7 +8786,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, vcpu_to_synic(vcpu)->vec_bitmap, 256); - kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); + static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); } void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, @@ -8812,7 +8811,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) if (!kvm_x86_ops.set_apic_access_page_addr) return; - kvm_x86_ops.set_apic_access_page_addr(vcpu); + static_call(kvm_x86_set_apic_access_page_addr)(vcpu); } void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) @@ -8955,7 +8954,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) kvm_check_async_pf_completion(vcpu); if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) - kvm_x86_ops.msr_filter_changed(vcpu); + static_call(kvm_x86_msr_filter_changed)(vcpu); } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { @@ -8968,7 +8967,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) inject_pending_event(vcpu, &req_immediate_exit); if (req_int_win) - kvm_x86_ops.enable_irq_window(vcpu); + static_call(kvm_x86_enable_irq_window)(vcpu); if (kvm_lapic_enabled(vcpu)) { update_cr8_intercept(vcpu); @@ -8983,7 +8982,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) preempt_disable(); - kvm_x86_ops.prepare_guest_switch(vcpu); + static_call(kvm_x86_prepare_guest_switch)(vcpu); /* * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt @@ -9014,7 +9013,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * notified with kvm_vcpu_kick. 
*/ if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); if (kvm_vcpu_exit_request(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; @@ -9028,7 +9027,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (req_immediate_exit) { kvm_make_request(KVM_REQ_EVENT, vcpu); - kvm_x86_ops.request_immediate_exit(vcpu); + static_call(kvm_x86_request_immediate_exit)(vcpu); } fpregs_assert_state_consistent(); @@ -9045,7 +9044,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; } - exit_fastpath = kvm_x86_ops.run(vcpu); + exit_fastpath = static_call(kvm_x86_run)(vcpu); /* * Do this here before restoring debug registers on the host. And @@ -9055,7 +9054,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); - kvm_x86_ops.sync_dirty_debug_regs(vcpu); + static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); kvm_update_dr0123(vcpu); kvm_update_dr7(vcpu); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; @@ -9077,7 +9076,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); - kvm_x86_ops.handle_exit_irqoff(vcpu); + static_call(kvm_x86_handle_exit_irqoff)(vcpu); /* * Consume any pending interrupts, including the possible source of @@ -9119,13 +9118,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); - r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); + r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); return r; cancel_injection: if (req_immediate_exit) kvm_make_request(KVM_REQ_EVENT, vcpu); - kvm_x86_ops.cancel_injection(vcpu); + static_call(kvm_x86_cancel_injection)(vcpu); if (unlikely(vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic(vcpu); out: @@ -9135,13 +9134,13 @@ out: static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) { if (!kvm_arch_vcpu_runnable(vcpu) && - (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) { + (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_x86_ops.post_block) - kvm_x86_ops.post_block(vcpu); + static_call(kvm_x86_post_block)(vcpu); if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) return 1; @@ -9537,10 +9536,10 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; @@ -9693,10 +9692,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) dt.size = sregs->idt.limit; dt.address = sregs->idt.base; - kvm_x86_ops.set_idt(vcpu, &dt); + static_call(kvm_x86_set_idt)(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; - kvm_x86_ops.set_gdt(vcpu, &dt); + static_call(kvm_x86_set_gdt)(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; @@ -9706,14 +9705,14 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= 
vcpu->arch.efer != sregs->efer; - kvm_x86_ops.set_efer(vcpu, sregs->efer); + static_call(kvm_x86_set_efer)(vcpu, sregs->efer); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; - kvm_x86_ops.set_cr0(vcpu, sregs->cr0); + static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; - kvm_x86_ops.set_cr4(vcpu, sregs->cr4); + static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { @@ -9821,7 +9820,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, */ kvm_set_rflags(vcpu, rflags); - kvm_x86_ops.update_exception_bitmap(vcpu); + static_call(kvm_x86_update_exception_bitmap)(vcpu); r = 0; @@ -10048,7 +10047,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) kvm_hv_vcpu_init(vcpu); - r = kvm_x86_ops.vcpu_create(vcpu); + r = static_call(kvm_x86_vcpu_create)(vcpu); if (r) goto free_guest_fpu; @@ -10111,7 +10110,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvmclock_reset(vcpu); - kvm_x86_ops.vcpu_free(vcpu); + static_call(kvm_x86_vcpu_free)(vcpu); kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); @@ -10200,7 +10199,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.ia32_xss = 0; - kvm_x86_ops.vcpu_reset(vcpu, init_event); + static_call(kvm_x86_vcpu_reset)(vcpu, init_event); } void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) @@ -10226,7 +10225,7 @@ int kvm_arch_hardware_enable(void) bool stable, backwards_tsc = false; kvm_user_return_msr_cpu_online(); - ret = kvm_x86_ops.hardware_enable(); + ret = static_call(kvm_x86_hardware_enable)(); if (ret != 0) return ret; @@ -10308,7 +10307,7 @@ int kvm_arch_hardware_enable(void) void kvm_arch_hardware_disable(void) { - kvm_x86_ops.hardware_disable(); + static_call(kvm_x86_hardware_disable)(); drop_user_return_notifiers(); } @@ -10327,6 +10326,7 @@ int kvm_arch_hardware_setup(void *opaque) return r; memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); + kvm_ops_static_call_update(); if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) supported_xss = 0; @@ -10355,7 +10355,7 @@ int kvm_arch_hardware_setup(void *opaque) void kvm_arch_hardware_unsetup(void) { - kvm_x86_ops.hardware_unsetup(); + static_call(kvm_x86_hardware_unsetup)(); } int kvm_arch_check_processor_compat(void *opaque) @@ -10395,7 +10395,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) pmu->need_cleanup = true; kvm_make_request(KVM_REQ_PMU, vcpu); } - kvm_x86_ops.sched_in(vcpu, cpu); + static_call(kvm_x86_sched_in)(vcpu, cpu); } void kvm_arch_free_vm(struct kvm *kvm) @@ -10439,7 +10439,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_page_track_init(kvm); kvm_mmu_init_vm(kvm); - return kvm_x86_ops.vm_init(kvm); + return static_call(kvm_x86_vm_init)(kvm); } int kvm_arch_post_init_vm(struct kvm *kvm) @@ -10584,8 +10584,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); mutex_unlock(&kvm->slots_lock); } - if (kvm_x86_ops.vm_destroy) - kvm_x86_ops.vm_destroy(kvm); + static_call_cond(kvm_x86_vm_destroy)(kvm); for (i = 0; i < kvm->arch.msr_filter.count; i++) kfree(kvm->arch.msr_filter.ranges[i].bitmap); kvm_pic_destroy(kvm); @@ -10776,7 +10775,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, */ if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { if (kvm_x86_ops.slot_enable_log_dirty) { - kvm_x86_ops.slot_enable_log_dirty(kvm, new); + 
static_call(kvm_x86_slot_enable_log_dirty)(kvm, new); } else { int level = kvm_dirty_log_manual_protect_and_init_set(kvm) ? @@ -10793,8 +10792,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, kvm_mmu_slot_remove_write_access(kvm, new, level); } } else { - if (kvm_x86_ops.slot_disable_log_dirty) - kvm_x86_ops.slot_disable_log_dirty(kvm, new); + static_call_cond(kvm_x86_slot_disable_log_dirty)(kvm, new); } } @@ -10833,7 +10831,7 @@ static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) { return (is_guest_mode(vcpu) && kvm_x86_ops.guest_apic_has_interrupt && - kvm_x86_ops.guest_apic_has_interrupt(vcpu)); + static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); } static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) @@ -10852,12 +10850,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_NMI, vcpu) || (vcpu->arch.nmi_pending && - kvm_x86_ops.nmi_allowed(vcpu, false))) + static_call(kvm_x86_nmi_allowed)(vcpu, false))) return true; if (kvm_test_request(KVM_REQ_SMI, vcpu) || (vcpu->arch.smi_pending && - kvm_x86_ops.smi_allowed(vcpu, false))) + static_call(kvm_x86_smi_allowed)(vcpu, false))) return true; if (kvm_arch_interrupt_allowed(vcpu) && @@ -10891,7 +10889,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) kvm_test_request(KVM_REQ_EVENT, vcpu)) return true; - if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) + if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) return true; return false; @@ -10909,7 +10907,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { - return kvm_x86_ops.interrupt_allowed(vcpu, false); + return static_call(kvm_x86_interrupt_allowed)(vcpu, false); } unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) @@ -10935,7 +10933,7 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; - rflags = kvm_x86_ops.get_rflags(vcpu); + rflags = static_call(kvm_x86_get_rflags)(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; @@ -10947,7 +10945,7 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; - kvm_x86_ops.set_rflags(vcpu, rflags); + static_call(kvm_x86_set_rflags)(vcpu, rflags); } void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) @@ -11077,7 +11075,7 @@ static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) return false; if (!kvm_pv_async_pf_enabled(vcpu) || - (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) + (vcpu->arch.apf.send_user_only && static_call(kvm_x86_get_cpl)(vcpu) == 0)) return false; return true; @@ -11222,7 +11220,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, irqfd->producer = prod; kvm_arch_start_assignment(irqfd->kvm); - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, + ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 1); if (ret) @@ -11247,7 +11245,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, * when the irq is masked/disabled or the consumer side (KVM * int this case doesn't want to receive the interrupts. 
*/ - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); + ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); if (ret) printk(KERN_INFO "irq bypass consumer (token %p) unregistration" " fails: %d\n", irqfd->consumer.token, ret); @@ -11258,7 +11256,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { - return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set); + return static_call(kvm_x86_update_pi_irte)(kvm, host_irq, guest_irq, set); } bool kvm_vector_hashing_enabled(void) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index f88045a8af53..5f7c224f4bf2 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -98,7 +98,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) if (!is_long_mode(vcpu)) return false; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); return cs_l; } @@ -129,7 +129,7 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_current(vcpu); + static_call(kvm_x86_tlb_flush_current)(vcpu); } static inline int is_pae(struct kvm_vcpu *vcpu) @@ -244,7 +244,7 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) { - return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu); + return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu); } void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); -- cgit v1.2.3 From aec511ad153556640fb1de38bfe00c69464f997f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 30 Dec 2020 16:26:54 -0800 Subject: x86/virt: Eat faults on VMXOFF in reboot flows Silently ignore all faults on VMXOFF in the reboot flows as such faults are all but guaranteed to be due to the CPU not being in VMX root. Because (a) VMXOFF may be executed in NMI context, e.g. after VMXOFF but before CR4.VMXE is cleared, (b) there's no way to query the CPU's VMX state without faulting, and (c) the whole point is to get out of VMX root, eating faults is the simplest way to achieve the desired behavior. Technically, VMXOFF can fault (or fail) for other reasons, but all other fault and failure scenarios are mode related, i.e. the kernel would have to magically end up in RM, V86, compat mode, at CPL>0, or running with the SMI Transfer Monitor active. The kernel is beyond hosed if any of those scenarios are encountered; trying to do something fancy in the error path to handle them cleanly is pointless. Fixes: 1e9931146c74 ("x86: asm/virtext.h: add cpu_vmxoff() inline function") Reported-by: David P.
Reed Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20201231002702.2223707-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/virtext.h | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 9aad0e0876fb..fda3e7747c22 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void) } -/** Disable VMX on the current CPU +/** + * cpu_vmxoff() - Disable VMX on the current CPU * - * vmxoff causes a undefined-opcode exception if vmxon was not run - * on the CPU previously. Only call this function if you know VMX - * is enabled. + * Disable VMX and clear CR4.VMXE (even if VMXOFF faults) + * + * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to + * atomically track post-VMXON state, e.g. this may be called in NMI context. + * Eat all faults as all other faults on VMXOFF faults are mode related, i.e. + * faults are guaranteed to be due to the !post-VMXON check unless the CPU is + * magically in RM, VM86, compat mode, or at CPL>0. */ static inline void cpu_vmxoff(void) { - asm volatile ("vmxoff"); + asm_volatile_goto("1: vmxoff\n\t" + _ASM_EXTABLE(1b, %l[fault]) :::: fault); +fault: cr4_clear_bits(X86_CR4_VMXE); } -- cgit v1.2.3 From ed72736183c45a413a8d6974dd04be90f514cb6b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 30 Dec 2020 16:26:55 -0800 Subject: x86/reboot: Force all cpus to exit VMX root if VMX is supported Force all CPUs to do VMXOFF (via NMI shootdown) during an emergency reboot if VMX is _supported_, as VMX being off on the current CPU does not prevent other CPUs from being in VMX root (post-VMXON). This fixes a bug where a crash/panic reboot could leave other CPUs in VMX root and prevent them from being woken via INIT-SIPI-SIPI in the new kernel. Fixes: d176720d34c7 ("x86: disable VMX on all CPUs on reboot") Cc: stable@vger.kernel.org Suggested-by: Sean Christopherson Signed-off-by: David P. Reed [sean: reworked changelog and further tweaked comment] Signed-off-by: Sean Christopherson Message-Id: <20201231002702.2223707-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kernel/reboot.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index db115943e8bd..efbaef8b4de9 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -538,31 +538,21 @@ static void emergency_vmx_disable_all(void) local_irq_disable(); /* - * We need to disable VMX on all CPUs before rebooting, otherwise - * we risk hanging up the machine, because the CPU ignores INIT - * signals when VMX is enabled. + * Disable VMX on all CPUs before rebooting, otherwise we risk hanging + * the machine, because the CPU blocks INIT when it's in VMX root. * - * We can't take any locks and we may be on an inconsistent - * state, so we use NMIs as IPIs to tell the other CPUs to disable - * VMX and halt. + * We can't take any locks and we may be on an inconsistent state, so + * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. * - * For safety, we will avoid running the nmi_shootdown_cpus() - * stuff unnecessarily, but we don't have a way to check - * if other CPUs have VMX enabled. So we will call it only if the - * CPU we are running on has VMX enabled. - * - * We will miss cases where VMX is not enabled on all CPUs. 
This - * shouldn't do much harm because KVM always enable VMX on all - * CPUs anyway. But we can miss it on the small window where KVM - * is still enabling VMX. + * Do the NMI shootdown even if VMX if off on _this_ CPU, as that + * doesn't prevent a different CPU from being in VMX root operation. */ - if (cpu_has_vmx() && cpu_vmx_enabled()) { - /* Disable VMX on this CPU. */ - cpu_vmxoff(); + if (cpu_has_vmx()) { + /* Safely force _this_ CPU out of VMX root operation. */ + __cpu_emergency_vmxoff(); - /* Halt and disable VMX on the other CPUs */ + /* Halt and exit VMX root operation on the other CPUs. */ nmi_shootdown_cpus(vmxoff_nmi); - } } -- cgit v1.2.3 From 53666664a3052e4ea3ddcb183460dfbc30f1d056 Mon Sep 17 00:00:00 2001 From: "David P. Reed" Date: Wed, 30 Dec 2020 16:26:56 -0800 Subject: x86/virt: Mark flags and memory as clobbered by VMXOFF Explicitly tell the compiler that VMXOFF modifies flags (like all VMX instructions), and mark memory as clobbered since VMXOFF must not be reordered and also may have memory side effects (though the kernel really shouldn't be accessing the root VMCS anyways). Practically speaking, adding the clobbers is most likely a nop; the primary motivation is to properly document VMXOFF's behavior. For the flags clobber, both Clang and GCC automatically mark flags as clobbered; this is noted in commit 4b1e54786e48 ("KVM/x86: Use assembly instruction mnemonics instead of .byte streams"), which intentionally removed the previous clobber. But, neither Clang nor GCC documents this behavior, and there's no downside to including the clobber. For the memory clobber, the RFLAGS.IF and CR4.VMXE manipulations that immediately follow VMXOFF have compiler barriers of their own, i.e. VMXOFF can't get reordered after clearing CR4.VMXE, which is really what's of interest. Cc: Randy Dunlap Signed-off-by: David P. Reed [sean: rewrote changelog, dropped comment adjustments] Signed-off-by: Sean Christopherson Message-Id: <20201231002702.2223707-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/virtext.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index fda3e7747c22..2cc585467667 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -44,7 +44,8 @@ static inline int cpu_has_vmx(void) static inline void cpu_vmxoff(void) { asm_volatile_goto("1: vmxoff\n\t" - _ASM_EXTABLE(1b, %l[fault]) :::: fault); + _ASM_EXTABLE(1b, %l[fault]) + ::: "cc", "memory" : fault); fault: cr4_clear_bits(X86_CR4_VMXE); } -- cgit v1.2.3 From 150f17bfab37e981ba03b37440638138ff2aa9ec Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Wed, 30 Dec 2020 16:26:57 -0800 Subject: KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw Replace inline assembly in nested_vmx_check_vmentry_hw with a call to __vmx_vcpu_run. The function is not performance critical, so (double) GPR save/restore in __vmx_vcpu_run can be tolerated, as far as performance effects are concerned. 
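Several patches in this series rely on the asm-goto form of inline assembly (the reworked cpu_vmxoff() above is one example), where a faulting instruction is routed to a C label via the exception table instead of being wrapped in __ex(). The stand-alone sketch below illustrates only the control-flow idiom: try_op() is a made-up name, and a plain conditional branch stands in for the _ASM_EXTABLE() fixup, which has no userspace equivalent.

#include <stdio.h>

static int try_op(int x)
{
        /* Jump from the asm template straight to a C label. */
        asm goto("test %0, %0\n\t"
                 "jz %l[fault]"
                 : /* asm goto takes no outputs */
                 : "r"(x)
                 : "cc"
                 : fault);
        return 0;       /* "success" path, analogous to VMXOFF completing */
fault:
        return -1;      /* "fault" path, analogous to the extable target */
}

int main(void)
{
        printf("%d %d\n", try_op(1), try_op(0));        /* prints "0 -1" */
        return 0;
}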
Cc: Paolo Bonzini Cc: Sean Christopherson Reviewed-and-tested-by: Sean Christopherson Signed-off-by: Uros Bizjak [sean: dropped versioning info from changelog] Signed-off-by: Sean Christopherson Message-Id: <20201231002702.2223707-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 32 +++----------------------------- arch/x86/kvm/vmx/vmenter.S | 2 +- arch/x86/kvm/vmx/vmx.c | 2 -- arch/x86/kvm/vmx/vmx.h | 1 + 4 files changed, 5 insertions(+), 32 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 459254c4a5f3..887283b1df43 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -12,6 +12,7 @@ #include "nested.h" #include "pmu.h" #include "trace.h" +#include "vmx.h" #include "x86.h" static bool __read_mostly enable_shadow_vmcs = 1; @@ -3057,35 +3058,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) vmx->loaded_vmcs->host_state.cr4 = cr4; } - asm( - "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ - "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" - "je 1f \n\t" - __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t" - "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" - "1: \n\t" - "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ - - /* Check if vmlaunch or vmresume is needed */ - "cmpb $0, %c[launched](%[loaded_vmcs])\n\t" - - /* - * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set - * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail - * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the - * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail. - */ - "call vmx_vmenter\n\t" - - CC_SET(be) - : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail) - : [HOST_RSP]"r"((unsigned long)HOST_RSP), - [loaded_vmcs]"r"(vmx->loaded_vmcs), - [launched]"i"(offsetof(struct loaded_vmcs, launched)), - [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)), - [wordsize]"i"(sizeof(ulong)) - : "memory" - ); + vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, + vmx->loaded_vmcs->launched); if (vmx->msr_autoload.host.nr) vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index e85aa5faa22d..3a6461694fc2 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -44,7 +44,7 @@ * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump * to vmx_vmexit. 
*/ -SYM_FUNC_START(vmx_vmenter) +SYM_FUNC_START_LOCAL(vmx_vmenter) /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */ je 2f diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 3b8ada67e852..2a6d29c47b31 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6658,8 +6658,6 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) } } -bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); - static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) { diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index f64c9b57ed00..12c53d05a902 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -388,6 +388,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu); struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr); void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu); void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); +bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr); void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, -- cgit v1.2.3 From 5ef940bd9ac267e5764ee886956352935dc7bad3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 30 Dec 2020 16:26:58 -0800 Subject: KVM: VMX: Move Intel PT shenanigans out of VMXON/VMXOFF flows Move the Intel PT tracking outside of the VMXON/VMXOFF helpers so that a future patch can drop KVM's kvm_cpu_vmxoff() in favor of the kernel's cpu_vmxoff() without an associated PT functional change, and without losing symmetry between the VMXON and VMXOFF flows. Barring undocumented behavior, this should have no meaningful effects as Intel PT behavior does not interact with CR4.VMXE. Signed-off-by: Sean Christopherson Message-Id: <20201231002702.2223707-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 2a6d29c47b31..55ae56c24468 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2313,7 +2313,6 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer) u64 msr; cr4_set_bits(X86_CR4_VMXE); - intel_pt_handle_vmx(1); asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, %l[fault]) @@ -2324,7 +2323,6 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer) fault: WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n", rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr); - intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); return -EFAULT; @@ -2347,9 +2345,13 @@ static int hardware_enable(void) !hv_get_vp_assist_page(cpu)) return -EFAULT; + intel_pt_handle_vmx(1); + r = kvm_cpu_vmxon(phys_addr); - if (r) + if (r) { + intel_pt_handle_vmx(0); return r; + } if (enable_ept) ept_sync_global(); @@ -2375,7 +2377,6 @@ static void kvm_cpu_vmxoff(void) { asm volatile (__ex("vmxoff")); - intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); } @@ -2383,6 +2384,8 @@ static void hardware_disable(void) { vmclear_local_loaded_vmcss(); kvm_cpu_vmxoff(); + + intel_pt_handle_vmx(0); } /* -- cgit v1.2.3 From 6a289139479845f12e44108b4d52cf0194bd5ff3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 30 Dec 2020 16:26:59 -0800 Subject: KVM: VMX: Use the kernel's version of VMXOFF Drop kvm_cpu_vmxoff() in favor of the kernel's cpu_vmxoff(). 
Modify the latter to return -EIO on fault so that KVM can invoke kvm_spurious_fault() when appropriate. In addition to the obvious code reuse, dropping kvm_cpu_vmxoff() also eliminates VMX's last usage of the __ex()/__kvm_handle_fault_on_reboot() macros, thus helping pave the way toward dropping them entirely. Signed-off-by: Sean Christopherson Message-Id: <20201231002702.2223707-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/virtext.h | 7 ++++++- arch/x86/kvm/vmx/vmx.c | 15 +++------------ 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 2cc585467667..8757078d4442 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -41,13 +41,18 @@ static inline int cpu_has_vmx(void) * faults are guaranteed to be due to the !post-VMXON check unless the CPU is * magically in RM, VM86, compat mode, or at CPL>0. */ -static inline void cpu_vmxoff(void) +static inline int cpu_vmxoff(void) { asm_volatile_goto("1: vmxoff\n\t" _ASM_EXTABLE(1b, %l[fault]) ::: "cc", "memory" : fault); + + cr4_clear_bits(X86_CR4_VMXE); + return 0; + fault: cr4_clear_bits(X86_CR4_VMXE); + return -EIO; } static inline int cpu_vmx_enabled(void) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 55ae56c24468..02949a650cd1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2369,21 +2369,12 @@ static void vmclear_local_loaded_vmcss(void) __loaded_vmcs_clear(v); } - -/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() - * tricks. - */ -static void kvm_cpu_vmxoff(void) -{ - asm volatile (__ex("vmxoff")); - - cr4_clear_bits(X86_CR4_VMXE); -} - static void hardware_disable(void) { vmclear_local_loaded_vmcss(); - kvm_cpu_vmxoff(); + + if (cpu_vmxoff()) + kvm_spurious_fault(); intel_pt_handle_vmx(0); } -- cgit v1.2.3 From 35a7831912f455d7d19b31cd9300e73f585a077b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 30 Dec 2020 16:27:00 -0800 Subject: KVM: SVM: Use asm goto to handle unexpected #UD on SVM instructions Add svm_asm*() macros, a la the existing vmx_asm*() macros, to handle faults on SVM instructions instead of using the generic __ex(), a.k.a. __kvm_handle_fault_on_reboot(). Using asm goto generates slightly better code as it eliminates the in-line JMP+CALL sequences that are needed by __kvm_handle_fault_on_reboot() to avoid triggering BUG() from fixup (which generates bad stack traces). Using SVM specific macros also drops the last user of __ex() and the the last asm linkage to kvm_spurious_fault(), and adds a helper for VMSAVE, which may gain an addition call site in the future (as part of optimizing the SVM context switching). Signed-off-by: Sean Christopherson Message-Id: <20201231002702.2223707-8-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 3 ++- arch/x86/kvm/svm/svm.c | 16 +----------- arch/x86/kvm/svm/svm_ops.h | 64 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+), 16 deletions(-) create mode 100644 arch/x86/kvm/svm/svm_ops.h diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 8dfe8988be8d..a3e2b29f484d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -22,6 +22,7 @@ #include "x86.h" #include "svm.h" +#include "svm_ops.h" #include "cpuid.h" #include "trace.h" @@ -2076,7 +2077,7 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) * of which one step is to perform a VMLOAD. 
Since hardware does not * perform a VMSAVE on VMRUN, the host savearea must be updated. */ - asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory"); + vmsave(__sme_page_pa(sd->save_area)); /* * Certain MSRs are restored on VMEXIT, only save ones that aren't diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 504e9474547b..58f771b48c2e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -41,6 +41,7 @@ #include "trace.h" #include "svm.h" +#include "svm_ops.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) @@ -248,21 +249,6 @@ u32 svm_msrpm_offset(u32 msr) #define MAX_INST_SIZE 15 -static inline void clgi(void) -{ - asm volatile (__ex("clgi")); -} - -static inline void stgi(void) -{ - asm volatile (__ex("stgi")); -} - -static inline void invlpga(unsigned long addr, u32 asid) -{ - asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr)); -} - static int get_max_npt_level(void) { #ifdef CONFIG_X86_64 diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h new file mode 100644 index 000000000000..9f007bc8409a --- /dev/null +++ b/arch/x86/kvm/svm/svm_ops.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_SVM_OPS_H +#define __KVM_X86_SVM_OPS_H + +#include + +#include + +#define svm_asm(insn, clobber...) \ +do { \ + asm_volatile_goto("1: " __stringify(insn) "\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ + ::: clobber : fault); \ + return; \ +fault: \ + kvm_spurious_fault(); \ +} while (0) + +#define svm_asm1(insn, op1, clobber...) \ +do { \ + asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ + :: op1 : clobber : fault); \ + return; \ +fault: \ + kvm_spurious_fault(); \ +} while (0) + +#define svm_asm2(insn, op1, op2, clobber...) \ +do { \ + asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ + :: op1, op2 : clobber : fault); \ + return; \ +fault: \ + kvm_spurious_fault(); \ +} while (0) + +static inline void clgi(void) +{ + svm_asm(clgi); +} + +static inline void stgi(void) +{ + svm_asm(stgi); +} + +static inline void invlpga(unsigned long addr, u32 asid) +{ + svm_asm2(invlpga, "c"(asid), "a"(addr)); +} + +/* + * Despite being a physical address, the portion of rAX that is consumed by + * VMSAVE, VMLOAD, etc... is still controlled by the effective address size, + * hence 'unsigned long' instead of 'hpa_t'. + */ +static inline void vmsave(unsigned long pa) +{ + svm_asm1(vmsave, "a" (pa), "memory"); +} + +#endif /* __KVM_X86_SVM_OPS_H */ -- cgit v1.2.3 From e79b91bb3c916a52ce823ab60489c717c925c49f Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Tue, 2 Feb 2021 13:01:24 -0600 Subject: KVM: SVM: use vmsave/vmload for saving/restoring additional host state Using a guest workload which simply issues 'hlt' in a tight loop to generate VMEXITs, it was observed (on a recent EPYC processor) that a significant amount of the VMEXIT overhead measured on the host was the result of MSR reads/writes in svm_vcpu_load/svm_vcpu_put according to perf: 67.49%--kvm_arch_vcpu_ioctl_run | |--23.13%--vcpu_put | kvm_arch_vcpu_put | | | |--21.31%--native_write_msr | | | --1.27%--svm_set_cr4 | |--16.11%--vcpu_load | | | --15.58%--kvm_arch_vcpu_load | | | |--13.97%--svm_set_cr4 | | | | | |--12.64%--native_read_msr Most of these MSRs relate to 'syscall'/'sysenter' and segment bases, and can be saved/restored using 'vmsave'/'vmload' instructions rather than explicit MSR reads/writes. 
In doing so there is a significant reduction in the svm_vcpu_load/svm_vcpu_put overhead measured for the above workload: 50.92%--kvm_arch_vcpu_ioctl_run | |--19.28%--disable_nmi_singlestep | |--13.68%--vcpu_load | kvm_arch_vcpu_load | | | |--9.19%--svm_set_cr4 | | | | | --6.44%--native_read_msr | | | --3.55%--native_write_msr | |--6.05%--kvm_inject_nmi |--2.80%--kvm_sev_es_mmio_read |--2.19%--vcpu_put | | | --1.25%--kvm_arch_vcpu_put | native_write_msr Quantifying this further, if we look at the raw cycle counts for a normal iteration of the above workload (according to 'rdtscp'), kvm_arch_vcpu_ioctl_run() takes ~4600 cycles from start to finish with the current behavior. Using 'vmsave'/'vmload', this is reduced to ~2800 cycles, a savings of 39%. While this approach doesn't seem to manifest in any noticeable improvement for more realistic workloads like UnixBench, netperf, and kernel builds, likely due to their exit paths generally involving IO with comparatively high latencies, it does improve overall overhead of KVM_RUN significantly, which may still be noticeable for certain situations. It also simplifies some aspects of the code. With this change, explicit save/restore is no longer needed for the following host MSRs, since they are documented[1] as being part of the VMCB State Save Area: MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_FS_BASE, MSR_GS_BASE and only the following MSR needs individual handling in svm_vcpu_put/svm_vcpu_load: MSR_TSC_AUX We could drop the host_save_user_msrs array/loop and instead handle MSR read/write of MSR_TSC_AUX directly, but we leave that for now as a potential follow-up. Since 'vmsave'/'vmload' also handles the LDTR and FS/GS segment registers (and associated hidden state)[2], some of the code previously used to handle this is no longer needed, so we drop it as well. The first public release of the SVM spec[3] also documents the same handling for the host state in question, so we make these changes unconditionally. Also worth noting is that we 'vmsave' to the same page that is subsequently used by 'vmrun' to record some host additional state. This is okay, since, in accordance with the spec[2], the additional state written to the page by 'vmrun' does not overwrite any fields written by 'vmsave'. This has also been confirmed through testing (for the above CPU, at least). 
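The cycle counts quoted above are the kind of numbers obtained by reading the TSC around the region of interest. A minimal userspace sketch of that measurement style is shown below; it is not taken from the patch, and a trivial loop stands in for the KVM_RUN path being timed.

#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
        unsigned int aux;
        unsigned long long start, end;
        volatile unsigned long sink = 0;

        start = __rdtscp(&aux);         /* RDTSCP waits for preceding instructions to complete */
        for (int i = 0; i < 1000000; i++)
                sink += i;              /* placeholder for the code path being timed */
        end = __rdtscp(&aux);

        printf("elapsed cycles: %llu (sink=%lu)\n", end - start, sink);
        return 0;
}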
[1] AMD64 Architecture Programmer's Manual, Rev 3.33, Volume 2, Appendix B, Table B-2 [2] AMD64 Architecture Programmer's Manual, Rev 3.31, Volume 3, Chapter 4, VMSAVE/VMLOAD [3] Secure Virtual Machine Architecture Reference Manual, Rev 3.01 Suggested-by: Tom Lendacky Signed-off-by: Michael Roth Message-Id: <20210202190126.2185715-2-michael.roth@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 31 +++++-------------------------- arch/x86/kvm/svm/svm.h | 17 ----------------- arch/x86/kvm/svm/svm_ops.h | 5 +++++ 3 files changed, 10 insertions(+), 43 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 58f771b48c2e..fc4c3cc60d69 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1427,16 +1427,11 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (sev_es_guest(svm->vcpu.kvm)) { sev_es_vcpu_load(svm, cpu); } else { -#ifdef CONFIG_X86_64 - rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); -#endif - savesegment(fs, svm->host.fs); - savesegment(gs, svm->host.gs); - svm->host.ldt = kvm_read_ldt(); - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]); + + vmsave(__sme_page_pa(sd->save_area)); } if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { @@ -1468,17 +1463,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) if (sev_es_guest(svm->vcpu.kvm)) { sev_es_vcpu_put(svm); } else { - kvm_load_ldt(svm->host.ldt); -#ifdef CONFIG_X86_64 - loadsegment(fs, svm->host.fs); - wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase); - load_gs_index(svm->host.gs); -#else -#ifdef CONFIG_X86_32_LAZY_GS - loadsegment(gs, svm->host.gs); -#endif -#endif - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]); @@ -3786,16 +3770,11 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, if (sev_es_guest(svm->vcpu.kvm)) { __svm_sev_es_vcpu_run(svm->vmcb_pa); } else { + struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); -#ifdef CONFIG_X86_64 - native_wrmsrl(MSR_GS_BASE, svm->host.gs_base); -#else - loadsegment(fs, svm->host.fs); -#ifndef CONFIG_X86_32_LAZY_GS - loadsegment(gs, svm->host.gs); -#endif -#endif + vmload(__sme_page_pa(sd->save_area)); } /* diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 6e7d070f8b86..2d227ba34368 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -27,17 +27,6 @@ static const struct svm_host_save_msrs { u32 index; /* Index of the MSR */ bool sev_es_restored; /* True if MSR is restored on SEV-ES VMEXIT */ } host_save_user_msrs[] = { -#ifdef CONFIG_X86_64 - { .index = MSR_STAR, .sev_es_restored = true }, - { .index = MSR_LSTAR, .sev_es_restored = true }, - { .index = MSR_CSTAR, .sev_es_restored = true }, - { .index = MSR_SYSCALL_MASK, .sev_es_restored = true }, - { .index = MSR_KERNEL_GS_BASE, .sev_es_restored = true }, - { .index = MSR_FS_BASE, .sev_es_restored = true }, -#endif - { .index = MSR_IA32_SYSENTER_CS, .sev_es_restored = true }, - { .index = MSR_IA32_SYSENTER_ESP, .sev_es_restored = true }, - { .index = MSR_IA32_SYSENTER_EIP, .sev_es_restored = true }, { .index = MSR_TSC_AUX, .sev_es_restored = false }, }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) @@ -130,12 +119,6 @@ struct vcpu_svm { u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; - struct { - u16 fs; - u16 gs; - u16 ldt; - u64 gs_base; - } host; u64 spec_ctrl; /* diff --git a/arch/x86/kvm/svm/svm_ops.h 
b/arch/x86/kvm/svm/svm_ops.h index 9f007bc8409a..8170f2a5a16f 100644 --- a/arch/x86/kvm/svm/svm_ops.h +++ b/arch/x86/kvm/svm/svm_ops.h @@ -61,4 +61,9 @@ static inline void vmsave(unsigned long pa) svm_asm1(vmsave, "a" (pa), "memory"); } +static inline void vmload(unsigned long pa) +{ + svm_asm1(vmload, "a" (pa), "memory"); +} + #endif /* __KVM_X86_SVM_OPS_H */ -- cgit v1.2.3 From 553cc15f6e8d1467dc09a1fe6e51fcdea5f96471 Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Tue, 2 Feb 2021 13:01:25 -0600 Subject: KVM: SVM: remove uneeded fields from host_save_users_msrs Now that the set of host user MSRs that need to be individually saved/restored are the same with/without SEV-ES, we can drop the .sev_es_restored flag and just iterate through the list unconditionally for both cases. A subsequent patch can then move these loops to a common path. Signed-off-by: Michael Roth Message-Id: <20210202190126.2185715-3-michael.roth@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 16 ++++------------ arch/x86/kvm/svm/svm.c | 6 ++---- arch/x86/kvm/svm/svm.h | 7 ++----- 3 files changed, 8 insertions(+), 21 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index a3e2b29f484d..87167ef8ca23 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2083,12 +2083,8 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) * Certain MSRs are restored on VMEXIT, only save ones that aren't * restored. */ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) { - if (host_save_user_msrs[i].sev_es_restored) - continue; - - rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]); - } + for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) + rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); /* XCR0 is restored on VMEXIT, save the current host value */ hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); @@ -2109,12 +2105,8 @@ void sev_es_vcpu_put(struct vcpu_svm *svm) * Certain MSRs are restored on VMEXIT and were saved with vmsave in * sev_es_vcpu_load() above. Only restore ones that weren't. 
*/ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) { - if (host_save_user_msrs[i].sev_es_restored) - continue; - - wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]); - } + for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) + wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index fc4c3cc60d69..8b2cbcb50239 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1428,8 +1428,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) sev_es_vcpu_load(svm, cpu); } else { for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - rdmsrl(host_save_user_msrs[i].index, - svm->host_user_msrs[i]); + rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); vmsave(__sme_page_pa(sd->save_area)); } @@ -1464,8 +1463,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) sev_es_vcpu_put(svm); } else { for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - wrmsrl(host_save_user_msrs[i].index, - svm->host_user_msrs[i]); + wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 2d227ba34368..42c1faaddd17 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -23,11 +23,8 @@ #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) -static const struct svm_host_save_msrs { - u32 index; /* Index of the MSR */ - bool sev_es_restored; /* True if MSR is restored on SEV-ES VMEXIT */ -} host_save_user_msrs[] = { - { .index = MSR_TSC_AUX, .sev_es_restored = false }, +static const u32 host_save_user_msrs[] = { + MSR_TSC_AUX, }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) -- cgit v1.2.3 From a7fc06dd2f14f88e611a968f7efa6532cdd5529a Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Tue, 2 Feb 2021 13:01:26 -0600 Subject: KVM: SVM: use .prepare_guest_switch() to handle CPU register save/setup Currently we save host state like user-visible host MSRs, and do some initial guest register setup for MSR_TSC_AUX and MSR_AMD64_TSC_RATIO in svm_vcpu_load(). Defer this until just before we enter the guest by moving the handling to kvm_x86_ops.prepare_guest_switch() similarly to how it is done for the VMX implementation. Additionally, since handling of saving/restoring host user MSRs is the same both with/without SEV-ES enabled, move that handling to common code. Suggested-by: Sean Christopherson Signed-off-by: Michael Roth Message-Id: <20210202190126.2185715-4-michael.roth@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 22 +-------------- arch/x86/kvm/svm/svm.c | 76 ++++++++++++++++++++++++++++++++++---------------- arch/x86/kvm/svm/svm.h | 5 ++-- 3 files changed, 56 insertions(+), 47 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 87167ef8ca23..874ea309279f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2066,11 +2066,10 @@ void sev_es_create_vcpu(struct vcpu_svm *svm) sev_enc_bit)); } -void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) +void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); struct vmcb_save_area *hostsa; - unsigned int i; /* * As an SEV-ES guest, hardware will restore the host state on VMEXIT, @@ -2079,13 +2078,6 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) */ vmsave(__sme_page_pa(sd->save_area)); - /* - * Certain MSRs are restored on VMEXIT, only save ones that aren't - * restored. 
- */ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); - /* XCR0 is restored on VMEXIT, save the current host value */ hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); @@ -2097,18 +2089,6 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) hostsa->xss = host_xss; } -void sev_es_vcpu_put(struct vcpu_svm *svm) -{ - unsigned int i; - - /* - * Certain MSRs are restored on VMEXIT and were saved with vmsave in - * sev_es_vcpu_load() above. Only restore ones that weren't. - */ - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); -} - void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) { struct vcpu_svm *svm = to_svm(vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8b2cbcb50239..645bad5e8e81 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1366,6 +1366,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu) svm->vmsa = page_address(vmsa_page); svm->asid_generation = 0; + svm->guest_state_loaded = false; init_vmcb(svm); svm_init_osvw(vcpu); @@ -1413,23 +1414,30 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); } -static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - struct svm_cpu_data *sd = per_cpu(svm_data, cpu); - int i; + struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + unsigned int i; - if (unlikely(cpu != vcpu->cpu)) { - svm->asid_generation = 0; - vmcb_mark_all_dirty(svm->vmcb); - } + if (svm->guest_state_loaded) + return; + + /* + * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save + * area (non-sev-es). Save ones that aren't so we can restore them + * individually later. + */ + for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) + rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); + /* + * Save additional host state that will be restored on VMEXIT (sev-es) + * or subsequent vmload of host save area. + */ if (sev_es_guest(svm->vcpu.kvm)) { - sev_es_vcpu_load(svm, cpu); + sev_es_prepare_guest_switch(svm, vcpu->cpu); } else { - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); - vmsave(__sme_page_pa(sd->save_area)); } @@ -1440,10 +1448,42 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); } } + /* This assumes that the kernel never uses MSR_TSC_AUX */ if (static_cpu_has(X86_FEATURE_RDTSCP)) wrmsrl(MSR_TSC_AUX, svm->tsc_aux); + svm->guest_state_loaded = true; +} + +static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + unsigned int i; + + if (!svm->guest_state_loaded) + return; + + /* + * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save + * area (non-sev-es). Restore the ones that weren't. 
+ */ + for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) + wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); + + svm->guest_state_loaded = false; +} + +static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + + if (unlikely(cpu != vcpu->cpu)) { + svm->asid_generation = 0; + vmcb_mark_all_dirty(svm->vmcb); + } + if (sd->current_vmcb != svm->vmcb) { sd->current_vmcb = svm->vmcb; indirect_branch_prediction_barrier(); @@ -1453,18 +1493,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) static void svm_vcpu_put(struct kvm_vcpu *vcpu) { - struct vcpu_svm *svm = to_svm(vcpu); - int i; - avic_vcpu_put(vcpu); + svm_prepare_host_switch(vcpu); ++vcpu->stat.host_state_reload; - if (sev_es_guest(svm->vcpu.kvm)) { - sev_es_vcpu_put(svm); - } else { - for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); - } } static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) @@ -3620,10 +3652,6 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) invlpga(gva, svm->vmcb->control.asid); } -static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) -{ -} - static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 42c1faaddd17..39e071fdab0c 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -172,6 +172,8 @@ struct vcpu_svm { u64 ghcb_sa_len; bool ghcb_sa_sync; bool ghcb_sa_free; + + bool guest_state_loaded; }; struct svm_cpu_data { @@ -567,9 +569,8 @@ int sev_handle_vmgexit(struct vcpu_svm *svm); int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in); void sev_es_init_vmcb(struct vcpu_svm *svm); void sev_es_create_vcpu(struct vcpu_svm *svm); -void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu); -void sev_es_vcpu_put(struct vcpu_svm *svm); void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); +void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu); /* vmenter.S */ -- cgit v1.2.3 From 04548ed0206ca895c8edd6a078c20a218423890b Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Tue, 2 Feb 2021 20:28:40 -0500 Subject: KVM: SVM: Replace hard-coded value with #define Replace the hard-coded value for bit# 1 in EFLAGS, with the available #define. Signed-off-by: Krish Sadhukhan Message-Id: <20210203012842.101447-2-krish.sadhukhan@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 645bad5e8e81..30681633193e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1207,7 +1207,7 @@ static void init_vmcb(struct vcpu_svm *svm) svm_set_efer(&svm->vcpu, 0); save->dr6 = 0xffff0ff0; - kvm_set_rflags(&svm->vcpu, 2); + kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED); save->rip = 0x0000fff0; svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; -- cgit v1.2.3 From 805a0f83907cface14897ada15b61190f3bc2962 Mon Sep 17 00:00:00 2001 From: Stephen Zhang Date: Wed, 27 Jan 2021 10:08:45 +0800 Subject: KVM: x86/mmu: Add '__func__' in rmap_printk() Given the common pattern: rmap_printk("%s:"..., __func__,...) we could improve this by adding '__func__' in rmap_printk(). 
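The macro change being described can be seen in isolation in the short sketch below; dbg_printk() and rmap_lookup() are made-up names used purely to illustrate how a GNU-style variadic macro can prepend the caller's __func__ so that call sites no longer pass it themselves.

#include <stdio.h>

/*
 * ", ## args" drops the trailing comma when the macro is used with a bare
 * format string, mirroring the rmap_printk() definition below.
 */
#define dbg_printk(fmt, args...) \
        printf("%s: " fmt, __func__, ## args)

static void rmap_lookup(unsigned long gfn)
{
        dbg_printk("gfn %lu not found\n", gfn); /* -> "rmap_lookup: gfn 42 not found" */
}

int main(void)
{
        rmap_lookup(42);
        dbg_printk("done\n");                   /* -> "main: done" */
        return 0;
}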
Signed-off-by: Stephen Zhang Message-Id: <1611713325-3591-1-git-send-email-stephenzhangzsd@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 20 ++++++++++---------- arch/x86/kvm/mmu/mmu_internal.h | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index bc63406850c9..525d66641ce1 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -844,17 +844,17 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, int i, count = 0; if (!rmap_head->val) { - rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); + rmap_printk("%p %llx 0->1\n", spte, *spte); rmap_head->val = (unsigned long)spte; } else if (!(rmap_head->val & 1)) { - rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); + rmap_printk("%p %llx 1->many\n", spte, *spte); desc = mmu_alloc_pte_list_desc(vcpu); desc->sptes[0] = (u64 *)rmap_head->val; desc->sptes[1] = spte; rmap_head->val = (unsigned long)desc | 1; ++count; } else { - rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); + rmap_printk("%p %llx many->many\n", spte, *spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); while (desc->sptes[PTE_LIST_EXT-1]) { count += PTE_LIST_EXT; @@ -906,14 +906,14 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) pr_err("%s: %p 0->BUG\n", __func__, spte); BUG(); } else if (!(rmap_head->val & 1)) { - rmap_printk("%s: %p 1->0\n", __func__, spte); + rmap_printk("%p 1->0\n", spte); if ((u64 *)rmap_head->val != spte) { pr_err("%s: %p 1->BUG\n", __func__, spte); BUG(); } rmap_head->val = 0; } else { - rmap_printk("%s: %p many->many\n", __func__, spte); + rmap_printk("%p many->many\n", spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); prev_desc = NULL; while (desc) { @@ -1115,7 +1115,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect) !(pt_protect && spte_can_locklessly_be_made_writable(spte))) return false; - rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); + rmap_printk("spte %p %llx\n", sptep, *sptep); if (pt_protect) spte &= ~SPTE_MMU_WRITEABLE; @@ -1142,7 +1142,7 @@ static bool spte_clear_dirty(u64 *sptep) { u64 spte = *sptep; - rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); + rmap_printk("spte %p %llx\n", sptep, *sptep); MMU_WARN_ON(!spte_ad_enabled(spte)); spte &= ~shadow_dirty_mask; @@ -1184,7 +1184,7 @@ static bool spte_set_dirty(u64 *sptep) { u64 spte = *sptep; - rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); + rmap_printk("spte %p %llx\n", sptep, *sptep); /* * Similar to the !kvm_x86_ops.slot_disable_log_dirty case, @@ -1332,7 +1332,7 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) bool flush = false; while ((sptep = rmap_get_first(rmap_head, &iter))) { - rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); + rmap_printk("spte %p %llx.\n", sptep, *sptep); pte_list_remove(rmap_head, sptep); flush = true; @@ -1364,7 +1364,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, restart: for_each_rmap_spte(rmap_head, &iter, sptep) { - rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", + rmap_printk("spte %p %llx gfn %llx (%d)\n", sptep, *sptep, gfn, level); need_flush = 1; diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index bfc6389edc28..5ec15e4160b1 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -12,7 +12,7 @@ extern bool dbg; #define pgprintk(x...) 
do { if (dbg) printk(x); } while (0) -#define rmap_printk(x...) do { if (dbg) printk(x); } while (0) +#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0) #define MMU_WARN_ON(x) WARN_ON(x) #else #define pgprintk(x...) do { } while (0) -- cgit v1.2.3 From fb18d053b7f823e6a9acf62d1be5b986ca614253 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 29 Jan 2021 17:18:21 +0100 Subject: selftest: kvm: x86: test KVM_GET_CPUID2 and guest visible CPUIDs against KVM_GET_SUPPORTED_CPUID Commit 181f494888d5 ("KVM: x86: fix CPUID entries returned by KVM_GET_CPUID2 ioctl") revealed that we're not testing KVM_GET_CPUID2 ioctl at all. Add a test for it and also check that from inside the guest visible CPUIDs are equal to it's output. Signed-off-by: Vitaly Kuznetsov Message-Id: <20210129161821.74635-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/include/x86_64/processor.h | 14 ++ tools/testing/selftests/kvm/lib/x86_64/processor.c | 42 +++++ .../testing/selftests/kvm/x86_64/get_cpuid_test.c | 175 +++++++++++++++++++++ 5 files changed, 233 insertions(+) create mode 100644 tools/testing/selftests/kvm/x86_64/get_cpuid_test.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index c08f26d0aec4..1b32c97f8c82 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -7,6 +7,7 @@ /x86_64/cr4_cpuid_sync_test /x86_64/debug_regs /x86_64/evmcs_test +/x86_64/get_cpuid_test /x86_64/kvm_pv_test /x86_64/hyperv_cpuid /x86_64/mmio_warning_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 60410e9ebd41..9616f77cc5af 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -40,6 +40,7 @@ LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_ha TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test +TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index fce4cd4d4895..582dfb23f30c 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -263,6 +263,19 @@ static inline void outl(uint16_t port, uint32_t value) __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value)); } +static inline void cpuid(uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + /* ecx is often an input as well as an output. 
*/ + asm volatile("cpuid" + : "=a" (*eax), + "=b" (*ebx), + "=c" (*ecx), + "=d" (*edx) + : "0" (*eax), "2" (*ecx) + : "memory"); +} + #define SET_XMM(__var, __xmm) \ asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm) @@ -341,6 +354,7 @@ struct kvm_msr_list *kvm_get_msr_index_list(void); uint64_t kvm_get_feature_msr(uint64_t msr_index); struct kvm_cpuid2 *kvm_get_supported_cpuid(void); +struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid); void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 66926a7f29cc..5a15264c2ef2 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -703,6 +703,48 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index) return buffer.entry.data; } +/* + * VM VCPU CPUID Set + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU id + * + * Output Args: None + * + * Return: KVM CPUID (KVM_GET_CPUID2) + * + * Set the VCPU's CPUID. + */ +struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + struct kvm_cpuid2 *cpuid; + int rc, max_ent; + + TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); + + cpuid = allocate_kvm_cpuid2(); + max_ent = cpuid->nent; + + for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) { + rc = ioctl(vcpu->fd, KVM_GET_CPUID2, cpuid); + if (!rc) + break; + + TEST_ASSERT(rc == -1 && errno == E2BIG, + "KVM_GET_CPUID2 should either succeed or give E2BIG: %d %d", + rc, errno); + } + + TEST_ASSERT(rc == 0, "KVM_GET_CPUID2 failed, rc: %i errno: %i", + rc, errno); + + return cpuid; +} + + + /* * Locate a cpuid entry. * diff --git a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c new file mode 100644 index 000000000000..9b78e8889638 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021, Red Hat Inc. 
+ * + * Generic tests for KVM CPUID set/get ioctls + */ +#include +#include +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 0 + +/* CPUIDs known to differ */ +struct { + u32 function; + u32 index; +} mangled_cpuids[] = { + {.function = 0xd, .index = 0}, +}; + +static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid) +{ + int i; + u32 eax, ebx, ecx, edx; + + for (i = 0; i < guest_cpuid->nent; i++) { + eax = guest_cpuid->entries[i].function; + ecx = guest_cpuid->entries[i].index; + + cpuid(&eax, &ebx, &ecx, &edx); + + GUEST_ASSERT(eax == guest_cpuid->entries[i].eax && + ebx == guest_cpuid->entries[i].ebx && + ecx == guest_cpuid->entries[i].ecx && + edx == guest_cpuid->entries[i].edx); + } + +} + +static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid) +{ + u32 eax = 0x40000000, ebx, ecx = 0, edx; + + cpuid(&eax, &ebx, &ecx, &edx); + + GUEST_ASSERT(eax == 0x40000001); +} + +static void guest_main(struct kvm_cpuid2 *guest_cpuid) +{ + GUEST_SYNC(1); + + test_guest_cpuids(guest_cpuid); + + GUEST_SYNC(2); + + test_cpuid_40000000(guest_cpuid); + + GUEST_DONE(); +} + +static bool is_cpuid_mangled(struct kvm_cpuid_entry2 *entrie) +{ + int i; + + for (i = 0; i < sizeof(mangled_cpuids); i++) { + if (mangled_cpuids[i].function == entrie->function && + mangled_cpuids[i].index == entrie->index) + return true; + } + + return false; +} + +static void check_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *entrie) +{ + int i; + + for (i = 0; i < cpuid->nent; i++) { + if (cpuid->entries[i].function == entrie->function && + cpuid->entries[i].index == entrie->index) { + if (is_cpuid_mangled(entrie)) + return; + + TEST_ASSERT(cpuid->entries[i].eax == entrie->eax && + cpuid->entries[i].ebx == entrie->ebx && + cpuid->entries[i].ecx == entrie->ecx && + cpuid->entries[i].edx == entrie->edx, + "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x", + entrie->function, entrie->index, + cpuid->entries[i].eax, cpuid->entries[i].ebx, + cpuid->entries[i].ecx, cpuid->entries[i].edx, + entrie->eax, entrie->ebx, entrie->ecx, entrie->edx); + return; + } + } + + TEST_ASSERT(false, "CPUID 0x%x.%x not found", entrie->function, entrie->index); +} + +static void compare_cpuids(struct kvm_cpuid2 *cpuid1, struct kvm_cpuid2 *cpuid2) +{ + int i; + + for (i = 0; i < cpuid1->nent; i++) + check_cpuid(cpuid2, &cpuid1->entries[i]); + + for (i = 0; i < cpuid2->nent; i++) + check_cpuid(cpuid1, &cpuid2->entries[i]); +} + +static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage) +{ + struct ucall uc; + + _vcpu_run(vm, vcpuid); + + switch (get_ucall(vm, vcpuid, &uc)) { + case UCALL_SYNC: + TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && + uc.args[1] == stage + 1, + "Stage %d: Unexpected register values vmexit, got %lx", + stage + 1, (ulong)uc.args[1]); + return; + case UCALL_DONE: + return; + case UCALL_ABORT: + TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx", (const char *)uc.args[0], + __FILE__, uc.args[1], uc.args[2], uc.args[3]); + default: + TEST_ASSERT(false, "Unexpected exit: %s", + exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason)); + } +} + +struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid) +{ + int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]); + vm_vaddr_t gva = vm_vaddr_alloc(vm, size, + getpagesize(), 0, 0); + struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva); + + memcpy(guest_cpuids, cpuid, size); + + *p_gva = gva; + return guest_cpuids; +} 
+ +int main(void) +{ + struct kvm_cpuid2 *supp_cpuid, *cpuid2; + vm_vaddr_t cpuid_gva; + struct kvm_vm *vm; + int stage; + + vm = vm_create_default(VCPU_ID, 0, guest_main); + + supp_cpuid = kvm_get_supported_cpuid(); + cpuid2 = vcpu_get_cpuid(vm, VCPU_ID); + + compare_cpuids(supp_cpuid, cpuid2); + + vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2); + + vcpu_args_set(vm, VCPU_ID, 1, cpuid_gva); + + for (stage = 0; stage < 3; stage++) + run_vcpu(vm, VCPU_ID, stage); + + kvm_vm_free(vm); +} -- cgit v1.2.3 From d89d04ab6030c73b24bbe032fb474e0fb74dd891 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 2 Feb 2021 10:44:23 -0500 Subject: KVM: move EXIT_FASTPATH_REENTER_GUEST to common code Now that KVM is using static calls, calling vmx_vcpu_run and vmx_sync_pir_to_irr does not incur anymore the cost of a retpoline. Therefore there is no need anymore to handle EXIT_FASTPATH_REENTER_GUEST in vendor code. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 19 +------------------ arch/x86/kvm/x86.c | 17 ++++++++++++++--- arch/x86/kvm/x86.h | 1 - 3 files changed, 15 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 02949a650cd1..0cd24ae2db08 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6711,11 +6711,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) { - fastpath_t exit_fastpath; struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long cr3, cr4; -reenter_guest: /* Record the guest's net vcpu time for enforced NMI injections. */ if (unlikely(!enable_vnmi && vmx->loaded_vmcs->soft_vnmi_blocked)) @@ -6865,22 +6863,7 @@ reenter_guest: if (is_guest_mode(vcpu)) return EXIT_FASTPATH_NONE; - exit_fastpath = vmx_exit_handlers_fastpath(vcpu); - if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) { - if (!kvm_vcpu_exit_request(vcpu)) { - /* - * FIXME: this goto should be a loop in vcpu_enter_guest, - * but it would incur the cost of a retpoline for now. - * Revisit once static calls are available. - */ - if (vcpu->arch.apicv_active) - vmx_sync_pir_to_irr(vcpu); - goto reenter_guest; - } - exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; - } - - return exit_fastpath; + return vmx_exit_handlers_fastpath(vcpu); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4758afe59795..39f01a051868 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1796,12 +1796,11 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); -bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) { return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending(); } -EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request); /* * The fast path for frequent and performance sensitive wrmsr emulation, @@ -9044,7 +9043,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; } - exit_fastpath = static_call(kvm_x86_run)(vcpu); + for (;;) { + exit_fastpath = static_call(kvm_x86_run)(vcpu); + if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) + break; + + if (unlikely(kvm_vcpu_exit_request(vcpu))) { + exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; + break; + } + + if (vcpu->arch.apicv_active) + static_call(kvm_x86_sync_pir_to_irr)(vcpu); + } /* * Do this here before restoring debug registers on the host. 
And diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 5f7c224f4bf2..cc652a348acc 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -395,7 +395,6 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu); void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu); int kvm_spec_ctrl_test_value(u64 value); bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); -bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu); int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, struct x86_exception *e); int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva); -- cgit v1.2.3 From fd238002616c5f1e44d9d8feed42580059eab87d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 13 Nov 2020 08:31:09 -0500 Subject: KVM: cleanup DR6/DR7 reserved bits checks kvm_dr6_valid and kvm_dr7_valid check that bits 63:32 are zero. Using them makes it easier to review the code for inconsistencies. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 39f01a051868..a28068075f8b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4422,9 +4422,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, if (dbgregs->flags) return -EINVAL; - if (dbgregs->dr6 & ~0xffffffffull) + if (!kvm_dr6_valid(dbgregs->dr6)) return -EINVAL; - if (dbgregs->dr7 & ~0xffffffffull) + if (!kvm_dr7_valid(dbgregs->dr7)) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); -- cgit v1.2.3 From bbefd4fc8f590e765b455ef0a4deb6c105fee305 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 14 Dec 2020 07:49:54 -0500 Subject: KVM: x86: move kvm_inject_gp up from kvm_set_xcr to callers Push the injection of #GP up to the callers, so that they can just use kvm_complete_insn_gp. 
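To make the resulting call-site pattern concrete, here is a small userspace sketch (hypothetical names, not kernel code) of the completion idiom the callers are converted to: one helper takes the error code and chooses between raising the fault and skipping past the instruction, so each exit handler reduces to a single return.

	#include <stdio.h>

	static int inject_gp(void)        { puts("#GP injected"); return 1; }
	static int skip_instruction(void) { puts("insn skipped");  return 1; }

	/* Mirrors the shape of kvm_complete_insn_gp(vcpu, err). */
	static int complete_insn_gp(int err)
	{
		return err ? inject_gp() : skip_instruction();
	}

	/* Toy stand-in for kvm_set_xcr(): non-zero return means failure. */
	static int set_xcr(unsigned int index, unsigned long long val)
	{
		(void)val;
		return index != 0;	/* pretend only XCR0 exists */
	}

	int main(void)
	{
		complete_insn_gp(set_xcr(0, 0x7));	/* success -> skip insn */
		complete_insn_gp(set_xcr(1, 0x7));	/* failure -> inject #GP */
		return 0;
	}

The real helper used by the series, kvm_complete_insn_gp(), injects #GP on a non-zero error and otherwise skips the emulated instruction; the sketch only mirrors that shape.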
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 7 ++----- arch/x86/kvm/vmx/vmx.c | 5 ++--- arch/x86/kvm/x86.c | 10 ++++------ 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 30681633193e..fdea0994e94f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2357,11 +2357,8 @@ static int xsetbv_interception(struct vcpu_svm *svm) u64 new_bv = kvm_read_edx_eax(&svm->vcpu); u32 index = kvm_rcx_read(&svm->vcpu); - if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { - return kvm_skip_emulated_instruction(&svm->vcpu); - } - - return 1; + int err = kvm_set_xcr(&svm->vcpu, index, new_bv); + return kvm_complete_insn_gp(&svm->vcpu, err); } static int rdpru_interception(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 0cd24ae2db08..d995f47a08ef 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5223,9 +5223,8 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu) u64 new_bv = kvm_read_edx_eax(vcpu); u32 index = kvm_rcx_read(vcpu); - if (kvm_set_xcr(vcpu, index, new_bv) == 0) - return kvm_skip_emulated_instruction(vcpu); - return 1; + int err = kvm_set_xcr(vcpu, index, new_bv); + return kvm_complete_insn_gp(vcpu, err); } static int handle_apic_access(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a28068075f8b..912fc418ce99 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -986,12 +986,10 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || - __kvm_set_xcr(vcpu, index, xcr)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - return 0; + if (static_call(kvm_x86_get_cpl)(vcpu) == 0) + return __kvm_set_xcr(vcpu, index, xcr); + + return 1; } EXPORT_SYMBOL_GPL(kvm_set_xcr); -- cgit v1.2.3 From e28a436ca4f65384cceaf3f4da0e00aa74244e6a Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:07 -0800 Subject: KVM: x86/mmu: change TDP MMU yield function returns to match cond_resched Currently the TDP MMU yield / cond_resched functions either return nothing or return true if the TLBs were not flushed. These are confusing semantics, especially when making control flow decisions in calling functions. To clean things up, change both functions to have the same return value semantics as cond_resched: true if the thread yielded, false if it did not. If the function yielded in the _flush_ version, then the TLBs will have been flushed. Reviewed-by: Peter Feiner Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-2-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index b56d604809b8..3a7ab76aec11 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -413,8 +413,15 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, _mmu->shadow_root_level, _start, _end) /* - * Flush the TLB if the process should drop kvm->mmu_lock. - * Return whether the caller still needs to flush the tlb. + * Flush the TLB and yield if the MMU lock is contended or this thread needs to + * return control to the scheduler. 
+ * + * If this function yields, it will also reset the tdp_iter's walk over the + * paging structure and the calling function should allow the iterator to + * continue its traversal from the paging structure root. + * + * Return true if this function yielded, the TLBs were flushed, and the + * iterator's traversal was reset. Return false if a yield was not needed. */ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter) { @@ -422,18 +429,32 @@ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *it kvm_flush_remote_tlbs(kvm); cond_resched_lock(&kvm->mmu_lock); tdp_iter_refresh_walk(iter); - return false; - } else { return true; } + + return false; } -static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter) +/* + * Yield if the MMU lock is contended or this thread needs to return control + * to the scheduler. + * + * If this function yields, it will also reset the tdp_iter's walk over the + * paging structure and the calling function should allow the iterator to + * continue its traversal from the paging structure root. + * + * Return true if this function yielded and the iterator's traversal was reset. + * Return false if a yield was not needed. + */ +static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter) { if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { cond_resched_lock(&kvm->mmu_lock); tdp_iter_refresh_walk(iter); + return true; } + + return false; } /* @@ -469,10 +490,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte(kvm, &iter, 0); - if (can_yield) - flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter); - else - flush_needed = true; + flush_needed = !can_yield || + !tdp_mmu_iter_flush_cond_resched(kvm, &iter); } return flush_needed; } @@ -1072,7 +1091,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm, tdp_mmu_set_spte(kvm, &iter, 0); - spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter); + spte_set = !tdp_mmu_iter_flush_cond_resched(kvm, &iter); } if (spte_set) -- cgit v1.2.3 From fe43fa2f407b9d513f7bcf18142e14e1bf1508d6 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:08 -0800 Subject: KVM: x86/mmu: Add comment on __tdp_mmu_set_spte __tdp_mmu_set_spte is a very important function in the TDP MMU which already accepts several arguments and will take more in future commits. To offset this complexity, add a comment to the function describing each of the arguemnts. No functional change intended. Reviewed-by: Peter Feiner Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-3-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 3a7ab76aec11..ef3d3e058fa0 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -357,6 +357,22 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, new_spte, level); } +/* + * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping + * @kvm: kvm instance + * @iter: a tdp_iter instance currently on the SPTE that should be set + * @new_spte: The value the SPTE should be set to + * @record_acc_track: Notify the MM subsystem of changes to the accessed state + * of the page. Should be set unless handling an MMU + * notifier for access tracking. 
Leaving record_acc_track + * unset in that case prevents page accesses from being + * double counted. + * @record_dirty_log: Record the page as dirty in the dirty bitmap if + * appropriate for the change being made. Should be set + * unless performing certain dirty logging operations. + * Leaving record_dirty_log unset in that case prevents page + * writes from being double counted. + */ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte, bool record_acc_track, bool record_dirty_log) -- cgit v1.2.3 From 3a9a4aa5657471a02ffb7f9b7f3b7a468b3f257b Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:09 -0800 Subject: KVM: x86/mmu: Add lockdep when setting a TDP MMU SPTE Add lockdep to __tdp_mmu_set_spte to ensure that SPTEs are only modified under the MMU lock. No functional change intended. Reviewed-by: Peter Feiner Reviewed-by: Sean Christopherson Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-4-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index ef3d3e058fa0..05e6e2c4554f 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -381,6 +381,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *root = sptep_to_sp(root_pt); int as_id = kvm_mmu_page_as_id(root); + lockdep_assert_held(&kvm->mmu_lock); + WRITE_ONCE(*iter->sptep, new_spte); __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, -- cgit v1.2.3 From 734e45b329d626d2c14e2bcf8be3d069a33c3316 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:10 -0800 Subject: KVM: x86/mmu: Don't redundantly clear TDP MMU pt memory The KVM MMU caches already guarantee that shadow page table memory will be zeroed, so there is no reason to re-zero the page in the TDP MMU page fault handler. No functional change intended. Reviewed-by: Peter Feiner Reviewed-by: Sean Christopherson Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-5-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 05e6e2c4554f..810de3280b97 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -655,7 +655,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level); list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages); child_pt = sp->spt; - clear_page(child_pt); new_spte = make_nonleaf_spte(child_pt, !shadow_accessed_mask); -- cgit v1.2.3 From a066e61f13cf4b17d043ad8bea0cdde2b1e5ee49 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:11 -0800 Subject: KVM: x86/mmu: Factor out handling of removed page tables Factor out the code to handle a disconnected subtree of the TDP paging structure from the code to handle the change to an individual SPTE. Future commits will build on this to allow asynchronous page freeing. No functional change intended. 
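The shape of the refactor, independent of the MMU details, can be sketched as a runnable toy in plain C: the subtree teardown gets its own helper that zaps each entry, recurses through the generic change handler, and frees the table, leaving the change handler with a single call. The names and the toy page-table type below are illustrative only.

	#include <stdio.h>
	#include <stdlib.h>

	#define ENTRIES 4

	struct toy_pt {
		struct toy_pt *children[ENTRIES];
	};

	static void handle_changed_entry(struct toy_pt *old_child);

	/* Analogue of handle_removed_tdp_mmu_page(): zap entries, recurse, free. */
	static void handle_removed_table(struct toy_pt *pt)
	{
		for (int i = 0; i < ENTRIES; i++) {
			struct toy_pt *child = pt->children[i];

			pt->children[i] = NULL;		/* clear the entry */
			handle_changed_entry(child);	/* process the old value */
		}
		free(pt);
	}

	/* Analogue of handle_changed_spte(): only removed subtrees need teardown. */
	static void handle_changed_entry(struct toy_pt *old_child)
	{
		if (old_child)
			handle_removed_table(old_child);
	}

	int main(void)
	{
		struct toy_pt *root = calloc(1, sizeof(*root));

		root->children[0] = calloc(1, sizeof(*root));
		handle_removed_table(root);
		puts("subtree torn down");
		return 0;
	}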
Reviewed-by: Peter Feiner Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-6-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 71 +++++++++++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 810de3280b97..e3066d08c1dc 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -234,6 +234,45 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, } } +/** + * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure + * + * @kvm: kvm instance + * @pt: the page removed from the paging structure + * + * Given a page table that has been removed from the TDP paging structure, + * iterates through the page table to clear SPTEs and free child page tables. + */ +static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt) +{ + struct kvm_mmu_page *sp = sptep_to_sp(pt); + int level = sp->role.level; + gfn_t gfn = sp->gfn; + u64 old_child_spte; + int i; + + trace_kvm_mmu_prepare_zap_page(sp); + + list_del(&sp->link); + + if (sp->lpage_disallowed) + unaccount_huge_nx_page(kvm, sp); + + for (i = 0; i < PT64_ENT_PER_PAGE; i++) { + old_child_spte = READ_ONCE(*(pt + i)); + WRITE_ONCE(*(pt + i), 0); + handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), + gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)), + old_child_spte, 0, level - 1); + } + + kvm_flush_remote_tlbs_with_address(kvm, gfn, + KVM_PAGES_PER_HPAGE(level)); + + free_page((unsigned long)pt); + kmem_cache_free(mmu_page_header_cache, sp); +} + /** * handle_changed_spte - handle bookkeeping associated with an SPTE change * @kvm: kvm instance @@ -254,10 +293,6 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, bool was_leaf = was_present && is_last_spte(old_spte, level); bool is_leaf = is_present && is_last_spte(new_spte, level); bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte); - u64 *pt; - struct kvm_mmu_page *sp; - u64 old_child_spte; - int i; WARN_ON(level > PT64_ROOT_MAX_LEVEL); WARN_ON(level < PG_LEVEL_4K); @@ -321,31 +356,9 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, * Recursively handle child PTs if the change removed a subtree from * the paging structure. */ - if (was_present && !was_leaf && (pfn_changed || !is_present)) { - pt = spte_to_child_pt(old_spte, level); - sp = sptep_to_sp(pt); - - trace_kvm_mmu_prepare_zap_page(sp); - - list_del(&sp->link); - - if (sp->lpage_disallowed) - unaccount_huge_nx_page(kvm, sp); - - for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - old_child_spte = READ_ONCE(*(pt + i)); - WRITE_ONCE(*(pt + i), 0); - handle_changed_spte(kvm, as_id, - gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)), - old_child_spte, 0, level - 1); - } - - kvm_flush_remote_tlbs_with_address(kvm, gfn, - KVM_PAGES_PER_HPAGE(level)); - - free_page((unsigned long)pt); - kmem_cache_free(mmu_page_header_cache, sp); - } + if (was_present && !was_leaf && (pfn_changed || !is_present)) + handle_removed_tdp_mmu_page(kvm, + spte_to_child_pt(old_spte, level)); } static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, -- cgit v1.2.3 From 8d1a182ea791f0111b0258c8f3eb8d77af0a8386 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:15 -0800 Subject: KVM: x86/mmu: Fix braces in kvm_recover_nx_lpages No functional change intended. 
Fixes: 29cf0f5007a2 ("kvm: x86/mmu: NX largepage recovery for TDP MMU") Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-10-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 525d66641ce1..5b364ff9c115 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5961,10 +5961,10 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) struct kvm_mmu_page, lpage_disallowed_link); WARN_ON_ONCE(!sp->lpage_disallowed); - if (sp->tdp_mmu_page) + if (sp->tdp_mmu_page) { kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level)); - else { + } else { kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); WARN_ON_ONCE(sp->lpage_disallowed); } -- cgit v1.2.3 From e139a34ef9d5627a41e1c02210229082140d1f92 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:17 -0800 Subject: KVM: x86/mmu: Merge flush and non-flush tdp_mmu_iter_cond_resched The flushing and non-flushing variants of tdp_mmu_iter_cond_resched have almost identical implementations. Merge the two functions and add a flush parameter. Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-12-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 42 +++++++++++++----------------------------- 1 file changed, 13 insertions(+), 29 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index e3066d08c1dc..8f7b120597f3 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -443,33 +443,13 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, for_each_tdp_pte(_iter, __va(_mmu->root_hpa), \ _mmu->shadow_root_level, _start, _end) -/* - * Flush the TLB and yield if the MMU lock is contended or this thread needs to - * return control to the scheduler. - * - * If this function yields, it will also reset the tdp_iter's walk over the - * paging structure and the calling function should allow the iterator to - * continue its traversal from the paging structure root. - * - * Return true if this function yielded, the TLBs were flushed, and the - * iterator's traversal was reset. Return false if a yield was not needed. - */ -static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter) -{ - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { - kvm_flush_remote_tlbs(kvm); - cond_resched_lock(&kvm->mmu_lock); - tdp_iter_refresh_walk(iter); - return true; - } - - return false; -} - /* * Yield if the MMU lock is contended or this thread needs to return control * to the scheduler. * + * If this function should yield and flush is set, it will perform a remote + * TLB flush before yielding. + * * If this function yields, it will also reset the tdp_iter's walk over the * paging structure and the calling function should allow the iterator to * continue its traversal from the paging structure root. @@ -477,9 +457,13 @@ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *it * Return true if this function yielded and the iterator's traversal was reset. * Return false if a yield was not needed. 
*/ -static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter) +static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, + struct tdp_iter *iter, bool flush) { if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (flush) + kvm_flush_remote_tlbs(kvm); + cond_resched_lock(&kvm->mmu_lock); tdp_iter_refresh_walk(iter); return true; @@ -522,7 +506,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte(kvm, &iter, 0); flush_needed = !can_yield || - !tdp_mmu_iter_flush_cond_resched(kvm, &iter); + !tdp_mmu_iter_cond_resched(kvm, &iter, true); } return flush_needed; } @@ -894,7 +878,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - tdp_mmu_iter_cond_resched(kvm, &iter); + tdp_mmu_iter_cond_resched(kvm, &iter, false); } return spte_set; } @@ -953,7 +937,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - tdp_mmu_iter_cond_resched(kvm, &iter); + tdp_mmu_iter_cond_resched(kvm, &iter, false); } return spte_set; } @@ -1069,7 +1053,7 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte(kvm, &iter, new_spte); spte_set = true; - tdp_mmu_iter_cond_resched(kvm, &iter); + tdp_mmu_iter_cond_resched(kvm, &iter, false); } return spte_set; @@ -1121,7 +1105,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm, tdp_mmu_set_spte(kvm, &iter, 0); - spte_set = !tdp_mmu_iter_flush_cond_resched(kvm, &iter); + spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true); } if (spte_set) -- cgit v1.2.3 From 74953d3530280dc53256054e1906f58d07bfba44 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:18 -0800 Subject: KVM: x86/mmu: Rename goal_gfn to next_last_level_gfn The goal_gfn field in tdp_iter can be misleading as it implies that it is the iterator's final goal. It is really a target for the lowest gfn mapped by the leaf level SPTE the iterator will traverse towards. Change the field's name to be more precise. Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-13-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_iter.c | 20 ++++++++++---------- arch/x86/kvm/mmu/tdp_iter.h | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index 87b7e16911db..9917c55b7d24 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -22,21 +22,21 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level) /* * Sets a TDP iterator to walk a pre-order traversal of the paging structure - * rooted at root_pt, starting with the walk to translate goal_gfn. + * rooted at root_pt, starting with the walk to translate next_last_level_gfn. 
*/ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, - int min_level, gfn_t goal_gfn) + int min_level, gfn_t next_last_level_gfn) { WARN_ON(root_level < 1); WARN_ON(root_level > PT64_ROOT_MAX_LEVEL); - iter->goal_gfn = goal_gfn; + iter->next_last_level_gfn = next_last_level_gfn; iter->root_level = root_level; iter->min_level = min_level; iter->level = root_level; iter->pt_path[iter->level - 1] = root_pt; - iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level); + iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); tdp_iter_refresh_sptep(iter); iter->valid = true; @@ -82,7 +82,7 @@ static bool try_step_down(struct tdp_iter *iter) iter->level--; iter->pt_path[iter->level - 1] = child_pt; - iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level); + iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); tdp_iter_refresh_sptep(iter); return true; @@ -106,7 +106,7 @@ static bool try_step_side(struct tdp_iter *iter) return false; iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); - iter->goal_gfn = iter->gfn; + iter->next_last_level_gfn = iter->gfn; iter->sptep++; iter->old_spte = READ_ONCE(*iter->sptep); @@ -166,13 +166,13 @@ void tdp_iter_next(struct tdp_iter *iter) */ void tdp_iter_refresh_walk(struct tdp_iter *iter) { - gfn_t goal_gfn = iter->goal_gfn; + gfn_t next_last_level_gfn = iter->next_last_level_gfn; - if (iter->gfn > goal_gfn) - goal_gfn = iter->gfn; + if (iter->gfn > next_last_level_gfn) + next_last_level_gfn = iter->gfn; tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], - iter->root_level, iter->min_level, goal_gfn); + iter->root_level, iter->min_level, next_last_level_gfn); } u64 *tdp_iter_root_pt(struct tdp_iter *iter) diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index 47170d0dc98e..b2dd269c631f 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -15,7 +15,7 @@ struct tdp_iter { * The iterator will traverse the paging structure towards the mapping * for this GFN. */ - gfn_t goal_gfn; + gfn_t next_last_level_gfn; /* Pointers to the page tables traversed to reach the current SPTE */ u64 *pt_path[PT64_ROOT_MAX_LEVEL]; /* A pointer to the current SPTE */ @@ -52,7 +52,7 @@ struct tdp_iter { u64 *spte_to_child_pt(u64 pte, int level); void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, - int min_level, gfn_t goal_gfn); + int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); void tdp_iter_refresh_walk(struct tdp_iter *iter); u64 *tdp_iter_root_pt(struct tdp_iter *iter); -- cgit v1.2.3 From ed5e484b79e8a9b8be714bd85b6fc70bd6dc99a7 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:19 -0800 Subject: KVM: x86/mmu: Ensure forward progress when yielding in TDP MMU iter In some functions the TDP iter risks not making forward progress if two threads livelock yielding to one another. This is possible if two threads are trying to execute wrprot_gfn_range. Each could write protect an entry and then yield. This would reset the tdp_iter's walk over the paging structure and the loop would end up repeating the same entry over and over, preventing either thread from making forward progress. Fix this issue by only yielding if the loop has made forward progress since the last yield. 
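A minimal userspace analogue of the forward-progress rule is sketched below (an assumed simplification with hypothetical names, not the kernel code): the walker records the position at which it last yielded and refuses to yield again until it has moved past that point, which is what breaks the livelock described above.

	#include <sched.h>
	#include <stdio.h>

	struct walker {
		unsigned long next_gfn;		/* next position the walk will visit */
		unsigned long yielded_gfn;	/* position at the time of the last yield */
	};

	static int cond_yield(struct walker *w)
	{
		/* No forward progress since the last yield: do not yield again. */
		if (w->next_gfn == w->yielded_gfn)
			return 0;

		sched_yield();			/* stand-in for cond_resched_lock() */
		w->yielded_gfn = w->next_gfn;	/* the walk restarts from next_gfn */
		return 1;
	}

	int main(void)
	{
		struct walker w = { .next_gfn = 5, .yielded_gfn = 0 };

		printf("first visit of gfn 5:  %s\n",
		       cond_yield(&w) ? "yielded" : "kept going");

		/* The walk was reset and reaches gfn 5 again with no new progress. */
		printf("second visit of gfn 5: %s\n",
		       cond_yield(&w) ? "yielded" : "kept going");
		return 0;
	}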
Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU") Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-14-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_iter.c | 18 +----------------- arch/x86/kvm/mmu/tdp_iter.h | 7 ++++++- arch/x86/kvm/mmu/tdp_mmu.c | 21 ++++++++++++++++----- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index 9917c55b7d24..1a09d212186b 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -31,6 +31,7 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, WARN_ON(root_level > PT64_ROOT_MAX_LEVEL); iter->next_last_level_gfn = next_last_level_gfn; + iter->yielded_gfn = iter->next_last_level_gfn; iter->root_level = root_level; iter->min_level = min_level; iter->level = root_level; @@ -158,23 +159,6 @@ void tdp_iter_next(struct tdp_iter *iter) iter->valid = false; } -/* - * Restart the walk over the paging structure from the root, starting from the - * highest gfn the iterator had previously reached. Assumes that the entire - * paging structure, except the root page, may have been completely torn down - * and rebuilt. - */ -void tdp_iter_refresh_walk(struct tdp_iter *iter) -{ - gfn_t next_last_level_gfn = iter->next_last_level_gfn; - - if (iter->gfn > next_last_level_gfn) - next_last_level_gfn = iter->gfn; - - tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], - iter->root_level, iter->min_level, next_last_level_gfn); -} - u64 *tdp_iter_root_pt(struct tdp_iter *iter) { return iter->pt_path[iter->root_level - 1]; diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index b2dd269c631f..d480c540ee27 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -16,6 +16,12 @@ struct tdp_iter { * for this GFN. */ gfn_t next_last_level_gfn; + /* + * The next_last_level_gfn at the time when the thread last + * yielded. Only yielding when the next_last_level_gfn != + * yielded_gfn helps ensure forward progress. + */ + gfn_t yielded_gfn; /* Pointers to the page tables traversed to reach the current SPTE */ u64 *pt_path[PT64_ROOT_MAX_LEVEL]; /* A pointer to the current SPTE */ @@ -54,7 +60,6 @@ u64 *spte_to_child_pt(u64 pte, int level); void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); -void tdp_iter_refresh_walk(struct tdp_iter *iter); u64 *tdp_iter_root_pt(struct tdp_iter *iter); #endif /* __KVM_X86_MMU_TDP_ITER_H */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 8f7b120597f3..7cfc0639b1ef 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -451,8 +451,9 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, * TLB flush before yielding. * * If this function yields, it will also reset the tdp_iter's walk over the - * paging structure and the calling function should allow the iterator to - * continue its traversal from the paging structure root. + * paging structure and the calling function should skip to the next + * iteration to allow the iterator to continue its traversal from the + * paging structure root. * * Return true if this function yielded and the iterator's traversal was reset. * Return false if a yield was not needed. 
@@ -460,12 +461,22 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter, bool flush) { + /* Ensure forward progress has been made before yielding. */ + if (iter->next_last_level_gfn == iter->yielded_gfn) + return false; + if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { if (flush) kvm_flush_remote_tlbs(kvm); cond_resched_lock(&kvm->mmu_lock); - tdp_iter_refresh_walk(iter); + + WARN_ON(iter->gfn > iter->next_last_level_gfn); + + tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], + iter->root_level, iter->min_level, + iter->next_last_level_gfn); + return true; } @@ -505,8 +516,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte(kvm, &iter, 0); - flush_needed = !can_yield || - !tdp_mmu_iter_cond_resched(kvm, &iter, true); + flush_needed = !(can_yield && + tdp_mmu_iter_cond_resched(kvm, &iter, true)); } return flush_needed; } -- cgit v1.2.3 From 1af4a96025b33587ca953c7ef12a1b20c6e70412 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:20 -0800 Subject: KVM: x86/mmu: Yield in TDU MMU iter even if no SPTES changed Given certain conditions, some TDP MMU functions may not yield reliably / frequently enough. For example, if a paging structure was very large but had few, if any writable entries, wrprot_gfn_range could traverse many entries before finding a writable entry and yielding because the check for yielding only happens after an SPTE is modified. Fix this issue by moving the yield to the beginning of the loop. Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU") Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-15-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 7cfc0639b1ef..c8a1149cb229 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -501,6 +501,12 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, bool flush_needed = false; tdp_root_for_each_pte(iter, root, start, end) { + if (can_yield && + tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) { + flush_needed = false; + continue; + } + if (!is_shadow_present_pte(iter.old_spte)) continue; @@ -515,9 +521,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, continue; tdp_mmu_set_spte(kvm, &iter, 0); - - flush_needed = !(can_yield && - tdp_mmu_iter_cond_resched(kvm, &iter, true)); + flush_needed = true; } return flush_needed; } @@ -880,6 +884,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, for_each_tdp_pte_min_level(iter, root->spt, root->role.level, min_level, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) continue; @@ -888,8 +895,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter, false); } return spte_set; } @@ -933,6 +938,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, bool spte_set = false; tdp_root_for_each_leaf_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if 
(spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -947,8 +955,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter, false); } return spte_set; } @@ -1056,6 +1062,9 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, bool spte_set = false; tdp_root_for_each_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if (!is_shadow_present_pte(iter.old_spte)) continue; @@ -1063,8 +1072,6 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter, false); } return spte_set; @@ -1105,6 +1112,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm, bool spte_set = false; tdp_root_for_each_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) { + spte_set = false; + continue; + } + if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) continue; @@ -1116,7 +1128,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm, tdp_mmu_set_spte(kvm, &iter, 0); - spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true); + spte_set = true; } if (spte_set) -- cgit v1.2.3 From 0f99ee2c7a58fc281c084d3acc0f0013bec7ec5a Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:21 -0800 Subject: KVM: x86/mmu: Skip no-op changes in TDP MMU functions Skip setting SPTEs if no change is expected. Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-16-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index c8a1149cb229..aeb05f626b55 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -888,7 +888,8 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, continue; if (!is_shadow_present_pte(iter.old_spte) || - !is_last_spte(iter.old_spte, iter.level)) + !is_last_spte(iter.old_spte, iter.level) || + !(iter.old_spte & PT_WRITABLE_MASK)) continue; new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -1065,7 +1066,8 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) continue; - if (!is_shadow_present_pte(iter.old_spte)) + if (!is_shadow_present_pte(iter.old_spte) || + iter.old_spte & shadow_dirty_mask) continue; new_spte = iter.old_spte | shadow_dirty_mask; -- cgit v1.2.3 From f1b3b06a058bb5c636ffad0afae138fe30775881 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:22 -0800 Subject: KVM: x86/mmu: Clear dirtied pages mask bit before early break In clear_dirty_pt_masked, the loop is intended to exit early after processing each of the GFNs with corresponding bits set in mask. This does not work as intended if another thread has already cleared the dirty bit or writable bit on the SPTE. In that case, the loop would proceed to the next iteration early and the bit in mask would not be cleared. As a result the loop could not exit early and would proceed uselessly. Move the unsetting of the mask bit before the check for a no-op SPTE change. 
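The ordering requirement is easy to reproduce in a standalone loop. The toy below (illustrative only, not the kernel code) clears the per-entry mask bit before the no-op check, so the loop's mask-is-empty exit condition still triggers even when another thread already handled one of the entries.

	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = 0x5;		/* entries 0 and 2 still need clearing */
		int already_clean[8] = { [2] = 1 };	/* entry 2 was handled by another thread */
		int visited = 0;

		for (unsigned long i = 0; i < 8 && mask; i++) {
			visited++;

			if (!(mask & (1UL << i)))
				continue;

			mask &= ~(1UL << i);		/* clear the bit first ... */

			if (already_clean[i])		/* ... then skip the no-op change */
				continue;

			printf("cleared dirty state of entry %lu\n", i);
		}
		printf("loop visited %d entries before the mask emptied\n", visited);
		return 0;
	}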
Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU") Suggested-by: Sean Christopherson Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-17-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index aeb05f626b55..a75e92164a8b 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1007,6 +1007,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, !(mask & (1UL << (iter.gfn - gfn)))) continue; + mask &= ~(1UL << (iter.gfn - gfn)); + if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -1020,8 +1022,6 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, } tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); - - mask &= ~(1UL << (iter.gfn - gfn)); } } -- cgit v1.2.3 From 7cca2d0b7e7d9f3cd740d41afdc00051c9b508a0 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:23 -0800 Subject: KVM: x86/mmu: Protect TDP MMU page table memory with RCU In order to enable concurrent modifications to the paging structures in the TDP MMU, threads must be able to safely remove pages of page table memory while other threads are traversing the same memory. To ensure threads do not access PT memory after it is freed, protect PT memory with RCU. Protecting concurrent accesses to page table memory from use-after-free bugs could also have been acomplished using walk_shadow_page_lockless_begin/end() and READING_SHADOW_PAGE_TABLES, coupling with the barriers in a TLB flush. The use of RCU for this case has several distinct advantages over that approach. 1. Disabling interrupts for long running operations is not desirable. Future commits will allow operations besides page faults to operate without the exclusive protection of the MMU lock and those operations are too long to disable iterrupts for their duration. 2. The use of RCU here avoids long blocking / spinning operations in perfromance critical paths. By freeing memory with an asynchronous RCU API we avoid the longer wait times TLB flushes experience when overlapping with a thread in walk_shadow_page_lockless_begin/end(). 3. RCU provides a separation of concerns when removing memory from the paging structure. Because the RCU callback to free memory can be scheduled immediately after a TLB flush, there's no need for the thread to manually free a queue of pages later, as commit_zap_pages does. Fixes: 95fb5b0258b7 ("kvm: x86/mmu: Support MMIO in the TDP MMU") Reviewed-by: Peter Feiner Suggested-by: Sean Christopherson Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-18-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu_internal.h | 3 ++ arch/x86/kvm/mmu/tdp_iter.c | 16 +++---- arch/x86/kvm/mmu/tdp_iter.h | 10 +++-- arch/x86/kvm/mmu/tdp_mmu.c | 95 +++++++++++++++++++++++++++++++++++++---- 4 files changed, 103 insertions(+), 21 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 5ec15e4160b1..98db78a26957 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -57,6 +57,9 @@ struct kvm_mmu_page { atomic_t write_flooding_count; bool tdp_mmu_page; + + /* Used for freeing the page asyncronously if it is a TDP MMU page. 
*/ + struct rcu_head rcu_head; }; extern struct kmem_cache *mmu_page_header_cache; diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index 1a09d212186b..e5f148106e20 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -12,7 +12,7 @@ static void tdp_iter_refresh_sptep(struct tdp_iter *iter) { iter->sptep = iter->pt_path[iter->level - 1] + SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level); - iter->old_spte = READ_ONCE(*iter->sptep); + iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep)); } static gfn_t round_gfn_for_level(gfn_t gfn, int level) @@ -35,7 +35,7 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, iter->root_level = root_level; iter->min_level = min_level; iter->level = root_level; - iter->pt_path[iter->level - 1] = root_pt; + iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt; iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); tdp_iter_refresh_sptep(iter); @@ -48,7 +48,7 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, * address of the child page table referenced by the SPTE. Returns null if * there is no such entry. */ -u64 *spte_to_child_pt(u64 spte, int level) +tdp_ptep_t spte_to_child_pt(u64 spte, int level) { /* * There's no child entry if this entry isn't present or is a @@ -57,7 +57,7 @@ u64 *spte_to_child_pt(u64 spte, int level) if (!is_shadow_present_pte(spte) || is_last_spte(spte, level)) return NULL; - return __va(spte_to_pfn(spte) << PAGE_SHIFT); + return (tdp_ptep_t)__va(spte_to_pfn(spte) << PAGE_SHIFT); } /* @@ -66,7 +66,7 @@ u64 *spte_to_child_pt(u64 spte, int level) */ static bool try_step_down(struct tdp_iter *iter) { - u64 *child_pt; + tdp_ptep_t child_pt; if (iter->level == iter->min_level) return false; @@ -75,7 +75,7 @@ static bool try_step_down(struct tdp_iter *iter) * Reread the SPTE before stepping down to avoid traversing into page * tables that are no longer linked from this entry. */ - iter->old_spte = READ_ONCE(*iter->sptep); + iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep)); child_pt = spte_to_child_pt(iter->old_spte, iter->level); if (!child_pt) @@ -109,7 +109,7 @@ static bool try_step_side(struct tdp_iter *iter) iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); iter->next_last_level_gfn = iter->gfn; iter->sptep++; - iter->old_spte = READ_ONCE(*iter->sptep); + iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep)); return true; } @@ -159,7 +159,7 @@ void tdp_iter_next(struct tdp_iter *iter) iter->valid = false; } -u64 *tdp_iter_root_pt(struct tdp_iter *iter) +tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter) { return iter->pt_path[iter->root_level - 1]; } diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index d480c540ee27..4cc177d75c4a 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -7,6 +7,8 @@ #include "mmu.h" +typedef u64 __rcu *tdp_ptep_t; + /* * A TDP iterator performs a pre-order walk over a TDP paging structure. 
*/ @@ -23,9 +25,9 @@ struct tdp_iter { */ gfn_t yielded_gfn; /* Pointers to the page tables traversed to reach the current SPTE */ - u64 *pt_path[PT64_ROOT_MAX_LEVEL]; + tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL]; /* A pointer to the current SPTE */ - u64 *sptep; + tdp_ptep_t sptep; /* The lowest GFN mapped by the current SPTE */ gfn_t gfn; /* The level of the root page given to the iterator */ @@ -55,11 +57,11 @@ struct tdp_iter { #define for_each_tdp_pte(iter, root, root_level, start, end) \ for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end) -u64 *spte_to_child_pt(u64 pte, int level); +tdp_ptep_t spte_to_child_pt(u64 pte, int level); void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); -u64 *tdp_iter_root_pt(struct tdp_iter *iter); +tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter); #endif /* __KVM_X86_MMU_TDP_ITER_H */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index a75e92164a8b..9e4009068920 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -42,6 +42,12 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) return; WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); + + /* + * Ensure that all the outstanding RCU callbacks to free shadow pages + * can run before the VM is torn down. + */ + rcu_barrier(); } static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) @@ -196,6 +202,28 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) return __pa(root->spt); } +static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) +{ + free_page((unsigned long)sp->spt); + kmem_cache_free(mmu_page_header_cache, sp); +} + +/* + * This is called through call_rcu in order to free TDP page table memory + * safely with respect to other kernel threads that may be operating on + * the memory. + * By only accessing TDP MMU page table memory in an RCU read critical + * section, and freeing it after a grace period, lockless access to that + * memory won't use it after it is freed. 
+ */ +static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) +{ + struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page, + rcu_head); + + tdp_mmu_free_sp(sp); +} + static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, u64 old_spte, u64 new_spte, int level); @@ -269,8 +297,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt) kvm_flush_remote_tlbs_with_address(kvm, gfn, KVM_PAGES_PER_HPAGE(level)); - free_page((unsigned long)pt); - kmem_cache_free(mmu_page_header_cache, sp); + call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback); } /** @@ -390,13 +417,13 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte, bool record_acc_track, bool record_dirty_log) { - u64 *root_pt = tdp_iter_root_pt(iter); + tdp_ptep_t root_pt = tdp_iter_root_pt(iter); struct kvm_mmu_page *root = sptep_to_sp(root_pt); int as_id = kvm_mmu_page_as_id(root); lockdep_assert_held(&kvm->mmu_lock); - WRITE_ONCE(*iter->sptep, new_spte); + WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, iter->level); @@ -466,10 +493,13 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, return false; if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + rcu_read_unlock(); + if (flush) kvm_flush_remote_tlbs(kvm); cond_resched_lock(&kvm->mmu_lock); + rcu_read_lock(); WARN_ON(iter->gfn > iter->next_last_level_gfn); @@ -500,6 +530,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, struct tdp_iter iter; bool flush_needed = false; + rcu_read_lock(); + tdp_root_for_each_pte(iter, root, start, end) { if (can_yield && tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) { @@ -523,6 +555,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte(kvm, &iter, 0); flush_needed = true; } + + rcu_read_unlock(); return flush_needed; } @@ -568,13 +602,15 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, if (unlikely(is_noslot_pfn(pfn))) { new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); - trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte); + trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, + new_spte); } else { make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, pfn, iter->old_spte, prefault, true, map_writable, !shadow_accessed_mask, &new_spte); - trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep); + trace_kvm_mmu_set_spte(iter->level, iter->gfn, + rcu_dereference(iter->sptep)); } if (new_spte == iter->old_spte) @@ -597,7 +633,8 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, if (unlikely(is_mmio_spte(new_spte))) ret = RET_PF_EMULATE; - trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep); + trace_kvm_mmu_set_spte(iter->level, iter->gfn, + rcu_dereference(iter->sptep)); if (!prefault) vcpu->stat.pf_fixed++; @@ -635,6 +672,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, huge_page_disallowed, &req_level); trace_kvm_mmu_spte_requested(gpa, level, pfn); + + rcu_read_lock(); + tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { if (nx_huge_page_workaround_enabled) disallowed_hugepage_adjust(iter.old_spte, gfn, @@ -660,7 +700,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, * because the new value informs the !present * path below. 
*/ - iter.old_spte = READ_ONCE(*iter.sptep); + iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); } if (!is_shadow_present_pte(iter.old_spte)) { @@ -678,11 +718,14 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, } } - if (WARN_ON(iter.level != level)) + if (WARN_ON(iter.level != level)) { + rcu_read_unlock(); return RET_PF_RETRY; + } ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter, pfn, prefault); + rcu_read_unlock(); return ret; } @@ -753,6 +796,8 @@ static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot, int young = 0; u64 new_spte = 0; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, start, end) { /* * If we have a non-accessed entry we don't need to change the @@ -784,6 +829,8 @@ static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot, trace_kvm_age_page(iter.gfn, iter.level, slot, young); } + rcu_read_unlock(); + return young; } @@ -829,6 +876,8 @@ static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, u64 new_spte; int need_flush = 0; + rcu_read_lock(); + WARN_ON(pte_huge(*ptep)); new_pfn = pte_pfn(*ptep); @@ -857,6 +906,8 @@ static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, if (need_flush) kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); + rcu_read_unlock(); + return 0; } @@ -880,6 +931,8 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, u64 new_spte; bool spte_set = false; + rcu_read_lock(); + BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); for_each_tdp_pte_min_level(iter, root->spt, root->role.level, @@ -897,6 +950,8 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; } + + rcu_read_unlock(); return spte_set; } @@ -938,6 +993,8 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, u64 new_spte; bool spte_set = false; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, start, end) { if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) continue; @@ -957,6 +1014,8 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; } + + rcu_read_unlock(); return spte_set; } @@ -998,6 +1057,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, struct tdp_iter iter; u64 new_spte; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), gfn + BITS_PER_LONG) { if (!mask) @@ -1023,6 +1084,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); } + + rcu_read_unlock(); } /* @@ -1062,6 +1125,8 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, u64 new_spte; bool spte_set = false; + rcu_read_lock(); + tdp_root_for_each_pte(iter, root, start, end) { if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) continue; @@ -1076,6 +1141,7 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, spte_set = true; } + rcu_read_unlock(); return spte_set; } @@ -1113,6 +1179,8 @@ static void zap_collapsible_spte_range(struct kvm *kvm, kvm_pfn_t pfn; bool spte_set = false; + rcu_read_lock(); + tdp_root_for_each_pte(iter, root, start, end) { if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) { spte_set = false; @@ -1133,6 +1201,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm, spte_set = true; } + rcu_read_unlock(); if (spte_set) kvm_flush_remote_tlbs(kvm); } @@ 
-1169,6 +1238,8 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, u64 new_spte; bool spte_set = false; + rcu_read_lock(); + tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { if (!is_writable_pte(iter.old_spte)) break; @@ -1180,6 +1251,8 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, spte_set = true; } + rcu_read_unlock(); + return spte_set; } @@ -1220,10 +1293,14 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, *root_level = vcpu->arch.mmu->shadow_root_level; + rcu_read_lock(); + tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { leaf = iter.level; sptes[leaf] = iter.old_spte; } + rcu_read_unlock(); + return leaf; } -- cgit v1.2.3 From 26128cb6c7e6731fe644c687af97733adfdb5ee9 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:12 -0800 Subject: locking/rwlocks: Add contention detection for rwlocks rwlocks do not currently have any facility to detect contention like spinlocks do. In order to allow users of rwlocks to better manage latency, add contention detection for queued rwlocks. CC: Ingo Molnar CC: Will Deacon Acked-by: Peter Zijlstra Acked-by: Davidlohr Bueso Acked-by: Waiman Long Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-7-bgardon@google.com> Signed-off-by: Paolo Bonzini --- include/asm-generic/qrwlock.h | 24 ++++++++++++++++++------ include/linux/rwlock.h | 7 +++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 84ce841ce735..0020d3b820a7 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -14,6 +14,7 @@ #include #include +#include /* * Writer states & reader shift and bias. @@ -116,15 +117,26 @@ static inline void queued_write_unlock(struct qrwlock *lock) smp_store_release(&lock->wlocked, 0); } +/** + * queued_rwlock_is_contended - check if the lock is contended + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock contended, 0 otherwise + */ +static inline int queued_rwlock_is_contended(struct qrwlock *lock) +{ + return arch_spin_is_locked(&lock->wait_lock); +} + /* * Remapping rwlock architecture specific functions to the corresponding * queue rwlock functions. 
*/ -#define arch_read_lock(l) queued_read_lock(l) -#define arch_write_lock(l) queued_write_lock(l) -#define arch_read_trylock(l) queued_read_trylock(l) -#define arch_write_trylock(l) queued_write_trylock(l) -#define arch_read_unlock(l) queued_read_unlock(l) -#define arch_write_unlock(l) queued_write_unlock(l) +#define arch_read_lock(l) queued_read_lock(l) +#define arch_write_lock(l) queued_write_lock(l) +#define arch_read_trylock(l) queued_read_trylock(l) +#define arch_write_trylock(l) queued_write_trylock(l) +#define arch_read_unlock(l) queued_read_unlock(l) +#define arch_write_unlock(l) queued_write_unlock(l) +#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l) #endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 3dcd617e65ae..7ce9a51ae5c0 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -128,4 +128,11 @@ do { \ 1 : ({ local_irq_restore(flags); 0; }); \ }) +#ifdef arch_rwlock_is_contended +#define rwlock_is_contended(lock) \ + arch_rwlock_is_contended(&(lock)->raw_lock) +#else +#define rwlock_is_contended(lock) ((void)(lock), 0) +#endif /* arch_rwlock_is_contended */ + #endif /* __LINUX_RWLOCK_H */ -- cgit v1.2.3 From a09a689a534183c48f200bc2de1ae61ae9c462ad Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:13 -0800 Subject: sched: Add needbreak for rwlocks Contention awareness while holding a spin lock is essential for reducing latency when long running kernel operations can hold that lock. Add the same contention detection interface for read/write spin locks. CC: Ingo Molnar CC: Will Deacon Acked-by: Peter Zijlstra Acked-by: Davidlohr Bueso Acked-by: Waiman Long Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-8-bgardon@google.com> Signed-off-by: Paolo Bonzini --- include/linux/sched.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 6e3a5eeec509..5d1378e5a040 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1912,6 +1912,23 @@ static inline int spin_needbreak(spinlock_t *lock) #endif } +/* + * Check if a rwlock is contended. + * Returns non-zero if there is another task waiting on the rwlock. + * Returns zero if the lock is not contended or the system / underlying + * rwlock implementation does not support contention detection. + * Technically does not depend on CONFIG_PREEMPTION, but a general need + * for low latency. + */ +static inline int rwlock_needbreak(rwlock_t *lock) +{ +#ifdef CONFIG_PREEMPTION + return rwlock_is_contended(lock); +#else + return 0; +#endif +} + static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); -- cgit v1.2.3 From f3d4b4b1dc1c5fb9ea17cac14133463bfe72f170 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:14 -0800 Subject: sched: Add cond_resched_rwlock Safely rescheduling while holding a spin lock is essential for keeping long running kernel operations running smoothly. Add the facility to cond_resched rwlocks. 
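For context, the intended caller pattern is roughly the following sketch (illustrative only: struct foo, its lock and nr_entries fields, and zap_one() are made-up placeholders, not code from this series):

static void zap_all(struct foo *foo)
{
	unsigned long i;

	write_lock(&foo->lock);
	for (i = 0; i < foo->nr_entries; i++) {
		zap_one(foo, i);	/* hypothetical per-entry work */

		/*
		 * Yield if another task is spinning on foo->lock or a
		 * reschedule is due. The helper drops and reacquires the
		 * lock and returns non-zero if it dropped it, so callers
		 * that cached state derived under the lock must revalidate.
		 */
		if (need_resched() || rwlock_needbreak(&foo->lock))
			cond_resched_rwlock_write(&foo->lock);
	}
	write_unlock(&foo->lock);
}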
CC: Ingo Molnar CC: Will Deacon Acked-by: Peter Zijlstra Acked-by: Davidlohr Bueso Acked-by: Waiman Long Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-9-bgardon@google.com> Signed-off-by: Paolo Bonzini --- include/linux/sched.h | 12 ++++++++++++ kernel/sched/core.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 5d1378e5a040..3052d16da3cf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1883,12 +1883,24 @@ static inline int _cond_resched(void) { return 0; } }) extern int __cond_resched_lock(spinlock_t *lock); +extern int __cond_resched_rwlock_read(rwlock_t *lock); +extern int __cond_resched_rwlock_write(rwlock_t *lock); #define cond_resched_lock(lock) ({ \ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ __cond_resched_lock(lock); \ }) +#define cond_resched_rwlock_read(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_rwlock_read(lock); \ +}) + +#define cond_resched_rwlock_write(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_rwlock_write(lock); \ +}) + static inline void cond_resched_rcu(void) { #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 15d2562118d1..ade357642279 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6695,6 +6695,46 @@ int __cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(__cond_resched_lock); +int __cond_resched_rwlock_read(rwlock_t *lock) +{ + int resched = should_resched(PREEMPT_LOCK_OFFSET); + int ret = 0; + + lockdep_assert_held_read(lock); + + if (rwlock_needbreak(lock) || resched) { + read_unlock(lock); + if (resched) + preempt_schedule_common(); + else + cpu_relax(); + ret = 1; + read_lock(lock); + } + return ret; +} +EXPORT_SYMBOL(__cond_resched_rwlock_read); + +int __cond_resched_rwlock_write(rwlock_t *lock) +{ + int resched = should_resched(PREEMPT_LOCK_OFFSET); + int ret = 0; + + lockdep_assert_held_write(lock); + + if (rwlock_needbreak(lock) || resched) { + write_unlock(lock); + if (resched) + preempt_schedule_common(); + else + cpu_relax(); + ret = 1; + write_lock(lock); + } + return ret; +} +EXPORT_SYMBOL(__cond_resched_rwlock_write); + /** * yield - yield the current processor to other threads. * -- cgit v1.2.3 From 531810caa9f4bc99ffbb90e09256792c56a6b07a Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:24 -0800 Subject: KVM: x86/mmu: Use an rwlock for the x86 MMU Add a read / write lock to be used in place of the MMU spinlock on x86. The rwlock will enable the TDP MMU to handle page faults, and other operations in parallel in future commits. 
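To illustrate what the conversion buys, here is a rough sketch of a read-side walker that several vCPUs could run concurrently once mmu_lock is an rwlock (hypothetical code: walk_range() and handle_one_gfn() are placeholders, not part of this patch):

static int walk_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
	gfn_t gfn;
	int ret = 0;

	read_lock(&kvm->mmu_lock);
	for (gfn = start; gfn < end && !ret; gfn++) {
		ret = handle_one_gfn(kvm, gfn);	/* placeholder per-gfn work */

		/*
		 * Back off if a writer is waiting; any state observed
		 * before the lock was dropped must be revalidated.
		 */
		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
			cond_resched_rwlock_read(&kvm->mmu_lock);
	}
	read_unlock(&kvm->mmu_lock);
	return ret;
}

Readers do not exclude one another, so page faults taken under read_lock() can proceed in parallel; only operations that restructure the paging tree still need write_lock().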
Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-19-bgardon@google.com> [Introduce virt/kvm/mmu_lock.h - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/mmu/mmu.c | 90 ++++++++++++++++++++--------------------- arch/x86/kvm/mmu/page_track.c | 8 ++-- arch/x86/kvm/mmu/paging_tmpl.h | 8 ++-- arch/x86/kvm/mmu/tdp_mmu.c | 20 ++++----- arch/x86/kvm/x86.c | 4 +- include/linux/kvm_host.h | 5 +++ virt/kvm/dirty_ring.c | 5 ++- virt/kvm/kvm_main.c | 37 +++++++++-------- virt/kvm/mmu_lock.h | 23 +++++++++++ 10 files changed, 118 insertions(+), 84 deletions(-) create mode 100644 virt/kvm/mmu_lock.h diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fa7b2df6422b..c445a51244d3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -348,6 +348,8 @@ struct kvm_mmu_root_info { #define KVM_MMU_NUM_PREV_ROOTS 3 +#define KVM_HAVE_MMU_RWLOCK + struct kvm_mmu_page; /* diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 5b364ff9c115..329930d57774 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2010,9 +2010,9 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, flush |= kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } - if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); - cond_resched_lock(&vcpu->kvm->mmu_lock); + cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); flush = false; } } @@ -2464,7 +2464,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) */ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) { - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - @@ -2475,7 +2475,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) @@ -2486,7 +2486,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) pgprintk("%s: looking for gfn %llx\n", __func__, gfn); r = 0; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { pgprintk("%s: gfn %llx role %x\n", __func__, gfn, sp->role.word); @@ -2494,7 +2494,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return r; } @@ -3186,7 +3186,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, return; } - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) @@ -3209,7 +3209,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); @@ -3230,16 +3230,16 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, { struct kvm_mmu_page *sp; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu)) { - 
spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return INVALID_PAGE; } sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL); ++sp->root_count; - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return __pa(sp->spt); } @@ -3410,17 +3410,17 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) !smp_load_acquire(&sp->unsync_children)) return; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); mmu_sync_children(vcpu, sp); kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return; } - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); for (i = 0; i < 4; ++i) { @@ -3434,7 +3434,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) } kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); @@ -3718,7 +3718,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, return r; r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; r = make_mmu_pages_available(vcpu); @@ -3733,7 +3733,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, prefault, is_tdp); out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return r; } @@ -4959,7 +4959,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, */ mmu_topup_memory_caches(vcpu, true); - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); @@ -4991,7 +4991,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, } kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) @@ -5189,14 +5189,14 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, if (iterator.rmap) flush |= fn(kvm, iterator.rmap); - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { if (flush && lock_flush_tlb) { kvm_flush_remote_tlbs_with_address(kvm, start_gfn, iterator.gfn - start_gfn + 1); flush = false; } - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); } } @@ -5346,7 +5346,7 @@ restart: * be in active use by the guest. 
*/ if (batch >= BATCH_ZAP_PAGES && - cond_resched_lock(&kvm->mmu_lock)) { + cond_resched_rwlock_write(&kvm->mmu_lock)) { batch = 0; goto restart; } @@ -5379,7 +5379,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) { lockdep_assert_held(&kvm->slots_lock); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); trace_kvm_mmu_zap_all_fast(kvm); /* @@ -5406,7 +5406,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) if (kvm->arch.tdp_mmu_enabled) kvm_tdp_mmu_zap_all(kvm); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) @@ -5448,7 +5448,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) int i; bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); kvm_for_each_memslot(memslot, slots) { @@ -5472,7 +5472,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) kvm_flush_remote_tlbs(kvm); } - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool slot_rmap_write_protect(struct kvm *kvm, @@ -5487,12 +5487,12 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, start_level, KVM_MAX_HUGEPAGE_LEVEL, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); /* * We can flush all the TLBs out of the mmu lock without TLB @@ -5552,13 +5552,13 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *memslot) { /* FIXME: const-ify all uses of struct kvm_memory_slot. 
*/ - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, kvm_mmu_zap_collapsible_spte, true); if (kvm->arch.tdp_mmu_enabled) kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, @@ -5581,11 +5581,11 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); /* * It's also safe to flush TLBs out of mmu lock here as currently this @@ -5603,12 +5603,12 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); @@ -5620,11 +5620,11 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); @@ -5637,14 +5637,14 @@ void kvm_mmu_zap_all(struct kvm *kvm) LIST_HEAD(invalid_list); int ign; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); restart: list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { if (WARN_ON(sp->role.invalid)) continue; if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) goto restart; - if (cond_resched_lock(&kvm->mmu_lock)) + if (cond_resched_rwlock_write(&kvm->mmu_lock)) goto restart; } @@ -5653,7 +5653,7 @@ restart: if (kvm->arch.tdp_mmu_enabled) kvm_tdp_mmu_zap_all(kvm); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) @@ -5713,7 +5713,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) continue; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvm_has_zapped_obsolete_pages(kvm)) { kvm_mmu_commit_zap_page(kvm, @@ -5724,7 +5724,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan); unlock: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); /* @@ -5944,7 +5944,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) ulong to_zap; rcu_idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); ratio = READ_ONCE(nx_huge_pages_recovery_ratio); to_zap = ratio ? 
DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; @@ -5969,14 +5969,14 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) WARN_ON_ONCE(sp->lpage_disallowed); } - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { kvm_mmu_commit_zap_page(kvm, &invalid_list); - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); } } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, rcu_idx); } diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 8443a675715b..34bb0ec69bd8 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -184,9 +184,9 @@ kvm_page_track_register_notifier(struct kvm *kvm, head = &kvm->arch.track_notifier_head; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); hlist_add_head_rcu(&n->node, &head->track_notifier_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); @@ -202,9 +202,9 @@ kvm_page_track_unregister_notifier(struct kvm *kvm, head = &kvm->arch.track_notifier_head; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); hlist_del_rcu(&n->node); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); synchronize_srcu(&head->track_srcu); } EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 50e268eb8e1a..d9f66cc459e8 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -868,7 +868,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, } r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; @@ -881,7 +881,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return r; } @@ -919,7 +919,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) return; } - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) { level = iterator.level; sptep = iterator.sptep; @@ -954,7 +954,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) break; } - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. 
*/ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 9e4009068920..f1fbed72e149 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -59,7 +59,7 @@ static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) static inline bool tdp_mmu_next_root_valid(struct kvm *kvm, struct kvm_mmu_page *root) { - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link)) return false; @@ -117,7 +117,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) { gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); WARN_ON(root->root_count); WARN_ON(!root->tdp_mmu_page); @@ -170,13 +170,13 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu) role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); /* Check for an existing root before allocating a new one. */ for_each_tdp_mmu_root(kvm, root) { if (root->role.word == role.word) { kvm_mmu_get_root(kvm, root); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return root; } } @@ -186,7 +186,7 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu) list_add(&root->link, &kvm->arch.tdp_mmu_roots); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return root; } @@ -421,7 +421,7 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *root = sptep_to_sp(root_pt); int as_id = kvm_mmu_page_as_id(root); - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); @@ -492,13 +492,13 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, if (iter->next_last_level_gfn == iter->yielded_gfn) return false; - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { rcu_read_unlock(); if (flush) kvm_flush_remote_tlbs(kvm); - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); rcu_read_lock(); WARN_ON(iter->gfn > iter->next_last_level_gfn); @@ -1103,7 +1103,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root; int root_as_id; - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); for_each_tdp_mmu_root(kvm, root) { root_as_id = kvm_mmu_page_as_id(root); if (root_as_id != slot->as_id) @@ -1268,7 +1268,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, int root_as_id; bool spte_set = false; - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); for_each_tdp_mmu_root(kvm, root) { root_as_id = kvm_mmu_page_as_id(root); if (root_as_id != slot->as_id) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 912fc418ce99..b3b1cce939ab 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7112,9 +7112,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (vcpu->arch.mmu->direct_map) { unsigned int indirect_shadow_pages; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); diff --git a/include/linux/kvm_host.h 
b/include/linux/kvm_host.h index f3b1013fb22c..f417447129b9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -451,7 +451,12 @@ struct kvm_memslots { }; struct kvm { +#ifdef KVM_HAVE_MMU_RWLOCK + rwlock_t mmu_lock; +#else spinlock_t mmu_lock; +#endif /* KVM_HAVE_MMU_RWLOCK */ + struct mutex slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index 790f17325f8d..7aafefc50aa7 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -9,6 +9,7 @@ #include #include #include +#include "mmu_lock.h" int __weak kvm_cpu_dirty_log_size(void) { @@ -60,9 +61,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) if (!memslot || (offset + __fls(mask)) >= memslot->npages) return; - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); } int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 335a1a2b8edc..48ccdf4e3d04 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -58,6 +58,7 @@ #include "coalesced_mmio.h" #include "async_pf.h" +#include "mmu_lock.h" #include "vfio.h" #define CREATE_TRACE_POINTS @@ -459,13 +460,15 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, int idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + + KVM_MMU_LOCK(kvm); + kvm->mmu_notifier_seq++; if (kvm_set_spte_hva(kvm, address, pte)) kvm_flush_remote_tlbs(kvm); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); } @@ -476,7 +479,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, int need_tlb_flush = 0, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); /* * The count increase must become visible at unlock time as no * spte can be established without taking the mmu_lock and @@ -489,7 +492,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, if (need_tlb_flush || kvm->tlbs_dirty) kvm_flush_remote_tlbs(kvm); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return 0; @@ -500,7 +503,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, { struct kvm *kvm = mmu_notifier_to_kvm(mn); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); /* * This sequence increase will notify the kvm page fault that * the page that is going to be mapped in the spte could have @@ -514,7 +517,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, * in conjunction with the smp_rmb in mmu_notifier_retry(). 
*/ kvm->mmu_notifier_count--; - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); BUG_ON(kvm->mmu_notifier_count < 0); } @@ -528,13 +531,13 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, int young, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); young = kvm_age_hva(kvm, start, end); if (young) kvm_flush_remote_tlbs(kvm); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return young; @@ -549,7 +552,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, int young, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); /* * Even though we do not flush TLB, this will still adversely * affect performance on pre-Haswell Intel EPT, where there is @@ -564,7 +567,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, * more sophisticated heuristic later. */ young = kvm_age_hva(kvm, start, end); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return young; @@ -578,9 +581,9 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, int young, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); young = kvm_test_age_hva(kvm, address); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return young; @@ -745,7 +748,7 @@ static struct kvm *kvm_create_vm(unsigned long type) if (!kvm) return ERR_PTR(-ENOMEM); - spin_lock_init(&kvm->mmu_lock); + KVM_MMU_LOCK_INIT(kvm); mmgrab(current->mm); kvm->mm = current->mm; kvm_eventfd_init(kvm); @@ -1525,7 +1528,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); memset(dirty_bitmap_buffer, 0, n); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; @@ -1541,7 +1544,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); } - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); } if (flush) @@ -1636,7 +1639,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm, if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) return -EFAULT; - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); for (offset = log->first_page, i = offset / BITS_PER_LONG, n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; i++, offset += BITS_PER_LONG) { @@ -1659,7 +1662,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm, offset, mask); } } - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); diff --git a/virt/kvm/mmu_lock.h b/virt/kvm/mmu_lock.h new file mode 100644 index 000000000000..9e1308f9734c --- /dev/null +++ b/virt/kvm/mmu_lock.h @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#ifndef KVM_MMU_LOCK_H +#define KVM_MMU_LOCK_H 1 + +/* + * Architectures can choose whether to use an rwlock or spinlock + * for the mmu_lock. These macros, for use in common code + * only, avoids using #ifdefs in places that must deal with + * multiple architectures. 
+ */ + +#ifdef KVM_HAVE_MMU_RWLOCK +#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) +#else +#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) +#endif /* KVM_HAVE_MMU_RWLOCK */ + +#endif -- cgit v1.2.3 From a9442f594147f95307f691cfba0c31e25dc79b9d Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:25 -0800 Subject: KVM: x86/mmu: Factor out functions to add/remove TDP MMU pages Move the work of adding and removing TDP MMU pages to/from "secondary" data structures to helper functions. These functions will be built on in future commits to enable MMU operations to proceed (mostly) in parallel. No functional change expected. Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-20-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 47 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index f1fbed72e149..5a9e964e0178 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -262,6 +262,39 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, } } +/** + * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU + * + * @kvm: kvm instance + * @sp: the new page + * @account_nx: This page replaces a NX large page and should be marked for + * eventual reclaim. + */ +static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp, + bool account_nx) +{ + lockdep_assert_held_write(&kvm->mmu_lock); + + list_add(&sp->link, &kvm->arch.tdp_mmu_pages); + if (account_nx) + account_huge_nx_page(kvm, sp); +} + +/** + * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU + * + * @kvm: kvm instance + * @sp: the page to be removed + */ +static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + lockdep_assert_held_write(&kvm->mmu_lock); + + list_del(&sp->link); + if (sp->lpage_disallowed) + unaccount_huge_nx_page(kvm, sp); +} + /** * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure * @@ -281,10 +314,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt) trace_kvm_mmu_prepare_zap_page(sp); - list_del(&sp->link); - - if (sp->lpage_disallowed) - unaccount_huge_nx_page(kvm, sp); + tdp_mmu_unlink_page(kvm, sp); for (i = 0; i < PT64_ENT_PER_PAGE; i++) { old_child_spte = READ_ONCE(*(pt + i)); @@ -705,15 +735,16 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, if (!is_shadow_present_pte(iter.old_spte)) { sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level); - list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages); child_pt = sp->spt; + + tdp_mmu_link_page(vcpu->kvm, sp, + huge_page_disallowed && + req_level >= iter.level); + new_spte = make_nonleaf_spte(child_pt, !shadow_accessed_mask); trace_kvm_mmu_get_page(sp, true); - if (huge_page_disallowed && req_level >= iter.level) - account_huge_nx_page(vcpu->kvm, sp); - tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte); } } -- cgit v1.2.3 From 9a77daacc87dee9fd63e31243f21894132ed8407 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:26 -0800 Subject: KVM: x86/mmu: Use atomic ops to set SPTEs in TDP MMU map To prepare for handling page faults in parallel, change the 
TDP MMU page fault handler to use atomic operations to set SPTEs so that changes are not lost if multiple threads attempt to modify the same SPTE. Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-21-bgardon@google.com> [Document new locking rules. - Paolo] Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/locking.rst | 9 ++- arch/x86/include/asm/kvm_host.h | 13 ++++ arch/x86/kvm/mmu/tdp_mmu.c | 142 ++++++++++++++++++++++++++++--------- 3 files changed, 130 insertions(+), 34 deletions(-) diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index b21a34c34a21..0aa4817b466d 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -16,7 +16,14 @@ The acquisition orders for mutexes are as follows: - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring them together is quite rare. -On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock. +On x86: + +- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock + +- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock is + taken inside kvm->arch.mmu_lock, and cannot be taken without already + holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise + there's no need to take kvm->arch.tdp_mmu_pages_lock at all). Everything else is a leaf: no other lock is taken inside the critical sections. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c445a51244d3..bcbb32ef9f00 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1039,6 +1039,19 @@ struct kvm_arch { * tdp_mmu_page set and a root_count of 0. */ struct list_head tdp_mmu_pages; + + /* + * Protects accesses to the following fields when the MMU lock + * is held in read mode: + * - tdp_mmu_pages (above) + * - the link field of struct kvm_mmu_pages used by the TDP MMU + * - lpage_disallowed_mmu_pages + * - the lpage_disallowed_link field of struct kvm_mmu_pages used + * by the TDP MMU + * It is acceptable, but not necessary, to acquire this lock when + * the thread holds the MMU lock in write mode. + */ + spinlock_t tdp_mmu_pages_lock; }; struct kvm_vm_stat { diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 5a9e964e0178..0b5a9339ac55 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -7,6 +7,7 @@ #include "tdp_mmu.h" #include "spte.h" +#include #include #ifdef CONFIG_X86_64 @@ -33,6 +34,7 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm) kvm->arch.tdp_mmu_enabled = true; INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); + spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); } @@ -225,7 +227,8 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) } static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level); + u64 old_spte, u64 new_spte, int level, + bool shared); static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) { @@ -267,17 +270,26 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, * * @kvm: kvm instance * @sp: the new page + * @shared: This operation may not be running under the exclusive use of + * the MMU lock and the operation must synchronize with other + * threads that might be adding or removing pages. * @account_nx: This page replaces a NX large page and should be marked for * eventual reclaim. 
*/ static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp, - bool account_nx) + bool shared, bool account_nx) { - lockdep_assert_held_write(&kvm->mmu_lock); + if (shared) + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + else + lockdep_assert_held_write(&kvm->mmu_lock); list_add(&sp->link, &kvm->arch.tdp_mmu_pages); if (account_nx) account_huge_nx_page(kvm, sp); + + if (shared) + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); } /** @@ -285,14 +297,24 @@ static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp, * * @kvm: kvm instance * @sp: the page to be removed + * @shared: This operation may not be running under the exclusive use of + * the MMU lock and the operation must synchronize with other + * threads that might be adding or removing pages. */ -static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp) +static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp, + bool shared) { - lockdep_assert_held_write(&kvm->mmu_lock); + if (shared) + spin_lock(&kvm->arch.tdp_mmu_pages_lock); + else + lockdep_assert_held_write(&kvm->mmu_lock); list_del(&sp->link); if (sp->lpage_disallowed) unaccount_huge_nx_page(kvm, sp); + + if (shared) + spin_unlock(&kvm->arch.tdp_mmu_pages_lock); } /** @@ -300,28 +322,39 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp) * * @kvm: kvm instance * @pt: the page removed from the paging structure + * @shared: This operation may not be running under the exclusive use + * of the MMU lock and the operation must synchronize with other + * threads that might be modifying SPTEs. * * Given a page table that has been removed from the TDP paging structure, * iterates through the page table to clear SPTEs and free child page tables. */ -static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt) +static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt, + bool shared) { struct kvm_mmu_page *sp = sptep_to_sp(pt); int level = sp->role.level; gfn_t gfn = sp->gfn; u64 old_child_spte; + u64 *sptep; int i; trace_kvm_mmu_prepare_zap_page(sp); - tdp_mmu_unlink_page(kvm, sp); + tdp_mmu_unlink_page(kvm, sp, shared); for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - old_child_spte = READ_ONCE(*(pt + i)); - WRITE_ONCE(*(pt + i), 0); + sptep = pt + i; + + if (shared) { + old_child_spte = xchg(sptep, 0); + } else { + old_child_spte = READ_ONCE(*sptep); + WRITE_ONCE(*sptep, 0); + } handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)), - old_child_spte, 0, level - 1); + old_child_spte, 0, level - 1, shared); } kvm_flush_remote_tlbs_with_address(kvm, gfn, @@ -338,12 +371,16 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt) * @old_spte: The value of the SPTE before the change * @new_spte: The value of the SPTE after the change * @level: the level of the PT the SPTE is part of in the paging structure + * @shared: This operation may not be running under the exclusive use of + * the MMU lock and the operation must synchronize with other + * threads that might be modifying SPTEs. * * Handle bookkeeping that might result from the modification of a SPTE. * This function must be called for all TDP SPTE modifications. 
*/ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level) + u64 old_spte, u64 new_spte, int level, + bool shared) { bool was_present = is_shadow_present_pte(old_spte); bool is_present = is_shadow_present_pte(new_spte); @@ -415,18 +452,51 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, */ if (was_present && !was_leaf && (pfn_changed || !is_present)) handle_removed_tdp_mmu_page(kvm, - spte_to_child_pt(old_spte, level)); + spte_to_child_pt(old_spte, level), shared); } static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level) + u64 old_spte, u64 new_spte, int level, + bool shared) { - __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level); + __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, + shared); handle_changed_spte_acc_track(old_spte, new_spte, level); handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte, level); } +/* + * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the + * associated bookkeeping + * + * @kvm: kvm instance + * @iter: a tdp_iter instance currently on the SPTE that should be set + * @new_spte: The value the SPTE should be set to + * Returns: true if the SPTE was set, false if it was not. If false is returned, + * this function will have no side-effects. + */ +static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter, + u64 new_spte) +{ + u64 *root_pt = tdp_iter_root_pt(iter); + struct kvm_mmu_page *root = sptep_to_sp(root_pt); + int as_id = kvm_mmu_page_as_id(root); + + lockdep_assert_held_read(&kvm->mmu_lock); + + if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte, + new_spte) != iter->old_spte) + return false; + + handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, + iter->level, true); + + return true; +} + + /* * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping * @kvm: kvm instance @@ -456,7 +526,7 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, - iter->level); + iter->level, false); if (record_acc_track) handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level); @@ -630,23 +700,18 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, int ret = 0; int make_spte_ret = 0; - if (unlikely(is_noslot_pfn(pfn))) { + if (unlikely(is_noslot_pfn(pfn))) new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); - trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, - new_spte); - } else { + else make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, pfn, iter->old_spte, prefault, true, map_writable, !shadow_accessed_mask, &new_spte); - trace_kvm_mmu_set_spte(iter->level, iter->gfn, - rcu_dereference(iter->sptep)); - } if (new_spte == iter->old_spte) ret = RET_PF_SPURIOUS; - else - tdp_mmu_set_spte(vcpu->kvm, iter, new_spte); + else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) + return RET_PF_RETRY; /* * If the page fault was caused by a write but the page is write @@ -660,8 +725,13 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write, } /* If a MMIO SPTE is installed, the MMIO will need to be emulated. 
*/ - if (unlikely(is_mmio_spte(new_spte))) + if (unlikely(is_mmio_spte(new_spte))) { + trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, + new_spte); ret = RET_PF_EMULATE; + } else + trace_kvm_mmu_set_spte(iter->level, iter->gfn, + rcu_dereference(iter->sptep)); trace_kvm_mmu_set_spte(iter->level, iter->gfn, rcu_dereference(iter->sptep)); @@ -720,7 +790,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, */ if (is_shadow_present_pte(iter.old_spte) && is_large_pte(iter.old_spte)) { - tdp_mmu_set_spte(vcpu->kvm, &iter, 0); + if (!tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, 0)) + break; kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn, KVM_PAGES_PER_HPAGE(iter.level)); @@ -737,19 +808,24 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level); child_pt = sp->spt; - tdp_mmu_link_page(vcpu->kvm, sp, - huge_page_disallowed && - req_level >= iter.level); - new_spte = make_nonleaf_spte(child_pt, !shadow_accessed_mask); - trace_kvm_mmu_get_page(sp, true); - tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte); + if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, + new_spte)) { + tdp_mmu_link_page(vcpu->kvm, sp, true, + huge_page_disallowed && + req_level >= iter.level); + + trace_kvm_mmu_get_page(sp, true); + } else { + tdp_mmu_free_sp(sp); + break; + } } } - if (WARN_ON(iter.level != level)) { + if (iter.level != level) { rcu_read_unlock(); return RET_PF_RETRY; } -- cgit v1.2.3 From 08f07c800e9d35b59d0c8346333f189160bd67d4 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:27 -0800 Subject: KVM: x86/mmu: Flush TLBs after zap in TDP MMU PF handler When the TDP MMU is allowed to handle page faults in parallel there is the possiblity of a race where an SPTE is cleared and then imediately replaced with a present SPTE pointing to a different PFN, before the TLBs can be flushed. This race would violate architectural specs. Ensure that the TLBs are flushed properly before other threads are allowed to install any present value for the SPTE. Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-22-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/spte.h | 21 +++++++++++++++- arch/x86/kvm/mmu/tdp_mmu.c | 63 +++++++++++++++++++++++++++++++++++++++------- 2 files changed, 74 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 398fd1bb13a7..6de3950fd704 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -130,6 +130,25 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask; PT64_EPT_EXECUTABLE_MASK) #define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT +/* + * If a thread running without exclusive control of the MMU lock must perform a + * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a + * non-present intermediate value. Other threads which encounter this value + * should not modify the SPTE. + * + * This constant works because it is considered non-present on both AMD and + * Intel CPUs and does not create a L1TF vulnerability because the pfn section + * is zeroed out. + * + * Only used by the TDP MMU. 
+ */ +#define REMOVED_SPTE (1ull << 59) + +static inline bool is_removed_spte(u64 spte) +{ + return spte == REMOVED_SPTE; +} + /* * In some cases, we need to preserve the GFN of a non-present or reserved * SPTE when we usurp the upper five bits of the physical address space to @@ -187,7 +206,7 @@ static inline bool is_access_track_spte(u64 spte) static inline bool is_shadow_present_pte(u64 pte) { - return (pte != 0) && !is_mmio_spte(pte); + return (pte != 0) && !is_mmio_spte(pte) && !is_removed_spte(pte); } static inline bool is_large_pte(u64 pte) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 0b5a9339ac55..7a2cdfeac4d2 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -427,15 +427,19 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, */ if (!was_present && !is_present) { /* - * If this change does not involve a MMIO SPTE, it is - * unexpected. Log the change, though it should not impact the - * guest since both the former and current SPTEs are nonpresent. + * If this change does not involve a MMIO SPTE or removed SPTE, + * it is unexpected. Log the change, though it should not + * impact the guest since both the former and current SPTEs + * are nonpresent. */ - if (WARN_ON(!is_mmio_spte(old_spte) && !is_mmio_spte(new_spte))) + if (WARN_ON(!is_mmio_spte(old_spte) && + !is_mmio_spte(new_spte) && + !is_removed_spte(new_spte))) pr_err("Unexpected SPTE change! Nonpresent SPTEs\n" "should not be replaced with another,\n" "different nonpresent SPTE, unless one or both\n" - "are MMIO SPTEs.\n" + "are MMIO SPTEs, or the new SPTE is\n" + "a temporary removed SPTE.\n" "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d", as_id, gfn, old_spte, new_spte, level); return; @@ -486,6 +490,13 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, lockdep_assert_held_read(&kvm->mmu_lock); + /* + * Do not change removed SPTEs. Only the thread that froze the SPTE + * may modify it. + */ + if (iter->old_spte == REMOVED_SPTE) + return false; + if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte, new_spte) != iter->old_spte) return false; @@ -496,6 +507,34 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm, return true; } +static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, + struct tdp_iter *iter) +{ + /* + * Freeze the SPTE by setting it to a special, + * non-present value. This will stop other threads from + * immediately installing a present entry in its place + * before the TLBs are flushed. + */ + if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE)) + return false; + + kvm_flush_remote_tlbs_with_address(kvm, iter->gfn, + KVM_PAGES_PER_HPAGE(iter->level)); + + /* + * No other thread can overwrite the removed SPTE as they + * must either wait on the MMU lock or use + * tdp_mmu_set_spte_atomic which will not overrite the + * special removed SPTE value. No bookkeeping is needed + * here since the SPTE is going from non-present + * to non-present. + */ + WRITE_ONCE(*iter->sptep, 0); + + return true; +} + /* * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping @@ -523,6 +562,15 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, lockdep_assert_held_write(&kvm->mmu_lock); + /* + * No thread should be using this function to set SPTEs to the + * temporary removed SPTE value. + * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic + * should be used. 
If operating under the MMU lock in write mode, the + * use of the removed SPTE should not be necessary. + */ + WARN_ON(iter->old_spte == REMOVED_SPTE); + WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, @@ -790,12 +838,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, */ if (is_shadow_present_pte(iter.old_spte) && is_large_pte(iter.old_spte)) { - if (!tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, 0)) + if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter)) break; - kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn, - KVM_PAGES_PER_HPAGE(iter.level)); - /* * The iter must explicitly re-read the spte here * because the new value informs the !present -- cgit v1.2.3 From e25f0e0cd51bfb1e2e6121373c68f1427266bba7 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:28 -0800 Subject: KVM: x86/mmu: Mark SPTEs in disconnected pages as removed When clearing TDP MMU pages what have been disconnected from the paging structure root, set the SPTEs to a special non-present value which will not be overwritten by other threads. This is needed to prevent races in which a thread is clearing a disconnected page table, but another thread has already acquired a pointer to that memory and installs a mapping in an already cleared entry. This can lead to memory leaks and accounting errors. Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-23-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 7a2cdfeac4d2..aa0845d5e1e4 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -334,9 +334,10 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt, { struct kvm_mmu_page *sp = sptep_to_sp(pt); int level = sp->role.level; - gfn_t gfn = sp->gfn; + gfn_t base_gfn = sp->gfn; u64 old_child_spte; u64 *sptep; + gfn_t gfn; int i; trace_kvm_mmu_prepare_zap_page(sp); @@ -345,16 +346,39 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt, for (i = 0; i < PT64_ENT_PER_PAGE; i++) { sptep = pt + i; + gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)); if (shared) { - old_child_spte = xchg(sptep, 0); + /* + * Set the SPTE to a nonpresent value that other + * threads will not overwrite. If the SPTE was + * already marked as removed then another thread + * handling a page fault could overwrite it, so + * set the SPTE until it is set from some other + * value to the removed SPTE value. + */ + for (;;) { + old_child_spte = xchg(sptep, REMOVED_SPTE); + if (!is_removed_spte(old_child_spte)) + break; + cpu_relax(); + } } else { old_child_spte = READ_ONCE(*sptep); - WRITE_ONCE(*sptep, 0); + + /* + * Marking the SPTE as a removed SPTE is not + * strictly necessary here as the MMU lock will + * stop other threads from concurrently modifying + * this SPTE. Using the removed SPTE value keeps + * the two branches consistent and simplifies + * the function. 
+ */ + WRITE_ONCE(*sptep, REMOVED_SPTE); } - handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), - gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)), - old_child_spte, 0, level - 1, shared); + handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, + old_child_spte, REMOVED_SPTE, level - 1, + shared); } kvm_flush_remote_tlbs_with_address(kvm, gfn, -- cgit v1.2.3 From a2855afc7ee88475e8feb16840b23f787bfc994d Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:29 -0800 Subject: KVM: x86/mmu: Allow parallel page faults for the TDP MMU Make the last few changes necessary to enable the TDP MMU to handle page faults in parallel while holding the mmu_lock in read mode. Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-24-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 329930d57774..e8bfff9acd5e 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3718,7 +3718,12 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, return r; r = RET_PF_RETRY; - write_lock(&vcpu->kvm->mmu_lock); + + if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) + read_lock(&vcpu->kvm->mmu_lock); + else + write_lock(&vcpu->kvm->mmu_lock); + if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; r = make_mmu_pages_available(vcpu); @@ -3733,7 +3738,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, prefault, is_tdp); out_unlock: - write_unlock(&vcpu->kvm->mmu_lock); + if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) + read_unlock(&vcpu->kvm->mmu_lock); + else + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return r; } -- cgit v1.2.3 From 1232f8e6c9d15e6d043452f5d8895fcebc6a1ff2 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 13 Jun 2018 06:10:37 -0400 Subject: KVM: x86/xen: fix Xen hypercall page msr handling Xen usually places its MSR at 0x40000000 or 0x40000200 depending on whether it is running in viridian mode or not. Note that this is not ABI guaranteed, so it is possible for Xen to advertise the MSR some place else. Given the way xen_hvm_config() is handled, if the former address is selected, this will conflict with Hyper-V's MSR (HV_X64_MSR_GUEST_OS_ID) which unconditionally uses the same address. Given that the MSR location is arbitrary, move the xen_hvm_config() handling to the top of kvm_set_msr_common() before falling through. 
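The fix amounts to checking the userspace-chosen Xen MSR index before dispatching on the fixed, well-known indices. A simplified sketch of the resulting flow (not the real kvm_set_msr_common() body; the Hyper-V handler call is indicative only):

static int set_msr_sketch(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	/*
	 * xen_hvm_config.msr is arbitrary and commonly 0x40000000, which
	 * is also HV_X64_MSR_GUEST_OS_ID, so it has to win over the switch.
	 */
	if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
		return xen_hvm_config(vcpu, data);

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		return kvm_hv_set_msr_common(vcpu, msr, data, false);
	default:
		return KVM_MSR_RET_INVALID;
	}
}

With the old ordering the switch matched HV_X64_MSR_GUEST_OS_ID first, so a Xen guest whose hypercall MSR was placed at 0x40000000 never reached xen_hvm_config().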
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b3b1cce939ab..e3d354d27add 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3029,6 +3029,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) u32 msr = msr_info->index; u64 data = msr_info->data; + if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) + return xen_hvm_config(vcpu, data); + switch (msr) { case MSR_AMD64_NB_CFG: case MSR_IA32_UCODE_WRITE: @@ -3304,8 +3307,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu->arch.msr_misc_features_enables = data; break; default: - if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) - return xen_hvm_config(vcpu, data); if (kvm_pmu_is_valid_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); return KVM_MSR_RET_INVALID; -- cgit v1.2.3 From 92f4d400a407235783afd4399fa26c4c665024b5 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 2 Feb 2021 11:05:10 +0000 Subject: KVM: x86/xen: Fix __user pointer handling for hypercall page installation The address we give to memdup_user() isn't correctly tagged as __user. This is harmless enough as it's a one-off use and we're doing exactly the right thing, but fix it anyway to shut the checker up. Otherwise it'll whine when the (now legacy) code gets moved around in a later patch. Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e3d354d27add..34ca136a608e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2874,8 +2874,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; + u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 + : kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; @@ -2885,7 +2885,9 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) if (page_num >= blob_size) return 1; - page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); + blob_addr += page_num * PAGE_SIZE; + + page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE); if (IS_ERR(page)) return PTR_ERR(page); -- cgit v1.2.3 From 23200b7a30de315d0e9a40663c905869d29d833c Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 13 Jun 2018 09:55:44 -0400 Subject: KVM: x86/xen: intercept xen hypercalls if enabled Add a new exit reason for emulator to handle Xen hypercalls. Since this means KVM owns the ABI, dispense with the facility for the VMM to provide its own copy of the hypercall pages; just fill them in directly using VMCALL/VMMCALL as we do for the Hyper-V hypercall page. This behaviour is enabled by a new INTERCEPT_HCALL flag in the KVM_XEN_HVM_CONFIG ioctl structure, and advertised by the same flag being returned from the KVM_CAP_XEN_HVM check. Rename xen_hvm_config() to kvm_xen_write_hypercall_page() and move it to the nascent xen.c while we're at it, and add a test case. 
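Modelled on the selftest added below, the userspace side reduces to roughly the following sketch (error handling and the run loop omitted; xen_dispatch() is a stand-in for whatever the VMM does with the hypercall):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM handler for the intercepted hypercall. */
extern uint64_t xen_dispatch(uint64_t input, const __u64 params[6]);

static void enable_xen_hcall_intercept(int vm_fd, uint32_t msr)
{
        struct kvm_xen_hvm_config cfg = {
                .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
                .msr = msr,
        };

        ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
}

static void handle_xen_exit(struct kvm_run *run)
{
        if (run->exit_reason != KVM_EXIT_XEN ||
            run->xen.type != KVM_EXIT_XEN_HCALL)
                return;

        run->xen.u.hcall.result = xen_dispatch(run->xen.u.hcall.input,
                                               run->xen.u.hcall.params);
        /* KVM writes the result into the guest's RAX on the next KVM_RUN. */
}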
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 6 + arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/trace.h | 36 ++++++ arch/x86/kvm/x86.c | 49 +++---- arch/x86/kvm/xen.c | 142 +++++++++++++++++++++ arch/x86/kvm/xen.h | 21 +++ include/uapi/linux/kvm.h | 20 +++ tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/lib/kvm_util.c | 1 + .../testing/selftests/kvm/x86_64/xen_vmcall_test.c | 123 ++++++++++++++++++ 10 files changed, 369 insertions(+), 32 deletions(-) create mode 100644 arch/x86/kvm/xen.c create mode 100644 arch/x86/kvm/xen.h create mode 100644 tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bcbb32ef9f00..b4bcdebd6e4c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -531,6 +531,11 @@ struct kvm_vcpu_hv { cpumask_t tlb_flush; }; +/* Xen HVM per vcpu emulation context */ +struct kvm_vcpu_xen { + u64 hypercall_rip; +}; + struct kvm_vcpu_arch { /* * rip and regs accesses must go through @@ -729,6 +734,7 @@ struct kvm_vcpu_arch { unsigned long singlestep_rip; struct kvm_vcpu_hv hyperv; + struct kvm_vcpu_xen xen; cpumask_var_t wbinvd_dirty_mask; diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 4bd14ab01323..a50041235530 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -14,7 +14,7 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ $(KVM)/dirty_ring.o kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o -kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ +kvm-y += x86.o emulate.o i8259.o irq.o lapic.o xen.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 5ef238621881..a61c015870e3 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -92,6 +92,42 @@ TRACE_EVENT(kvm_hv_hypercall, __entry->outgpa) ); +/* + * Tracepoint for Xen hypercall. + */ +TRACE_EVENT(kvm_xen_hypercall, + TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5), + TP_ARGS(nr, a0, a1, a2, a3, a4, a5), + + TP_STRUCT__entry( + __field(unsigned long, nr) + __field(unsigned long, a0) + __field(unsigned long, a1) + __field(unsigned long, a2) + __field(unsigned long, a3) + __field(unsigned long, a4) + __field(unsigned long, a5) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->a0 = a0; + __entry->a1 = a1; + __entry->a2 = a2; + __entry->a3 = a3; + __entry->a4 = a4; + __entry->a4 = a5; + ), + + TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", + __entry->nr, __entry->a0, __entry->a1, __entry->a2, + __entry->a3, __entry->a4, __entry->a5) +); + + + /* * Tracepoint for PIO. */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 34ca136a608e..5a41d465134d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -29,6 +29,7 @@ #include "pmu.h" #include "hyperv.h" #include "lapic.h" +#include "xen.h" #include #include @@ -2870,34 +2871,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 0; } -static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) -{ - struct kvm *kvm = vcpu->kvm; - int lm = is_long_mode(vcpu); - u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 - : kvm->arch.xen_hvm_config.blob_addr_32; - u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 - : kvm->arch.xen_hvm_config.blob_size_32; - u32 page_num = data & ~PAGE_MASK; - u64 page_addr = data & PAGE_MASK; - u8 *page; - - if (page_num >= blob_size) - return 1; - - blob_addr += page_num * PAGE_SIZE; - - page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE); - if (IS_ERR(page)) - return PTR_ERR(page); - - if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) { - kfree(page); - return 1; - } - return 0; -} - static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) { u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; @@ -3032,7 +3005,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) u64 data = msr_info->data; if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) - return xen_hvm_config(vcpu, data); + return kvm_xen_write_hypercall_page(vcpu, data); switch (msr) { case MSR_AMD64_NB_CFG: @@ -3741,7 +3714,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: - case KVM_CAP_XEN_HVM: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: @@ -3781,6 +3753,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: r = 1; break; + case KVM_CAP_XEN_HVM: + r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL; + break; case KVM_CAP_SYNC_REGS: r = KVM_SYNC_X86_VALID_FIELDS; break; @@ -5652,7 +5628,15 @@ set_pit2_out: if (copy_from_user(&xhc, argp, sizeof(xhc))) goto out; r = -EINVAL; - if (xhc.flags) + if (xhc.flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) + goto out; + /* + * With hypercall interception the kernel generates its own + * hypercall page so it must not be provided. + */ + if ((xhc.flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) && + (xhc.blob_addr_32 || xhc.blob_addr_64 || + xhc.blob_size_32 || xhc.blob_size_64)) goto out; memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); r = 0; @@ -8143,6 +8127,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) unsigned long nr, a0, a1, a2, a3, ret; int op_64_bit; + if (kvm_xen_hypercall_enabled(vcpu->kvm)) + return kvm_xen_hypercall(vcpu); + if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c new file mode 100644 index 000000000000..62569ca43857 --- /dev/null +++ b/arch/x86/kvm/xen.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * KVM Xen emulation + */ + +#include "x86.h" +#include "xen.h" + +#include + +#include + +#include "trace.h" + +int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) +{ + struct kvm *kvm = vcpu->kvm; + u32 page_num = data & ~PAGE_MASK; + u64 page_addr = data & PAGE_MASK; + + /* + * If Xen hypercall intercept is enabled, fill the hypercall + * page with VMCALL/VMMCALL instructions since that's what + * we catch. Else the VMM has provided the hypercall pages + * with instructions of its own choosing, so use those. 
+ */ + if (kvm_xen_hypercall_enabled(kvm)) { + u8 instructions[32]; + int i; + + if (page_num) + return 1; + + /* mov imm32, %eax */ + instructions[0] = 0xb8; + + /* vmcall / vmmcall */ + kvm_x86_ops.patch_hypercall(vcpu, instructions + 5); + + /* ret */ + instructions[8] = 0xc3; + + /* int3 to pad */ + memset(instructions + 9, 0xcc, sizeof(instructions) - 9); + + for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) { + *(u32 *)&instructions[1] = i; + if (kvm_vcpu_write_guest(vcpu, + page_addr + (i * sizeof(instructions)), + instructions, sizeof(instructions))) + return 1; + } + } else { + int lm = is_long_mode(vcpu); + u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 + : kvm->arch.xen_hvm_config.blob_addr_32; + u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 + : kvm->arch.xen_hvm_config.blob_size_32; + u8 *page; + + if (page_num >= blob_size) + return 1; + + blob_addr += page_num * PAGE_SIZE; + + page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE); + if (IS_ERR(page)) + return PTR_ERR(page); + + if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) { + kfree(page); + return 1; + } + } + return 0; +} + +static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) +{ + kvm_rax_write(vcpu, result); + return kvm_skip_emulated_instruction(vcpu); +} + +static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip))) + return 1; + + return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result); +} + +int kvm_xen_hypercall(struct kvm_vcpu *vcpu) +{ + bool longmode; + u64 input, params[6]; + + input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX); + + longmode = is_64_bit_mode(vcpu); + if (!longmode) { + params[0] = (u32)kvm_rbx_read(vcpu); + params[1] = (u32)kvm_rcx_read(vcpu); + params[2] = (u32)kvm_rdx_read(vcpu); + params[3] = (u32)kvm_rsi_read(vcpu); + params[4] = (u32)kvm_rdi_read(vcpu); + params[5] = (u32)kvm_rbp_read(vcpu); + } +#ifdef CONFIG_X86_64 + else { + params[0] = (u64)kvm_rdi_read(vcpu); + params[1] = (u64)kvm_rsi_read(vcpu); + params[2] = (u64)kvm_rdx_read(vcpu); + params[3] = (u64)kvm_r10_read(vcpu); + params[4] = (u64)kvm_r8_read(vcpu); + params[5] = (u64)kvm_r9_read(vcpu); + } +#endif + trace_kvm_xen_hypercall(input, params[0], params[1], params[2], + params[3], params[4], params[5]); + + vcpu->run->exit_reason = KVM_EXIT_XEN; + vcpu->run->xen.type = KVM_EXIT_XEN_HCALL; + vcpu->run->xen.u.hcall.longmode = longmode; + vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu); + vcpu->run->xen.u.hcall.input = input; + vcpu->run->xen.u.hcall.params[0] = params[0]; + vcpu->run->xen.u.hcall.params[1] = params[1]; + vcpu->run->xen.u.hcall.params[2] = params[2]; + vcpu->run->xen.u.hcall.params[3] = params[3]; + vcpu->run->xen.u.hcall.params[4] = params[4]; + vcpu->run->xen.u.hcall.params[5] = params[5]; + vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu); + vcpu->arch.complete_userspace_io = + kvm_xen_hypercall_complete_userspace; + + return 0; +} diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h new file mode 100644 index 000000000000..276ed59e476b --- /dev/null +++ b/arch/x86/kvm/xen.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * KVM Xen emulation + */ + +#ifndef __ARCH_X86_KVM_XEN_H__ +#define __ARCH_X86_KVM_XEN_H__ + +int kvm_xen_hypercall(struct kvm_vcpu *vcpu); +int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); + +static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) +{ + return kvm->arch.xen_hvm_config.flags & + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL; +} + +#endif /* __ARCH_X86_KVM_XEN_H__ */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index dfe3ba5cf262..c87defe5db4f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -216,6 +216,20 @@ struct kvm_hyperv_exit { } u; }; +struct kvm_xen_exit { +#define KVM_EXIT_XEN_HCALL 1 + __u32 type; + union { + struct { + __u32 longmode; + __u32 cpl; + __u64 input; + __u64 result; + __u64 params[6]; + } hcall; + } u; +}; + #define KVM_S390_GET_SKEYS_NONE 1 #define KVM_S390_SKEYS_MAX 1048576 @@ -253,6 +267,7 @@ struct kvm_hyperv_exit { #define KVM_EXIT_DIRTY_RING_FULL 31 #define KVM_EXIT_AP_RESET_HOLD 32 #define KVM_EXIT_X86_BUS_LOCK 33 +#define KVM_EXIT_XEN 34 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -429,6 +444,8 @@ struct kvm_run { __u32 index; /* kernel -> user */ __u64 data; /* kernel <-> user */ } msr; + /* KVM_EXIT_XEN */ + struct kvm_xen_exit xen; /* Fix the size of the union. */ char padding[256]; }; @@ -1133,6 +1150,9 @@ struct kvm_x86_mce { #endif #ifdef KVM_CAP_XEN_HVM +#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) +#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) + struct kvm_xen_hvm_config { __u32 flags; __u32 msr; diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 9616f77cc5af..be9613dd6d9b 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -62,6 +62,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += x86_64/debug_regs TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test +TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index fa5a90e6c6f0..d787cb802b4a 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1801,6 +1801,7 @@ static struct exit_reason { {KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"}, {KVM_EXIT_X86_RDMSR, "RDMSR"}, {KVM_EXIT_X86_WRMSR, "WRMSR"}, + {KVM_EXIT_XEN, "XEN"}, #ifdef KVM_EXIT_MEMORY_NOT_PRESENT {KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"}, #endif diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c new file mode 100644 index 000000000000..234c9da286aa --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * xen_vmcall_test + * + * Copyright © 2020 Amazon.com, Inc. or its affiliates. 
+ * + * Userspace hypercall testing + */ + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 5 + +#define HCALL_REGION_GPA 0xc0000000ULL +#define HCALL_REGION_SLOT 10 + +static struct kvm_vm *vm; + +#define INPUTVALUE 17 +#define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x) +#define RETVALUE 0xcafef00dfbfbffffUL + +#define XEN_HYPERCALL_MSR 0x40000000 + +static void guest_code(void) +{ + unsigned long rax = INPUTVALUE; + unsigned long rdi = ARGVALUE(1); + unsigned long rsi = ARGVALUE(2); + unsigned long rdx = ARGVALUE(3); + register unsigned long r10 __asm__("r10") = ARGVALUE(4); + register unsigned long r8 __asm__("r8") = ARGVALUE(5); + register unsigned long r9 __asm__("r9") = ARGVALUE(6); + + /* First a direct invocation of 'vmcall' */ + __asm__ __volatile__("vmcall" : + "=a"(rax) : + "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx), + "r"(r10), "r"(r8), "r"(r9)); + GUEST_ASSERT(rax == RETVALUE); + + /* Now fill in the hypercall page */ + __asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR), + "a" (HCALL_REGION_GPA & 0xffffffff), + "d" (HCALL_REGION_GPA >> 32)); + + /* And invoke the same hypercall that way */ + __asm__ __volatile__("call *%1" : "=a"(rax) : + "r"(HCALL_REGION_GPA + INPUTVALUE * 32), + "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx), + "r"(r10), "r"(r8), "r"(r9)); + GUEST_ASSERT(rax == RETVALUE); + + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + if (!(kvm_check_cap(KVM_CAP_XEN_HVM) & + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) { + print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available"); + exit(KSFT_SKIP); + } + + vm = vm_create_default(VCPU_ID, 0, (void *) guest_code); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + + struct kvm_xen_hvm_config hvmc = { + .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, + .msr = XEN_HYPERCALL_MSR, + }; + vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc); + + /* Map a region for the hypercall page */ + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + HCALL_REGION_GPA, HCALL_REGION_SLOT, + getpagesize(), 0); + virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 1, 0); + + for (;;) { + volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); + struct ucall uc; + + vcpu_run(vm, VCPU_ID); + + if (run->exit_reason == KVM_EXIT_XEN) { + ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL); + ASSERT_EQ(run->xen.u.hcall.cpl, 0); + ASSERT_EQ(run->xen.u.hcall.longmode, 1); + ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE); + ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1)); + ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2)); + ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3)); + ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4)); + ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5)); + ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6)); + run->xen.u.hcall.result = RETVALUE; + continue; + } + + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_FAIL("%s", (const char *)uc.args[0]); + /* NOT REACHED */ + case UCALL_SYNC: + break; + case UCALL_DONE: + goto done; + default: + TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd); + } + } +done: + kvm_vm_free(vm); + return 0; +} -- cgit v1.2.3 From 79033bebf6fa3045bfa9bbe543c0eb7b43a0f4a3 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 13 Jun 2018 09:55:44 -0400 Subject: KVM: x86/xen: Fix coexistence of Xen and Hyper-V hypercalls Disambiguate Xen vs. 
Hyper-V calls by adding 'orl $0x80000000, %eax' at the start of the Hyper-V hypercall page when Xen hypercalls are also enabled. That bit is reserved in the Hyper-V ABI, and those hypercall numbers will never be used by Xen (because it does precisely the same trick). Switch to using kvm_vcpu_write_guest() while we're at it, instead of open-coding it. Signed-off-by: David Woodhouse --- arch/x86/kvm/hyperv.c | 40 ++++++++++++++++------ arch/x86/kvm/xen.c | 6 ++++ .../testing/selftests/kvm/x86_64/xen_vmcall_test.c | 39 +++++++++++++++++---- 3 files changed, 68 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 5c45d80a7ce3..af7278abe16b 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -23,6 +23,7 @@ #include "ioapic.h" #include "cpuid.h" #include "hyperv.h" +#include "xen.h" #include #include @@ -1139,9 +1140,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { - u64 gfn; - unsigned long addr; - u8 instructions[4]; + u8 instructions[9]; + int i = 0; + u64 addr; /* if guest os id is not set hypercall should remain disabled */ if (!hv->hv_guest_os_id) @@ -1150,16 +1151,33 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, hv->hv_hypercall = data; break; } - gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; - addr = gfn_to_hva(kvm, gfn); - if (kvm_is_error_hva(addr)) - return 1; - static_call(kvm_x86_patch_hypercall)(vcpu, instructions); - ((unsigned char *)instructions)[3] = 0xc3; /* ret */ - if (__copy_to_user((void __user *)addr, instructions, 4)) + + /* + * If Xen and Hyper-V hypercalls are both enabled, disambiguate + * the same way Xen itself does, by setting the bit 31 of EAX + * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just + * going to be clobbered on 64-bit. 
+ */ + if (kvm_xen_hypercall_enabled(kvm)) { + /* orl $0x80000000, %eax */ + instructions[i++] = 0x0d; + instructions[i++] = 0x00; + instructions[i++] = 0x00; + instructions[i++] = 0x00; + instructions[i++] = 0x80; + } + + /* vmcall/vmmcall */ + static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i); + i += 3; + + /* ret */ + ((unsigned char *)instructions)[i++] = 0xc3; + + addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK; + if (kvm_vcpu_write_guest(vcpu, addr, instructions, i)) return 1; hv->hv_hypercall = data; - mark_page_dirty(kvm, gfn); break; } case HV_X64_MSR_REFERENCE_TSC: diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 62569ca43857..19bcb2bfba86 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -8,6 +8,7 @@ #include "x86.h" #include "xen.h" +#include "hyperv.h" #include @@ -101,6 +102,11 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX); + /* Hyper-V hypercalls get bit 31 set in EAX */ + if ((input & 0x80000000) && + kvm_hv_hypercall_enabled(vcpu->kvm)) + return kvm_hv_hypercall(vcpu); + longmode = is_64_bit_mode(vcpu); if (!longmode) { params[0] = (u32)kvm_rbx_read(vcpu); diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c index 234c9da286aa..6e7b069322fd 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c @@ -15,6 +15,7 @@ #define HCALL_REGION_GPA 0xc0000000ULL #define HCALL_REGION_SLOT 10 +#define PAGE_SIZE 4096 static struct kvm_vm *vm; @@ -22,7 +23,12 @@ static struct kvm_vm *vm; #define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x) #define RETVALUE 0xcafef00dfbfbffffUL -#define XEN_HYPERCALL_MSR 0x40000000 +#define XEN_HYPERCALL_MSR 0x40000200 +#define HV_GUEST_OS_ID_MSR 0x40000000 +#define HV_HYPERCALL_MSR 0x40000001 + +#define HVCALL_SIGNAL_EVENT 0x005d +#define HV_STATUS_INVALID_ALIGNMENT 4 static void guest_code(void) { @@ -30,6 +36,7 @@ static void guest_code(void) unsigned long rdi = ARGVALUE(1); unsigned long rsi = ARGVALUE(2); unsigned long rdx = ARGVALUE(3); + unsigned long rcx; register unsigned long r10 __asm__("r10") = ARGVALUE(4); register unsigned long r8 __asm__("r8") = ARGVALUE(5); register unsigned long r9 __asm__("r9") = ARGVALUE(6); @@ -41,18 +48,38 @@ static void guest_code(void) "r"(r10), "r"(r8), "r"(r9)); GUEST_ASSERT(rax == RETVALUE); - /* Now fill in the hypercall page */ + /* Fill in the Xen hypercall page */ __asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR), "a" (HCALL_REGION_GPA & 0xffffffff), "d" (HCALL_REGION_GPA >> 32)); - /* And invoke the same hypercall that way */ + /* Set Hyper-V Guest OS ID */ + __asm__ __volatile__("wrmsr" : : "c" (HV_GUEST_OS_ID_MSR), + "a" (0x5a), "d" (0)); + + /* Hyper-V hypercall page */ + u64 msrval = HCALL_REGION_GPA + PAGE_SIZE + 1; + __asm__ __volatile__("wrmsr" : : "c" (HV_HYPERCALL_MSR), + "a" (msrval & 0xffffffff), + "d" (msrval >> 32)); + + /* Invoke a Xen hypercall */ __asm__ __volatile__("call *%1" : "=a"(rax) : "r"(HCALL_REGION_GPA + INPUTVALUE * 32), "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx), "r"(r10), "r"(r8), "r"(r9)); GUEST_ASSERT(rax == RETVALUE); + /* Invoke a Hyper-V hypercall */ + rax = 0; + rcx = HVCALL_SIGNAL_EVENT; /* code */ + rdx = 0x5a5a5a5a; /* ingpa (badly aligned) */ + __asm__ __volatile__("call *%1" : "=a"(rax) : + "r"(HCALL_REGION_GPA + PAGE_SIZE), + "a"(rax), "c"(rcx), "d"(rdx), + "r"(r8)); + GUEST_ASSERT(rax == HV_STATUS_INVALID_ALIGNMENT); + GUEST_DONE(); } @@ -73,11 
+100,11 @@ int main(int argc, char *argv[]) }; vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc); - /* Map a region for the hypercall page */ + /* Map a region for the hypercall pages */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, HCALL_REGION_GPA, HCALL_REGION_SLOT, - getpagesize(), 0); - virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 1, 0); + 2 * getpagesize(), 0); + virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2, 0); for (;;) { volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); -- cgit v1.2.3 From 78e9878cb376969d184a4b7fff392528fe17a3ce Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 2 Feb 2021 13:19:35 +0000 Subject: KVM: x86/xen: Move KVM_XEN_HVM_CONFIG handling to xen.c This is already more complex than the simple memcpy it originally had. Move it to xen.c with the rest of the Xen support. Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 14 +------------- arch/x86/kvm/xen.c | 18 ++++++++++++++++++ arch/x86/kvm/xen.h | 1 + 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5a41d465134d..673dcc6fde7e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5627,19 +5627,7 @@ set_pit2_out: r = -EFAULT; if (copy_from_user(&xhc, argp, sizeof(xhc))) goto out; - r = -EINVAL; - if (xhc.flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) - goto out; - /* - * With hypercall interception the kernel generates its own - * hypercall page so it must not be provided. - */ - if ((xhc.flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) && - (xhc.blob_addr_32 || xhc.blob_addr_64 || - xhc.blob_size_32 || xhc.blob_size_64)) - goto out; - memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); - r = 0; + r = kvm_xen_hvm_config(kvm, &xhc); break; } case KVM_SET_CLOCK: { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 19bcb2bfba86..b52549fc6dbc 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -79,6 +79,24 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) return 0; } +int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc) +{ + if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) + return -EINVAL; + + /* + * With hypercall interception the kernel generates its own + * hypercall page so it must not be provided. + */ + if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) && + (xhc->blob_addr_32 || xhc->blob_addr_64 || + xhc->blob_size_32 || xhc->blob_size_64)) + return -EINVAL; + + memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc)); + return 0; +} + static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) { kvm_rax_write(vcpu, result); diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 276ed59e476b..28e9c9892628 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -11,6 +11,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu); int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); +int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) { -- cgit v1.2.3 From 7d6bbebb7bb0294ede7bda04847272854e34f87c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 2 Feb 2021 15:48:05 +0000 Subject: KVM: x86/xen: Add kvm_xen_enabled static key The code paths for Xen support are all fairly lightweight but if we hide them behind this, they're even *more* lightweight for any system which isn't actually hosting Xen guests. 
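The pattern in miniature, condensed from the diff below rather than new API: a deferred static key, bumped when a VM first configures a Xen MSR and dropped (with an HZ delay to avoid text-patching churn) when the config goes away or the VM is destroyed, so the fast-path check compiles to a patched branch:

#include <linux/kernel.h>
#include <linux/jump_label_ratelimit.h>

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

/* First VM to configure a Xen MSR flips the key on... */
static void xen_config_set(void)
{
        static_branch_inc(&kvm_xen_enabled.key);
}

/* ...and the last one to drop it flips it back off, HZ later. */
static void xen_config_clear(void)
{
        static_branch_slow_dec_deferred(&kvm_xen_enabled);
}

static bool xen_enabled_fast_path(void)
{
        /* Nearly free for hosts that never run Xen guests. */
        return static_branch_unlikely(&kvm_xen_enabled.key);
}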
Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 2 ++ arch/x86/kvm/xen.c | 17 +++++++++++++++++ arch/x86/kvm/xen.h | 10 ++++++++-- 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 673dcc6fde7e..ffc4ab3bd6ec 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7988,6 +7988,7 @@ void kvm_arch_exit(void) kvm_mmu_module_exit(); free_percpu(user_return_msrs); kmem_cache_destroy(x86_fpu_cache); + WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); } static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason) @@ -10581,6 +10582,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); kvm_mmu_uninit_vm(kvm); kvm_page_track_cleanup(kvm); + kvm_xen_destroy_vm(kvm); kvm_hv_destroy_vm(kvm); } diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index b52549fc6dbc..7d03d918e595 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -16,6 +16,8 @@ #include "trace.h" +DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); + int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; @@ -93,10 +95,25 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc) xhc->blob_size_32 || xhc->blob_size_64)) return -EINVAL; + mutex_lock(&kvm->lock); + + if (xhc->msr && !kvm->arch.xen_hvm_config.msr) + static_branch_inc(&kvm_xen_enabled.key); + else if (!xhc->msr && kvm->arch.xen_hvm_config.msr) + static_branch_slow_dec_deferred(&kvm_xen_enabled); + memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc)); + + mutex_unlock(&kvm->lock); return 0; } +void kvm_xen_destroy_vm(struct kvm *kvm) +{ + if (kvm->arch.xen_hvm_config.msr) + static_branch_slow_dec_deferred(&kvm_xen_enabled); +} + static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) { kvm_rax_write(vcpu, result); diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 28e9c9892628..ec3d8f6d0ef5 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -9,14 +9,20 @@ #ifndef __ARCH_X86_KVM_XEN_H__ #define __ARCH_X86_KVM_XEN_H__ +#include + +extern struct static_key_false_deferred kvm_xen_enabled; + int kvm_xen_hypercall(struct kvm_vcpu *vcpu); int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); +void kvm_xen_destroy_vm(struct kvm *kvm); static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) { - return kvm->arch.xen_hvm_config.flags & - KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL; + return static_branch_unlikely(&kvm_xen_enabled.key) && + (kvm->arch.xen_hvm_config.flags & + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); } #endif /* __ARCH_X86_KVM_XEN_H__ */ -- cgit v1.2.3 From a76b9641ad1c0b045045727a6cbbeebf80b6b9bb Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Thu, 3 Dec 2020 15:52:25 +0000 Subject: KVM: x86/xen: add KVM_XEN_HVM_SET_ATTR/KVM_XEN_HVM_GET_ATTR This will be used to set up shared info pages etc. 
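Illustrative userspace usage of the new per-VM ioctl; the attribute type used here (KVM_XEN_ATTR_TYPE_LONG_MODE) is introduced a couple of patches further down, and other types follow the same shape:

#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int xen_set_long_mode(int vm_fd, bool long_mode)
{
        struct kvm_xen_hvm_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = KVM_XEN_ATTR_TYPE_LONG_MODE;
        attr.u.long_mode = long_mode;

        return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}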
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 20 ++++++++++++++++++++ arch/x86/kvm/xen.c | 30 ++++++++++++++++++++++++++++++ arch/x86/kvm/xen.h | 2 ++ include/uapi/linux/kvm.h | 11 +++++++++++ 4 files changed, 63 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ffc4ab3bd6ec..59bcba1ea59f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5630,6 +5630,26 @@ set_pit2_out: r = kvm_xen_hvm_config(kvm, &xhc); break; } + case KVM_XEN_HVM_GET_ATTR: { + struct kvm_xen_hvm_attr xha; + + r = -EFAULT; + if (copy_from_user(&xha, argp, sizeof(xha))) + goto out; + r = kvm_xen_hvm_get_attr(kvm, &xha); + if (!r && copy_to_user(argp, &xha, sizeof(xha))) + r = -EFAULT; + break; + } + case KVM_XEN_HVM_SET_ATTR: { + struct kvm_xen_hvm_attr xha; + + r = -EFAULT; + if (copy_from_user(&xha, argp, sizeof(xha))) + goto out; + r = kvm_xen_hvm_set_attr(kvm, &xha); + break; + } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 7d03d918e595..a3fd791b0354 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -18,6 +18,36 @@ DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); +int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) +{ + int r = -ENOENT; + + mutex_unlock(&kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&kvm->lock); + return r; +} + +int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&kvm->lock); + return r; +} + int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index ec3d8f6d0ef5..0e2467fcfb9f 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -13,6 +13,8 @@ extern struct static_key_false_deferred kvm_xen_enabled; +int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); +int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); int kvm_xen_hypercall(struct kvm_vcpu *vcpu); int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index c87defe5db4f..334796799dbc 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1587,6 +1587,17 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_DIRTY_LOG_RING */ #define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7) +#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr) +#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr) + +struct kvm_xen_hvm_attr { + __u16 type; + __u16 pad[3]; + union { + __u64 pad[8]; + } u; +}; + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3 From a3833b81b05d0ae92ae085959dd8da136ec91868 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Dec 2020 16:20:32 +0000 Subject: KVM: x86/xen: latch long_mode when hypercall page is set up Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 6 ++++++ arch/x86/kvm/xen.c | 16 +++++++++++++++- include/uapi/linux/kvm.h | 3 +++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b4bcdebd6e4c..016a005cc2ad 100644 --- a/arch/x86/include/asm/kvm_host.h +++ 
b/arch/x86/include/asm/kvm_host.h @@ -905,6 +905,11 @@ struct msr_bitmap_range { unsigned long *bitmap; }; +/* Xen emulation context */ +struct kvm_xen { + bool long_mode; +}; + enum kvm_irqchip_mode { KVM_IRQCHIP_NONE, KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */ @@ -984,6 +989,7 @@ struct kvm_arch { struct hlist_head mask_notifier_list; struct kvm_hv hyperv; + struct kvm_xen xen; #ifdef CONFIG_KVM_MMU_AUDIT int audit_point; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index a3fd791b0354..55da739267b1 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -25,6 +25,13 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) mutex_unlock(&kvm->lock); switch (data->type) { + case KVM_XEN_ATTR_TYPE_LONG_MODE: + if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) + return -EINVAL; + + kvm->arch.xen.long_mode = !!data->u.long_mode; + r = 0; + break; default: break; } @@ -40,6 +47,10 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) mutex_lock(&kvm->lock); switch (data->type) { + case KVM_XEN_ATTR_TYPE_LONG_MODE: + data->u.long_mode = kvm->arch.xen.long_mode; + r = 0; + break; default: break; } @@ -53,6 +64,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) struct kvm *kvm = vcpu->kvm; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; + bool lm = is_long_mode(vcpu); + + /* Latch long_mode for shared_info pages etc. */ + vcpu->kvm->arch.xen.long_mode = lm; /* * If Xen hypercall intercept is enabled, fill the hypercall @@ -87,7 +102,6 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) return 1; } } else { - int lm = is_long_mode(vcpu); u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 : kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 334796799dbc..11644954a2e2 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1594,10 +1594,13 @@ struct kvm_xen_hvm_attr { __u16 type; __u16 pad[3]; union { + __u8 long_mode; __u64 pad[8]; } u; }; +#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3 From 1ea9f2ed81c4b058a3dc73b75d684a1f451ce714 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Dec 2020 18:45:22 +0000 Subject: KVM: x86/xen: add definitions of compat_shared_info, compat_vcpu_info There aren't a lot of differences for the things that the kernel needs to care about, but there are a few. 
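The payoff is mostly at compile time: vcpu_info is layout-compatible between modes while shared_info is not, so later patches in this series pick offsets based on the guest's long_mode and assert the layouts they depend on. A condensed sketch of those checks, assuming the Xen interface headers and the compat definitions below are in scope:

#include <linux/build_bug.h>
#include <linux/stddef.h>

static void xen_layout_checks(void)
{
        /* vcpu_info needs no compat handling at all... */
        BUILD_BUG_ON(sizeof(struct vcpu_info) != sizeof(struct compat_vcpu_info));

        /* ...but the wallclock lives at different offsets in shared_info. */
        BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
#ifdef CONFIG_X86_64
        BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
#endif
}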
Signed-off-by: David Woodhouse --- arch/x86/kvm/xen.h | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 0e2467fcfb9f..12a3dc32e78e 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -27,4 +27,40 @@ static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); } + +/* 32-bit compatibility definitions, also used natively in 32-bit build */ +#include +#include + +struct compat_arch_vcpu_info { + unsigned int cr2; + unsigned int pad[5]; +}; + +struct compat_vcpu_info { + uint8_t evtchn_upcall_pending; + uint8_t evtchn_upcall_mask; + uint32_t evtchn_pending_sel; + struct compat_arch_vcpu_info arch; + struct pvclock_vcpu_time_info time; +}; /* 64 bytes (x86) */ + +struct compat_arch_shared_info { + unsigned int max_pfn; + unsigned int pfn_to_mfn_frame_list_list; + unsigned int nmi_reason; + unsigned int p2m_cr3; + unsigned int p2m_vaddr; + unsigned int p2m_generation; + uint32_t wc_sec_hi; +}; + +struct compat_shared_info { + struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS]; + uint32_t evtchn_pending[32]; + uint32_t evtchn_mask[32]; + struct pvclock_wall_clock wc; + struct compat_arch_shared_info arch; +}; + #endif /* __ARCH_X86_KVM_XEN_H__ */ -- cgit v1.2.3 From 13ffb97a3b11998450d51457b6b3617781953f7c Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Fri, 15 Jun 2018 21:17:14 -0400 Subject: KVM: x86/xen: register shared_info page Add KVM_XEN_ATTR_TYPE_SHARED_INFO to allow hypervisor to know where the guest's shared info page is. Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/xen.c | 40 ++++++++++++++++++++++++++++++++++++---- include/uapi/linux/kvm.h | 4 ++++ 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 016a005cc2ad..00d1233cfd55 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -908,6 +908,8 @@ struct msr_bitmap_range { /* Xen emulation context */ struct kvm_xen { bool long_mode; + bool shinfo_set; + struct gfn_to_hva_cache shinfo_cache; }; enum kvm_irqchip_mode { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 55da739267b1..924d4e853108 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -13,25 +13,49 @@ #include #include +#include #include "trace.h" DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); +static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) +{ + int ret; + int idx = srcu_read_lock(&kvm->srcu); + + ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache, + gfn_to_gpa(gfn), PAGE_SIZE); + if (!ret) { + kvm->arch.xen.shinfo_set = true; + } + + srcu_read_unlock(&kvm->srcu, idx); + return ret; +} + int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) { int r = -ENOENT; + mutex_lock(&kvm->lock); + mutex_unlock(&kvm->lock); switch (data->type) { case KVM_XEN_ATTR_TYPE_LONG_MODE: - if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) - return -EINVAL; + if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { + r = -EINVAL; + } else { + kvm->arch.xen.long_mode = !!data->u.long_mode; + r = 0; + } + break; - kvm->arch.xen.long_mode = !!data->u.long_mode; - r = 0; + case KVM_XEN_ATTR_TYPE_SHARED_INFO: + r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); break; + default: break; } @@ -51,6 +75,14 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) data->u.long_mode = 
kvm->arch.xen.long_mode; r = 0; break; + + case KVM_XEN_ATTR_TYPE_SHARED_INFO: + if (kvm->arch.xen.shinfo_set) { + data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); + r = 0; + } + break; + default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 11644954a2e2..f57f6e741a28 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1595,11 +1595,15 @@ struct kvm_xen_hvm_attr { __u16 pad[3]; union { __u8 long_mode; + struct { + __u64 gfn; + } shared_info; __u64 pad[8]; } u; }; #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 +#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { -- cgit v1.2.3 From 42387042ba38cca8fb86bb3a7913e44cd3569750 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Dec 2020 21:02:23 +0000 Subject: xen: add wc_sec_hi to struct shared_info Xen added this in 2015 (Xen 4.6). On x86_64 and Arm it fills what was previously a 32-bit hole in the generic shared_info structure; on i386 it had to go at the end of struct arch_shared_info. Signed-off-by: David Woodhouse --- arch/x86/include/asm/xen/interface.h | 3 +++ include/xen/interface/xen.h | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index 9139b3e86316..baca0b00ef76 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h @@ -182,6 +182,9 @@ struct arch_shared_info { unsigned long p2m_cr3; /* cr3 value of the p2m address space */ unsigned long p2m_vaddr; /* virtual address of the p2m list */ unsigned long p2m_generation; /* generation count of p2m mapping */ +#ifdef CONFIG_X86_32 + uint32_t wc_sec_hi; +#endif }; #endif /* !__ASSEMBLY__ */ diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 8bfb242f433e..5ee37a296481 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -598,7 +598,9 @@ struct shared_info { * their gettimeofday() syscall on this wallclock-base value. */ struct pvclock_wall_clock wc; - +#ifndef CONFIG_X86_32 + uint32_t wc_sec_hi; +#endif struct arch_shared_info arch; }; -- cgit v1.2.3 From 629b5348841a10afce49fbe81619863fd839f217 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Thu, 28 Jun 2018 15:06:43 -0400 Subject: KVM: x86/xen: update wallclock region Wallclock on Xen is written in the shared_info page. To that purpose, export kvm_write_wall_clock() and pass on the GPA of its location to populate the shared_info wall clock data. 
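For reference, the guest consumes this with the usual even/odd version protocol; a rough guest-side sketch (not from this series, 64-bit shared_info layout, assuming the Xen public headers with the wc_sec_hi field added two patches above):

#include <stdint.h>

/* wc.version is odd while the host is mid-update; wc_sec_hi extends the
 * seconds count beyond 2106. */
static uint64_t xen_read_wallclock_ns(volatile struct shared_info *s)
{
        uint32_t ver, sec, nsec, sec_hi;

        do {
                ver = s->wc.version;
                __sync_synchronize();
                sec    = s->wc.sec;
                nsec   = s->wc.nsec;
                sec_hi = s->wc_sec_hi;
                __sync_synchronize();
        } while ((ver & 1) || ver != s->wc.version);

        return ((((uint64_t)sec_hi << 32) | sec) * 1000000000ULL) + nsec;
}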
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 17 ++++++++++++----- arch/x86/kvm/x86.h | 1 + arch/x86/kvm/xen.c | 33 ++++++++++++++++++++++++++++++--- 3 files changed, 43 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 59bcba1ea59f..040d3d470173 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1949,15 +1949,14 @@ static s64 get_kvmclock_base_ns(void) } #endif -static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) +void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) { int version; int r; struct pvclock_wall_clock wc; + u32 wc_sec_hi; u64 wall_nsec; - kvm->arch.wall_clock = wall_clock; - if (!wall_clock) return; @@ -1986,6 +1985,12 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); + if (sec_hi_ofs) { + wc_sec_hi = wall_nsec >> 32; + kvm_write_guest(kvm, wall_clock + sec_hi_ofs, + &wc_sec_hi, sizeof(wc_sec_hi)); + } + version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } @@ -3131,13 +3136,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) return 1; - kvm_write_wall_clock(vcpu->kvm, data); + vcpu->kvm->arch.wall_clock = data; + kvm_write_wall_clock(vcpu->kvm, data, 0); break; case MSR_KVM_WALL_CLOCK: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) return 1; - kvm_write_wall_clock(vcpu->kvm, data); + vcpu->kvm->arch.wall_clock = data; + kvm_write_wall_clock(vcpu->kvm, data, 0); break; case MSR_KVM_SYSTEM_TIME_NEW: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index cc652a348acc..39eb04887141 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -247,6 +247,7 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu); } +void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs); void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 924d4e853108..eab4dce93be1 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -21,15 +21,42 @@ DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) { + gpa_t gpa = gfn_to_gpa(gfn); + int wc_ofs, sec_hi_ofs; int ret; int idx = srcu_read_lock(&kvm->srcu); ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache, - gfn_to_gpa(gfn), PAGE_SIZE); - if (!ret) { - kvm->arch.xen.shinfo_set = true; + gpa, PAGE_SIZE); + if (ret) + goto out; + + kvm->arch.xen.shinfo_set = true; + + /* Paranoia checks on the 32-bit struct layout */ + BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900); + BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924); + BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + + /* 32-bit location by default */ + wc_ofs = offsetof(struct compat_shared_info, wc); + sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi); + +#ifdef CONFIG_X86_64 + /* Paranoia checks on the 64-bit struct layout */ + BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00); + BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c); + + if (kvm->arch.xen.long_mode) { + wc_ofs = offsetof(struct shared_info, wc); + sec_hi_ofs = 
offsetof(struct shared_info, wc_sec_hi); } +#endif + + kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs); + kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE); +out: srcu_read_unlock(&kvm->srcu, idx); return ret; } -- cgit v1.2.3 From 3e3246158808d46b81edb8246214c0ab5a852594 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 2 Feb 2021 16:53:25 +0000 Subject: KVM: x86/xen: Add KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR This will be used for per-vCPU setup such as runstate and vcpu_info. Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 20 ++++++++++++++++++++ arch/x86/kvm/xen.c | 30 ++++++++++++++++++++++++++++++ arch/x86/kvm/xen.h | 2 ++ include/uapi/linux/kvm.h | 13 +++++++++++++ 4 files changed, 65 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 040d3d470173..66cd574ee458 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5016,6 +5016,26 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_GET_SUPPORTED_HV_CPUID: r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); break; + case KVM_XEN_VCPU_GET_ATTR: { + struct kvm_xen_vcpu_attr xva; + + r = -EFAULT; + if (copy_from_user(&xva, argp, sizeof(xva))) + goto out; + r = kvm_xen_vcpu_get_attr(vcpu, &xva); + if (!r && copy_to_user(argp, &xva, sizeof(xva))) + r = -EFAULT; + break; + } + case KVM_XEN_VCPU_SET_ATTR: { + struct kvm_xen_vcpu_attr xva; + + r = -EFAULT; + if (copy_from_user(&xva, argp, sizeof(xva))) + goto out; + r = kvm_xen_vcpu_set_attr(vcpu, &xva); + break; + } default: r = -EINVAL; } diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index eab4dce93be1..5cbf6955e509 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -118,6 +118,36 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) return r; } +int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&vcpu->kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&vcpu->kvm->lock); + return r; +} + +int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&vcpu->kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&vcpu->kvm->lock); + return r; +} + int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 12a3dc32e78e..fb85377fdbdc 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -13,6 +13,8 @@ extern struct static_key_false_deferred kvm_xen_enabled; +int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); +int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); int kvm_xen_hypercall(struct kvm_vcpu *vcpu); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index f57f6e741a28..e2b0cbde1908 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1587,6 +1587,7 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_DIRTY_LOG_RING */ #define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7) +/* Per-VM Xen attributes */ #define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr) #define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr) @@ -1605,6 +1606,18 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 +/* 
Per-vCPU Xen attributes */ +#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) +#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) + +struct kvm_xen_vcpu_attr { + __u16 type; + __u16 pad[3]; + union { + __u64 pad[8]; + } u; +}; + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3 From 73e69a86347afe8156aa50c436fc192b280b0cd7 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Fri, 29 Jun 2018 10:52:52 -0400 Subject: KVM: x86/xen: register vcpu info The vcpu info supersedes the per vcpu area of the shared info page and the guest vcpus will use this instead. Signed-off-by: Joao Martins Signed-off-by: Ankur Arora Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/xen.c | 25 ++++++++++++++++++++++++- include/uapi/linux/kvm.h | 3 +++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 00d1233cfd55..4e864f2bd59a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -534,6 +534,8 @@ struct kvm_vcpu_hv { /* Xen HVM per vcpu emulation context */ struct kvm_vcpu_xen { u64 hypercall_rip; + bool vcpu_info_set; + struct gfn_to_hva_cache vcpu_info_cache; }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 5cbf6955e509..2712f1aa9ac9 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -120,15 +120,31 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) { - int r = -ENOENT; + int idx, r = -ENOENT; mutex_lock(&vcpu->kvm->lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); switch (data->type) { + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: + /* No compat necessary here. */ + BUILD_BUG_ON(sizeof(struct vcpu_info) != + sizeof(struct compat_vcpu_info)); + + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.xen.vcpu_info_cache, + data->u.gpa, + sizeof(struct vcpu_info)); + if (!r) + vcpu->arch.xen.vcpu_info_set = true; + break; + + default: break; } + srcu_read_unlock(&vcpu->kvm->srcu, idx); mutex_unlock(&vcpu->kvm->lock); return r; } @@ -140,6 +156,13 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) mutex_lock(&vcpu->kvm->lock); switch (data->type) { + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: + if (vcpu->arch.xen.vcpu_info_set) { + data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; + r = 0; + } + break; + default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e2b0cbde1908..2db0657b3337 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1614,10 +1614,13 @@ struct kvm_xen_vcpu_attr { __u16 type; __u16 pad[3]; union { + __u64 gpa; __u64 pad[8]; } u; }; +#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3 From aa096aa0a05ff7f1e7b8da95fae7aa71668212c7 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Fri, 1 Feb 2019 13:01:45 -0500 Subject: KVM: x86/xen: setup pvclock updates Parameterise kvm_setup_pvclock_page() a little bit so that it can be invoked for different gfn_to_hva_cache structures, and with different offsets. Then we can invoke it for the normal KVM pvclock and also for the Xen one in the vcpu_info. 
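For orientation, both consumers now receive the same pvclock_vcpu_time_info format, which the guest reads with the same kind of version loop; an illustrative guest-side reader (not part of the patch, assumes the pvclock ABI headers and a TSC the flags mark as stable):

#include <stdint.h>
#include <x86intrin.h>

static uint64_t pvclock_read_ns(volatile struct pvclock_vcpu_time_info *t)
{
        uint32_t ver;
        uint64_t delta, ns;

        do {
                ver = t->version;
                __sync_synchronize();
                delta = __rdtsc() - t->tsc_timestamp;
                if (t->tsc_shift >= 0)
                        delta <<= t->tsc_shift;
                else
                        delta >>= -t->tsc_shift;
                ns = t->system_time +
                     (uint64_t)(((unsigned __int128)delta * t->tsc_to_system_mul) >> 32);
                __sync_synchronize();
        } while ((ver & 1) || ver != t->version);

        return ns;
}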
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 31 ++++++++++++++++++------------- arch/x86/kvm/xen.c | 5 +++-- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 66cd574ee458..a295fb8a8978 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2610,13 +2610,15 @@ u64 get_kvmclock_ns(struct kvm *kvm) return ret; } -static void kvm_setup_pvclock_page(struct kvm_vcpu *v) +static void kvm_setup_pvclock_page(struct kvm_vcpu *v, + struct gfn_to_hva_cache *cache, + unsigned int offset) { struct kvm_vcpu_arch *vcpu = &v->arch; struct pvclock_vcpu_time_info guest_hv_clock; - if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, - &guest_hv_clock, sizeof(guest_hv_clock)))) + if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache, + &guest_hv_clock, offset, sizeof(guest_hv_clock)))) return; /* This VCPU is paused, but it's legal for a guest to read another @@ -2639,9 +2641,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v) ++guest_hv_clock.version; /* first time write, random junk */ vcpu->hv_clock.version = guest_hv_clock.version + 1; - kvm_write_guest_cached(v->kvm, &vcpu->pv_time, - &vcpu->hv_clock, - sizeof(vcpu->hv_clock.version)); + kvm_write_guest_offset_cached(v->kvm, cache, + &vcpu->hv_clock, offset, + sizeof(vcpu->hv_clock.version)); smp_wmb(); @@ -2655,16 +2657,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v) trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); - kvm_write_guest_cached(v->kvm, &vcpu->pv_time, - &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); + kvm_write_guest_offset_cached(v->kvm, cache, + &vcpu->hv_clock, offset, + sizeof(vcpu->hv_clock)); smp_wmb(); vcpu->hv_clock.version++; - kvm_write_guest_cached(v->kvm, &vcpu->pv_time, - &vcpu->hv_clock, - sizeof(vcpu->hv_clock.version)); + kvm_write_guest_offset_cached(v->kvm, cache, + &vcpu->hv_clock, offset, + sizeof(vcpu->hv_clock.version)); } static int kvm_guest_time_update(struct kvm_vcpu *v) @@ -2751,7 +2753,10 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->hv_clock.flags = pvclock_flags; if (vcpu->pv_time_enabled) - kvm_setup_pvclock_page(v); + kvm_setup_pvclock_page(v, &vcpu->pv_time, 0); + if (vcpu->xen.vcpu_info_set) + kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache, + offsetof(struct compat_vcpu_info, time)); if (v == kvm_get_vcpu(v->kvm, 0)) kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); return 0; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 2712f1aa9ac9..c307f8b7a8a3 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -135,11 +135,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) &vcpu->arch.xen.vcpu_info_cache, data->u.gpa, sizeof(struct vcpu_info)); - if (!r) + if (!r) { vcpu->arch.xen.vcpu_info_set = true; + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); + } break; - default: break; } -- cgit v1.2.3 From f2340cd9e41dc463cb1189274f3db560c1dfa1f4 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Mon, 23 Jul 2018 11:20:57 -0400 Subject: KVM: x86/xen: register vcpu time info region Allow the Xen emulated guest the ability to register secondary vcpu time information. On Xen guests this is used in order to be mapped to userspace and hence allow vdso gettimeofday to work. 
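Illustrative userspace side; the attribute type and the u.gpa field come from the uapi changes below, while how the VMM learns the GPA (e.g. from the guest's VCPUOP_register_vcpu_time_memory_area hypercall) is left to the VMM:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int xen_set_vcpu_time_info(int vcpu_fd, uint64_t gpa)
{
        struct kvm_xen_vcpu_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO;
        attr.u.gpa = gpa;

        return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}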
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/x86.c | 2 ++ arch/x86/kvm/xen.c | 18 ++++++++++++++++++ include/uapi/linux/kvm.h | 1 + 4 files changed, 23 insertions(+) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4e864f2bd59a..c6c84c4ef7b0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -535,7 +535,9 @@ struct kvm_vcpu_hv { struct kvm_vcpu_xen { u64 hypercall_rip; bool vcpu_info_set; + bool vcpu_time_info_set; struct gfn_to_hva_cache vcpu_info_cache; + struct gfn_to_hva_cache vcpu_time_info_cache; }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a295fb8a8978..0f648434796f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2757,6 +2757,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) if (vcpu->xen.vcpu_info_set) kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache, offsetof(struct compat_vcpu_info, time)); + if (vcpu->xen.vcpu_time_info_set) + kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); if (v == kvm_get_vcpu(v->kvm, 0)) kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); return 0; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index c307f8b7a8a3..bd343222e740 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -141,6 +141,17 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) } break; + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.xen.vcpu_time_info_cache, + data->u.gpa, + sizeof(struct pvclock_vcpu_time_info)); + if (!r) { + vcpu->arch.xen.vcpu_time_info_set = true; + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); + } + break; + default: break; } @@ -164,6 +175,13 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) } break; + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: + if (vcpu->arch.xen.vcpu_time_info_set) { + data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; + r = 0; + } + break; + default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2db0657b3337..0f045ffd9cb6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1620,6 +1620,7 @@ struct kvm_xen_vcpu_attr { }; #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 +#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { -- cgit v1.2.3 From 40da8ccd724f7ca2f08550a46268bc3a91cc8869 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 9 Dec 2020 20:08:30 +0000 Subject: KVM: x86/xen: Add event channel interrupt vector upcall It turns out that we can't handle event channels *entirely* in userspace by delivering them as ExtINT, because KVM is a bit picky about when it accepts ExtINT interrupts from a legacy PIC. The in-kernel local APIC has to have LVT0 configured in APIC_MODE_EXTINT and unmasked, which isn't necessarily the case for Xen guests especially on secondary CPUs. To cope with this, add kvm_xen_get_interrupt() which checks the evtchn_pending_upcall field in the Xen vcpu_info, and delivers the Xen upcall vector (configured by KVM_XEN_ATTR_TYPE_UPCALL_VECTOR) if it's set regardless of LAPIC LVT0 configuration. This gives us the minimum support we need for completely userspace-based implementation of event channels. 
This does mean that vcpu_enter_guest() needs to check for the evtchn_pending_upcall flag being set, because it can't rely on someone having set KVM_REQ_EVENT unless we were to add some way for userspace to do so manually. Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/irq.c | 7 ++++++ arch/x86/kvm/x86.c | 3 ++- arch/x86/kvm/xen.c | 53 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/xen.h | 9 +++++++ include/uapi/linux/kvm.h | 2 ++ 6 files changed, 74 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c6c84c4ef7b0..b37afd856bab 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -913,6 +913,7 @@ struct msr_bitmap_range { struct kvm_xen { bool long_mode; bool shinfo_set; + u8 upcall_vector; struct gfn_to_hva_cache shinfo_cache; }; diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index a035cca82f8f..172b05343cfd 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -14,6 +14,7 @@ #include "irq.h" #include "i8254.h" #include "x86.h" +#include "xen.h" /* * check if there are pending timer events @@ -56,6 +57,9 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v) if (!lapic_in_kernel(v)) return v->arch.interrupt.injected; + if (kvm_xen_has_interrupt(v)) + return 1; + if (!kvm_apic_accept_pic_intr(v)) return 0; @@ -110,6 +114,9 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v) if (!lapic_in_kernel(v)) return v->arch.interrupt.nr; + if (kvm_xen_has_interrupt(v)) + return v->kvm->arch.xen.upcall_vector; + if (irqchip_split(v->kvm)) { int vector = v->arch.pending_external_vector; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0f648434796f..1cedbc3d2455 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8987,7 +8987,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) static_call(kvm_x86_msr_filter_changed)(vcpu); } - if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { + if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || + kvm_xen_has_interrupt(vcpu)) { ++vcpu->stat.req_event; kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index bd343222e740..39a7ffcdcf22 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -61,6 +61,44 @@ out: return ret; } +int __kvm_xen_has_interrupt(struct kvm_vcpu *v) +{ + u8 rc = 0; + + /* + * If the global upcall vector (HVMIRQ_callback_vector) is set and + * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending. + */ + struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache; + struct kvm_memslots *slots = kvm_memslots(v->kvm); + unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending); + + /* No need for compat handling here */ + BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) != + offsetof(struct compat_vcpu_info, evtchn_upcall_pending)); + BUILD_BUG_ON(sizeof(rc) != + sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending)); + BUILD_BUG_ON(sizeof(rc) != + sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending)); + + /* + * For efficiency, this mirrors the checks for using the valid + * cache in kvm_read_guest_offset_cached(), but just uses + * __get_user() instead. And falls back to the slow path. 
+ */ + if (likely(slots->generation == ghc->generation && + !kvm_is_error_hva(ghc->hva) && ghc->memslot)) { + /* Fast path */ + __get_user(rc, (u8 __user *)ghc->hva + offset); + } else { + /* Slow path */ + kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset, + sizeof(rc)); + } + + return rc; +} + int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) { int r = -ENOENT; @@ -83,6 +121,16 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); break; + + case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: + if (data->u.vector < 0x10) + r = -EINVAL; + else { + kvm->arch.xen.upcall_vector = data->u.vector; + r = 0; + } + break; + default: break; } @@ -110,6 +158,11 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) } break; + case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: + data->u.vector = kvm->arch.xen.upcall_vector; + r = 0; + break; + default: break; } diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index fb85377fdbdc..4b32489c0cec 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -13,6 +13,7 @@ extern struct static_key_false_deferred kvm_xen_enabled; +int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu); int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); @@ -29,6 +30,14 @@ static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); } +static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu) +{ + if (static_branch_unlikely(&kvm_xen_enabled.key) && + vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector) + return __kvm_xen_has_interrupt(vcpu); + + return 0; +} /* 32-bit compatibility definitions, also used natively in 32-bit build */ #include diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 0f045ffd9cb6..4d4cd001c908 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1596,6 +1596,7 @@ struct kvm_xen_hvm_attr { __u16 pad[3]; union { __u8 long_mode; + __u8 vector; struct { __u64 gfn; } shared_info; @@ -1605,6 +1606,7 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 +#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 /* Per-vCPU Xen attributes */ #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) -- cgit v1.2.3 From 8d4e7e80838f45d3466d36d4fcb890424825faa9 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 4 Dec 2020 01:02:04 +0000 Subject: KVM: x86: declare Xen HVM shared info capability and add test case Instead of adding a plethora of new KVM_CAP_XEN_FOO capabilities, just add bits to the return value of KVM_CAP_XEN_HVM. 
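A userspace consumer would probe the new bit before using any of the attribute ioctls; a minimal sketch, with kvm_fd standing in for an open /dev/kvm handle and the usual <linux/kvm.h>/<sys/ioctl.h>/<err.h> includes assumed (the selftest added below does the same check through its kvm_check_cap() helper):

  int xen_caps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);

  if (xen_caps < 0)
          err(1, "KVM_CHECK_EXTENSION(KVM_CAP_XEN_HVM)");
  if (!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO))
          errx(1, "KVM_XEN_HVM_CONFIG_SHARED_INFO not supported");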
Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 3 +- include/uapi/linux/kvm.h | 3 + tools/testing/selftests/kvm/Makefile | 1 + .../testing/selftests/kvm/x86_64/xen_shinfo_test.c | 169 +++++++++++++++++++++ 4 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1cedbc3d2455..838ce5e9814b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3769,7 +3769,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_XEN_HVM: r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | - KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL; + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | + KVM_XEN_HVM_CONFIG_SHARED_INFO; break; case KVM_CAP_SYNC_REGS: r = KVM_SYNC_X86_VALID_FIELDS; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4d4cd001c908..63f8f6e95648 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1152,6 +1152,7 @@ struct kvm_x86_mce { #ifdef KVM_CAP_XEN_HVM #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) +#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) struct kvm_xen_hvm_config { __u32 flags; @@ -1604,6 +1605,7 @@ struct kvm_xen_hvm_attr { } u; }; +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 #define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 @@ -1621,6 +1623,7 @@ struct kvm_xen_vcpu_attr { } u; }; +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index be9613dd6d9b..8c8eda429576 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -62,6 +62,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += x86_64/debug_regs TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test +TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c new file mode 100644 index 000000000000..bdb3feb86b5b --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * svm_vmcall_test + * + * Copyright © 2021 Amazon.com, Inc. or its affiliates. 
+ * + * Xen shared_info / pvclock testing + */ + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#include +#include + +#define VCPU_ID 5 + +#define SHINFO_REGION_GPA 0xc0000000ULL +#define SHINFO_REGION_SLOT 10 +#define PAGE_SIZE 4096 + +#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE) + +static struct kvm_vm *vm; + +#define XEN_HYPERCALL_MSR 0x40000000 + +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +} __attribute__((__packed__)); /* 32 bytes */ + +struct pvclock_wall_clock { + u32 version; + u32 sec; + u32 nsec; +} __attribute__((__packed__)); + +static void guest_code(void) +{ + GUEST_DONE(); +} + +static int cmp_timespec(struct timespec *a, struct timespec *b) +{ + if (a->tv_sec > b->tv_sec) + return 1; + else if (a->tv_sec < b->tv_sec) + return -1; + else if (a->tv_nsec > b->tv_nsec) + return 1; + else if (a->tv_nsec < b->tv_nsec) + return -1; + else + return 0; +} + +int main(int argc, char *argv[]) +{ + struct timespec min_ts, max_ts, vm_ts; + + if (!(kvm_check_cap(KVM_CAP_XEN_HVM) & + KVM_XEN_HVM_CONFIG_SHARED_INFO) ) { + print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available"); + exit(KSFT_SKIP); + } + + clock_gettime(CLOCK_REALTIME, &min_ts); + + vm = vm_create_default(VCPU_ID, 0, (void *) guest_code); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + + /* Map a region for the shared_info page */ + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + SHINFO_REGION_GPA, SHINFO_REGION_SLOT, + 2 * getpagesize(), 0); + virt_map(vm, SHINFO_REGION_GPA, SHINFO_REGION_GPA, 2, 0); + + struct kvm_xen_hvm_config hvmc = { + .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, + .msr = XEN_HYPERCALL_MSR, + }; + vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc); + + struct kvm_xen_hvm_attr lm = { + .type = KVM_XEN_ATTR_TYPE_LONG_MODE, + .u.long_mode = 1, + }; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm); + + struct kvm_xen_hvm_attr ha = { + .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, + .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE, + }; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha); + + struct kvm_xen_vcpu_attr vi = { + .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, + .u.gpa = SHINFO_REGION_GPA + 40, + }; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi); + + struct kvm_xen_vcpu_attr pvclock = { + .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, + .u.gpa = PVTIME_ADDR, + }; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock); + + for (;;) { + volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); + struct ucall uc; + + vcpu_run(vm, VCPU_ID); + + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_FAIL("%s", (const char *)uc.args[0]); + /* NOT REACHED */ + case UCALL_SYNC: + break; + case UCALL_DONE: + goto done; + default: + TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd); + } + } + + done: + clock_gettime(CLOCK_REALTIME, &max_ts); + + /* + * Just a *really* basic check that things are being put in the + * right place. The actual calculations are much the same for + * Xen as they are for the KVM variants, so no need to check. 
+ */ + struct pvclock_wall_clock *wc; + struct pvclock_vcpu_time_info *ti, *ti2; + + wc = addr_gva2hva(vm, SHINFO_REGION_GPA + 0xc00); + ti = addr_gva2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20); + ti2 = addr_gva2hva(vm, PVTIME_ADDR); + + vm_ts.tv_sec = wc->sec; + vm_ts.tv_nsec = wc->nsec; + TEST_ASSERT(wc->version && !(wc->version & 1), + "Bad wallclock version %x", wc->version); + TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old"); + TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new"); + + TEST_ASSERT(ti->version && !(ti->version & 1), + "Bad time_info version %x", ti->version); + TEST_ASSERT(ti2->version && !(ti2->version & 1), + "Bad time_info version %x", ti->version); + + kvm_vm_free(vm); + return 0; +} -- cgit v1.2.3 From e1f68169a4f89e49f33bf52df29aeb57cb8b1144 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 4 Dec 2020 09:03:56 +0000 Subject: KVM: Add documentation for Xen hypercall and shared_info updates Signed-off-by: David Woodhouse --- Documentation/virt/kvm/api.rst | 171 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 166 insertions(+), 5 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index effcc08733fd..5919e9a94f25 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -960,6 +960,14 @@ memory. __u8 pad2[30]; }; +If the KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL flag is returned from the +KVM_CAP_XEN_HVM check, it may be set in the flags field of this ioctl. +This requests KVM to generate the contents of the hypercall page +automatically; hypercalls will be intercepted and passed to userspace +through KVM_EXIT_XEN. In this case, all of the blob size and address +fields must be zero. + +No other flags are currently valid in the struct kvm_xen_hvm_config. 4.29 KVM_GET_CLOCK ------------------ @@ -4831,6 +4839,101 @@ into user space. If a vCPU is in running state while this ioctl is invoked, the vCPU may experience inconsistent filtering behavior on MSR accesses. +4.127 KVM_XEN_HVM_SET_ATTR +-------------------------- + +:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO +:Architectures: x86 +:Type: vm ioctl +:Parameters: struct kvm_xen_hvm_attr +:Returns: 0 on success, < 0 on error + +:: + + struct kvm_xen_hvm_attr { + __u16 type; + __u16 pad[3]; + union { + __u8 long_mode; + __u8 vector; + struct { + __u64 gfn; + } shared_info; + __u64 pad[4]; + } u; + }; + +type values: + +KVM_XEN_ATTR_TYPE_LONG_MODE + Sets the ABI mode of the VM to 32-bit or 64-bit (long mode). This + determines the layout of the shared info pages exposed to the VM. + +KVM_XEN_ATTR_TYPE_SHARED_INFO + Sets the guest physical frame number at which the Xen "shared info" + page resides. Note that although Xen places vcpu_info for the first + 32 vCPUs in the shared_info page, KVM does not automatically do so + and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used + explicitly even when the vcpu_info for a given vCPU resides at the + "default" location in the shared_info page. This is because KVM is + not aware of the Xen CPU id which is used as the index into the + vcpu_info[] array, so cannot know the correct default location. + +KVM_XEN_ATTR_TYPE_UPCALL_VECTOR + Sets the exception vector used to deliver Xen event channel upcalls. 
+ +4.128 KVM_XEN_HVM_GET_ATTR +-------------------------- + +:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO +:Architectures: x86 +:Type: vm ioctl +:Parameters: struct kvm_xen_hvm_attr +:Returns: 0 on success, < 0 on error + +Allows Xen VM attributes to be read. For the structure and types, +see KVM_XEN_HVM_SET_ATTR above. + +4.129 KVM_XEN_VCPU_SET_ATTR +--------------------------- + +:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO +:Architectures: x86 +:Type: vcpu ioctl +:Parameters: struct kvm_xen_vcpu_attr +:Returns: 0 on success, < 0 on error + +:: + + struct kvm_xen_vcpu_attr { + __u16 type; + __u16 pad[3]; + union { + __u64 gpa; + __u64 pad[4]; + } u; + }; + +type values: + +KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO + Sets the guest physical address of the vcpu_info for a given vCPU. + +KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO + Sets the guest physical address of an additional pvclock structure + for a given vCPU. This is typically used for guest vsyscall support. + +4.130 KVM_XEN_VCPU_GET_ATTR +-------------------------- + +:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO +:Architectures: x86 +:Type: vcpu ioctl +:Parameters: struct kvm_xen_vcpu_attr +:Returns: 0 on success, < 0 on error + +Allows Xen vCPU attributes to be read. For the structure and types, +see KVM_XEN_VCPU_SET_ATTR above. 5. The kvm_run structure ======================== @@ -4998,13 +5101,18 @@ to the byte array. .. note:: - For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR, + For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR, KVM_EXIT_XEN, KVM_EXIT_EPR, KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR the corresponding operations are complete (and guest state is consistent) only after userspace has re-entered the kernel with KVM_RUN. The kernel side will first finish - incomplete operations and then check for pending signals. Userspace - can re-enter the guest with an unmasked signal pending to complete - pending operations. + incomplete operations and then check for pending signals. + + The pending state of the operation is not preserved in state which is + visible to userspace, thus userspace should ensure that the operation is + completed before performing a live migration. Userspace can re-enter the + guest with an unmasked signal pending or with the immediate_exit field set + to complete pending operations without allowing any further instructions + to be executed. :: @@ -5329,6 +5437,34 @@ wants to write. Once finished processing the event, user space must continue vCPU execution. If the MSR write was unsuccessful, user space also sets the "error" field to "1". +:: + + + struct kvm_xen_exit { + #define KVM_EXIT_XEN_HCALL 1 + __u32 type; + union { + struct { + __u32 longmode; + __u32 cpl; + __u64 input; + __u64 result; + __u64 params[6]; + } hcall; + } u; + }; + /* KVM_EXIT_XEN */ + struct kvm_hyperv_exit xen; + +Indicates that the VCPU exits into userspace to process some tasks +related to Xen emulation. + +Valid values for 'type' are: + + - KVM_EXIT_XEN_HCALL -- synchronously notify user-space about Xen hypercall. + Userspace is expected to place the hypercall result into the appropriate + field before invoking KVM_RUN again. + :: /* Fix the size of the union. */ @@ -6454,7 +6590,6 @@ guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf (0x40000001). Otherwise, a guest may use the paravirtual features regardless of what has actually been exposed through the CPUID leaf. 
- 8.29 KVM_CAP_DIRTY_LOG_RING --------------------------- @@ -6541,3 +6676,29 @@ KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG. After enabling KVM_CAP_DIRTY_LOG_RING with an acceptable dirty ring size, the virtual machine will switch to ring-buffer dirty page tracking and further KVM_GET_DIRTY_LOG or KVM_CLEAR_DIRTY_LOG ioctls will fail. + +8.30 KVM_CAP_XEN_HVM +-------------------- + +:Architectures: x86 + +This capability indicates the features that Xen supports for hosting Xen +PVHVM guests. Valid flags are:: + + #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) + #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) + #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) + +The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG +ioctl is available, for the guest to set its hypercall page. + +If KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL is also set, the same flag may also be +provided in the flags to KVM_XEN_HVM_CONFIG, without providing hypercall page +contents, to request that KVM generate hypercall page content automatically +and also enable interception of guest hypercalls with KVM_EXIT_XEN. + +The KVM_XEN_HVM_CONFIG_SHARED_INFO flag indicates the availability of the +KVM_XEN_HVM_SET_ATTR, KVM_XEN_HVM_GET_ATTR, KVM_XEN_VCPU_SET_ATTR and +KVM_XEN_VCPU_GET_ATTR ioctls, as well as the delivery of exception vectors +for event channel upcalls when the evtchn_upcall_pending field of a vcpu's +vcpu_info is set. -- cgit v1.2.3 From f156abec725f945f9884bc6a5bd0dccb5aac16a8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:06 -0800 Subject: KVM: x86: Set so called 'reserved CR3 bits in LM mask' at vCPU reset Set cr3_lm_rsvd_bits, which is effectively an invalid GPA mask, at vCPU reset. The reserved bits check needs to be done even if userspace never configures the guest's CPUID model. Cc: stable@vger.kernel.org Fixes: 0107973a80ad ("KVM: x86: Introduce cr3_lm_rsvd_bits in kvm_vcpu_arch") Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 838ce5e9814b..10414a78b951 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10080,6 +10080,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) fx_init(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); + vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; -- cgit v1.2.3 From 2732be90235347a3be4babdc9f88a1ea93970b0b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:07 -0800 Subject: KVM: nSVM: Don't strip host's C-bit from guest's CR3 when reading PDPTRs Don't clear the SME C-bit when reading a guest PDPTR, as the GPA (CR3) is in the guest domain. Barring a bizarre paravirtual use case, this is likely a benign bug. SME is not emulated by KVM, loading SEV guest PDPTRs is doomed as KVM can't use the correct key to read guest memory, and setting guest MAXPHYADDR higher than the host, i.e. overlapping the C-bit, would cause faults in the guest. Note, for SEV guests, stripping the C-bit is technically aligned with CPU behavior, but for KVM it's the greater of two evils. Because KVM doesn't have access to the guest's encryption key, ignoring the C-bit would at best result in KVM reading garbage. By keeping the C-bit, KVM will fail its read (unless userspace creates a memslot with the C-bit set). 
The guest will still undoubtedly die, as KVM will use '0' for the PDPTR value, but that's preferable to interpreting encrypted data as a PDPTR. Fixes: d0ec49d4de90 ("kvm/x86/svm: Support Secure Memory Encryption within KVM") Cc: Tom Lendacky Cc: Brijesh Singh Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index d679130d209b..69188897cae2 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -58,7 +58,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) u64 pdpte; int ret; - ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte, + ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte, offset_in_page(cr3) + index * 8, 8); if (ret) return 0; -- cgit v1.2.3 From 4bda0e97868a95553ba71d87f593756e1ffd654b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:08 -0800 Subject: KVM: x86: Add a helper to check for a legal GPA Add a helper to check for a legal GPA, and use it to consolidate code in existing, related helpers. Future patches will extend usage to VMX and SVM code, properly handle exceptions to the maxphyaddr rule, and add more helpers. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.h | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index dc921d76e42e..674d61079f2d 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -36,9 +36,19 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) return vcpu->arch.maxphyaddr; } +static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + return !(gpa >> cpuid_maxphyaddr(vcpu)); +} + static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) { - return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu))); + return !kvm_vcpu_is_legal_gpa(vcpu, gpa); +} + +static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + return PAGE_ALIGNED(gpa) && kvm_vcpu_is_legal_gpa(vcpu, gpa); } struct cpuid_reg { @@ -324,11 +334,6 @@ static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature) kvm_cpu_cap_set(x86_feature); } -static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) -{ - return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu)); -} - static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu, unsigned int kvm_feature) { -- cgit v1.2.3 From da6c6a7c06e268f53c0560edc9dff372f11218f5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:09 -0800 Subject: KVM: x86: Add a helper to handle legal GPA with an alignment requirement Add a helper to genericize checking for a legal GPA that also must conform to an arbitrary alignment, and use it in the existing page_address_valid(). Future patches will replace open coded variants in VMX and SVM. 
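To make the helper's semantics concrete, a stand-alone (user-space, illustrative) version of the same check looks like this; maxphyaddr stands in for the vCPU's reported MAXPHYADDR and alignment must be a power of two:

  #include <stdbool.h>
  #include <stdint.h>

  static bool is_legal_aligned_gpa(uint64_t gpa, uint64_t alignment,
                                   int maxphyaddr)
  {
          /* Bits [maxphyaddr, 63] are reserved and must be clear. */
          uint64_t reserved = ~0ULL << maxphyaddr;

          return !(gpa & (alignment - 1)) && !(gpa & reserved);
  }

  /*
   * With maxphyaddr == 48: 0x0000123456780000 with alignment 16 passes,
   * while 0x0001000000000000 fails because bit 48 is reserved.
   */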
Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 674d61079f2d..a9d55ab51e3c 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -46,9 +46,15 @@ static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) return !kvm_vcpu_is_legal_gpa(vcpu, gpa); } +static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, + gpa_t gpa, gpa_t alignment) +{ + return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa); +} + static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) { - return PAGE_ALIGNED(gpa) && kvm_vcpu_is_legal_gpa(vcpu, gpa); + return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE); } struct cpuid_reg { -- cgit v1.2.3 From 636e8b733491135aacf9d1dfd23d9e77637f1198 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:10 -0800 Subject: KVM: VMX: Use GPA legality helpers to replace open coded equivalents Replace a variety of open coded GPA checks with the recently introduced common helpers. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 26 +++++++------------------- arch/x86/kvm/vmx/vmx.c | 2 +- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 887283b1df43..b2f0b5e9cd63 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -745,8 +745,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, (CC(!nested_cpu_has_vid(vmcs12)) || CC(!nested_exit_intr_ack_set(vcpu)) || CC((vmcs12->posted_intr_nv & 0xff00)) || - CC((vmcs12->posted_intr_desc_addr & 0x3f)) || - CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))) + CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64)))) return -EINVAL; /* tpr shadow is needed by all apicv features. */ @@ -759,13 +758,11 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, u32 count, u64 addr) { - int maxphyaddr; - if (count == 0) return 0; - maxphyaddr = cpuid_maxphyaddr(vcpu); - if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || - (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) + + if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) || + !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1))) return -EINVAL; return 0; @@ -1063,14 +1060,6 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, } } -static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) -{ - unsigned long invalid_mask; - - invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); - return (val & invalid_mask) == 0; -} - /* * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit. 
* tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't @@ -1122,7 +1111,7 @@ static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu) static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, enum vm_entry_failure_code *entry_failure_code) { - if (CC(!nested_cr3_valid(vcpu, cr3))) { + if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return -EINVAL; } @@ -2636,7 +2625,6 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) { struct vcpu_vmx *vmx = to_vmx(vcpu); - int maxphyaddr = cpuid_maxphyaddr(vcpu); /* Check for memory type validity */ switch (new_eptp & VMX_EPTP_MT_MASK) { @@ -2667,7 +2655,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) } /* Reserved bits should not be set */ - if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f))) + if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) return false; /* AD, if set, should be supported */ @@ -2851,7 +2839,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || - CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3))) + CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) return -EINVAL; if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d995f47a08ef..777177ea9a35 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1110,7 +1110,7 @@ static inline bool pt_can_write_msr(struct vcpu_vmx *vmx) static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base) { /* The base must be 128-byte aligned and a legal physical address. */ - return !kvm_vcpu_is_illegal_gpa(vcpu, base) && !(base & 0x7f); + return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128); } static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) -- cgit v1.2.3 From bbc2c63ddd51cd6d349e3fe0010f9b7b259e58ea Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:11 -0800 Subject: KVM: nSVM: Use common GPA helper to check for illegal CR3 Replace an open coded check for an invalid CR3 with its equivalent helper. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 69188897cae2..1055b7d0012b 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -345,7 +345,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm) static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt) { - if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)) + if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) return -EINVAL; if (!nested_npt && is_pae_paging(vcpu) && -- cgit v1.2.3 From ca29e14506bd66d50733c1f3e4448aba54e70cc7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:12 -0800 Subject: KVM: x86: SEV: Treat C-bit as legal GPA bit regardless of vCPU mode Rename cr3_lm_rsvd_bits to reserved_gpa_bits, and use it for all GPA legality checks. AMD's APM states: If the C-bit is an address bit, this bit is masked from the guest physical address when it is translated through the nested page tables. 
Thus, any access that can conceivably be run through NPT should ignore the C-bit when checking for validity. For features that KVM emulates in software, e.g. MTRRs, there is no clear direction in the APM for how the C-bit should be handled. For such cases, follow the SME behavior inasmuch as possible, since SEV is is essentially a VM-specific variant of SME. For SME, the APM states: In this case the upper physical address bits are treated as reserved when the feature is enabled except where otherwise indicated. Collecting the various relavant SME snippets in the APM and cross- referencing the omissions with Linux kernel code, this leaves MTTRs and APIC_BASE as the only flows that KVM emulates that should _not_ ignore the C-bit. Note, this means the reserved bit checks in the page tables are technically broken. This will be remedied in a future patch. Although the page table checks are technically broken, in practice, it's all but guaranteed to be irrelevant. NPT is required for SEV, i.e. shadowing page tables isn't needed in the common case. Theoretically, the checks could be in play for nested NPT, but it's extremely unlikely that anyone is running nested VMs on SEV, as doing so would require L1 to expose sensitive data to L0, e.g. the entire VMCB. And if anyone is running nested VMs, L0 can't read the guest's encrypted memory, i.e. L1 would need to put its NPT in shared memory, in which case the C-bit will never be set. Or, L1 could use shadow paging, but again, if L0 needs to read page tables, e.g. to load PDPTRs, the memory can't be encrypted if L1 has any expectation of L0 doing the right thing. Cc: Tom Lendacky Cc: Brijesh Singh Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-8-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/cpuid.h | 2 +- arch/x86/kvm/svm/nested.c | 2 +- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/x86.c | 7 +++---- 6 files changed, 8 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b37afd856bab..717940e97f66 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -660,7 +660,7 @@ struct kvm_vcpu_arch { int cpuid_nent; struct kvm_cpuid_entry2 *cpuid_entries; - unsigned long cr3_lm_rsvd_bits; + u64 reserved_gpa_bits; int maxphyaddr; int max_tdp_level; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ff16734e6b6a..8d75576edf9c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -179,7 +179,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(guest_cpuid_has, vcpu); - vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); + vcpu->arch.reserved_gpa_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); /* Invoke the vendor callback only after the above state is updated. 
*/ static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index a9d55ab51e3c..f673f45bdf52 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -38,7 +38,7 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) { - return !(gpa >> cpuid_maxphyaddr(vcpu)); + return !(gpa & vcpu->arch.reserved_gpa_bits); } static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 1055b7d0012b..cc91738ab445 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -248,7 +248,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) if (vmcb12_lma) { if (!(vmcb12->save.cr4 & X86_CR4_PAE) || !(vmcb12->save.cr0 & X86_CR0_PE) || - (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits)) + kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3)) return false; } if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index fdea0994e94f..4141caea857a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4058,7 +4058,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) if (sev_guest(vcpu->kvm)) { best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0); if (best) - vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f)); + vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); } if (!kvm_vcpu_apicv_active(vcpu)) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 10414a78b951..077e8b9c9e11 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1074,8 +1074,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) return 0; } - if (is_long_mode(vcpu) && - (cr3 & vcpu->arch.cr3_lm_rsvd_bits)) + if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3)) return 1; else if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) @@ -9701,7 +9700,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) */ if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) return false; - if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits) + if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) return false; } else { /* @@ -10080,7 +10079,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) fx_init(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); - vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); + vcpu->arch.reserved_gpa_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; -- cgit v1.2.3 From 5b7f575ccd29eb1a0b013961bee5957574046094 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:13 -0800 Subject: KVM: x86: Use reserved_gpa_bits to calculate reserved PxE bits Use reserved_gpa_bits, which accounts for exceptions to the maxphyaddr rule, e.g. SEV's C-bit, for the page {table,directory,etc...} entry (PxE) reserved bits checks. For SEV, the C-bit is ignored by hardware when walking pages tables, e.g. the APM states: Note that while the guest may choose to set the C-bit explicitly on instruction pages and page table addresses, the value of this bit is a don't-care in such situations as hardware always performs these as private accesses. Such behavior is expected to hold true for other features that repurpose GPA bits, e.g. KVM could theoretically emulate SME or MKTME, which both allow non-zero repurposed bits in the page tables. 
Conceptually, KVM should apply reserved GPA checks universally, and any features that do not adhere to the basic rule should be explicitly handled, i.e. if a GPA bit is repurposed but not allowed in page tables for whatever reason. Refactor __reset_rsvds_bits_mask() to take the pre-generated reserved bits mask, and opportunistically clean up its code, e.g. to align lines and comments. Practically speaking, this is change is a likely a glorified nop given the current KVM code base. SEV's C-bit is the only repurposed GPA bit, and KVM doesn't support shadowing encrypted page tables (which is theoretically possible via SEV debug APIs). Cc: Rick Edgecombe Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-9-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 10 +++-- arch/x86/kvm/mmu/mmu.c | 104 ++++++++++++++++++++++++------------------------- arch/x86/kvm/x86.c | 3 +- 3 files changed, 58 insertions(+), 59 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 8d75576edf9c..98db12a7a780 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -173,16 +173,20 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_update_pv_runtime(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); - kvm_mmu_reset_context(vcpu); + vcpu->arch.reserved_gpa_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); kvm_pmu_refresh(vcpu); vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(guest_cpuid_has, vcpu); - vcpu->arch.reserved_gpa_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); - /* Invoke the vendor callback only after the above state is updated. */ static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); + + /* + * Except for the MMU, which needs to be reset after any vendor + * specific adjustments to the reserved GPA bits. + */ + kvm_mmu_reset_context(vcpu); } static int is_efer_nx(void) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index e8bfff9acd5e..d462db3bc742 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3985,20 +3985,27 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, static void __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct rsvd_bits_validate *rsvd_check, - int maxphyaddr, int level, bool nx, bool gbpages, + u64 pa_bits_rsvd, int level, bool nx, bool gbpages, bool pse, bool amd) { - u64 exb_bit_rsvd = 0; u64 gbpages_bit_rsvd = 0; u64 nonleaf_bit8_rsvd = 0; + u64 high_bits_rsvd; rsvd_check->bad_mt_xwr = 0; - if (!nx) - exb_bit_rsvd = rsvd_bits(63, 63); if (!gbpages) gbpages_bit_rsvd = rsvd_bits(7, 7); + if (level == PT32E_ROOT_LEVEL) + high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62); + else + high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); + + /* Note, NX doesn't exist in PDPTEs, this is handled below. */ + if (!nx) + high_bits_rsvd |= rsvd_bits(63, 63); + /* * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for * leaf entries) on AMD CPUs only. 
@@ -4027,45 +4034,39 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); break; case PT32E_ROOT_LEVEL: - rsvd_check->rsvd_bits_mask[0][2] = - rsvd_bits(maxphyaddr, 63) | - rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ - rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62); /* PDE */ - rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62); /* PTE */ - rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 62) | - rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) | + high_bits_rsvd | + rsvd_bits(5, 8) | + rsvd_bits(1, 2); /* PDPTE */ + rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */ + rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */ + rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | + rsvd_bits(13, 20); /* large page */ rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; break; case PT64_ROOT_5LEVEL: - rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | - rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | + nonleaf_bit8_rsvd | + rsvd_bits(7, 7); rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; fallthrough; case PT64_ROOT_4LEVEL: - rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | - gbpages_bit_rsvd | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51); - rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | + nonleaf_bit8_rsvd | + rsvd_bits(7, 7); + rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | + gbpages_bit_rsvd; + rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; + rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; - rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | - gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | - rsvd_bits(13, 29); - rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | - rsvd_bits(maxphyaddr, 51) | - rsvd_bits(13, 20); /* large page */ + rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | + gbpages_bit_rsvd | + rsvd_bits(13, 29); + rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | + rsvd_bits(13, 20); /* large page */ rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; break; @@ -4076,8 +4077,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, - cpuid_maxphyaddr(vcpu), context->root_level, - context->nx, + vcpu->arch.reserved_gpa_bits, + context->root_level, context->nx, guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), guest_cpuid_is_amd_or_hygon(vcpu)); @@ -4085,27 +4086,22 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, - int maxphyaddr, bool execonly) + u64 pa_bits_rsvd, bool execonly) { + u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); u64 bad_mt_xwr; - rsvd_check->rsvd_bits_mask[0][4] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - rsvd_check->rsvd_bits_mask[0][3] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - rsvd_check->rsvd_bits_mask[0][2] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - rsvd_check->rsvd_bits_mask[0][1] = - 
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7); + rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7); + rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* large page */ rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; - rsvd_check->rsvd_bits_mask[1][2] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); - rsvd_check->rsvd_bits_mask[1][1] = - rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); + rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29); + rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20); rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ @@ -4124,7 +4120,7 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, - cpuid_maxphyaddr(vcpu), execonly); + vcpu->arch.reserved_gpa_bits, execonly); } /* @@ -4146,7 +4142,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) */ shadow_zero_check = &context->shadow_zero_check; __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - shadow_phys_bits, + rsvd_bits(shadow_phys_bits, 63), context->shadow_root_level, uses_nx, guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), true); @@ -4183,13 +4179,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - shadow_phys_bits, + rsvd_bits(shadow_phys_bits, 63), context->shadow_root_level, false, boot_cpu_has(X86_FEATURE_GBPAGES), true, true); else __reset_rsvds_bits_mask_ept(shadow_zero_check, - shadow_phys_bits, + rsvd_bits(shadow_phys_bits, 63), false); if (!shadow_me_mask) @@ -4210,7 +4206,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, - shadow_phys_bits, execonly); + rsvd_bits(shadow_phys_bits, 63), execonly); } #define BYTE_MASK(access) \ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 077e8b9c9e11..ef398c540d7f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -759,8 +759,7 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) { - return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) | - rsvd_bits(1, 2); + return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); } /* -- cgit v1.2.3 From 6f8e65a60168567cc59f9b99980ea9112d4152f5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:14 -0800 Subject: KVM: x86/mmu: Add helper to generate mask of reserved HPA bits Add a helper to generate the mask of reserved PA bits in the host. No functional change intended. 
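For reference, rsvd_bits(s, e) produces a mask with bits s..e (inclusive) set, so the new helper simply marks every host physical address bit at or above shadow_phys_bits as reserved. A stand-alone sketch of the arithmetic (user-space C mirroring the kernel helper; the shadow_phys_bits value is only an example):

  #include <stdint.h>
  #include <stdio.h>

  /* Mask with bits s..e (inclusive) set, mirroring KVM's rsvd_bits(). */
  static uint64_t rsvd_bits(int s, int e)
  {
          return ((2ULL << (e - s)) - 1) << s;
  }

  int main(void)
  {
          int shadow_phys_bits = 46;      /* example: 46-bit host */

          /* Equivalent of reserved_hpa_bits(): prints 0xffffc00000000000. */
          printf("0x%016llx\n",
                 (unsigned long long)rsvd_bits(shadow_phys_bits, 63));
          return 0;
  }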
Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-10-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d462db3bc742..86af58294272 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4123,6 +4123,11 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, vcpu->arch.reserved_gpa_bits, execonly); } +static inline u64 reserved_hpa_bits(void) +{ + return rsvd_bits(shadow_phys_bits, 63); +} + /* * the page table on host is the shadow page table for the page * table in guest or amd nested guest, its mmu features completely @@ -4142,7 +4147,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) */ shadow_zero_check = &context->shadow_zero_check; __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - rsvd_bits(shadow_phys_bits, 63), + reserved_hpa_bits(), context->shadow_root_level, uses_nx, guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), true); @@ -4179,14 +4184,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - rsvd_bits(shadow_phys_bits, 63), + reserved_hpa_bits(), context->shadow_root_level, false, boot_cpu_has(X86_FEATURE_GBPAGES), true, true); else __reset_rsvds_bits_mask_ept(shadow_zero_check, - rsvd_bits(shadow_phys_bits, 63), - false); + reserved_hpa_bits(), false); if (!shadow_me_mask) return; @@ -4206,7 +4210,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, - rsvd_bits(shadow_phys_bits, 63), execonly); + reserved_hpa_bits(), execonly); } #define BYTE_MASK(access) \ -- cgit v1.2.3 From a8ac864a7d6dbc2fc43081b1eecd9e0183065d47 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Feb 2021 16:01:15 -0800 Subject: KVM: x86: Add helper to consolidate "raw" reserved GPA mask calculations Add a helper to generate the mask of reserved GPA bits _without_ any adjustments for repurposed bits, and use it to replace a variety of open coded variants in the MTRR and APIC_BASE flows. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210204000117.3303214-11-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 12 +++++++++++- arch/x86/kvm/cpuid.h | 1 + arch/x86/kvm/mtrr.c | 12 ++++++------ arch/x86/kvm/x86.c | 4 ++-- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 98db12a7a780..f47bbd4b0837 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -173,7 +173,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_update_pv_runtime(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); - vcpu->arch.reserved_gpa_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); + vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); kvm_pmu_refresh(vcpu); vcpu->arch.cr4_guest_rsvd_bits = @@ -227,6 +227,16 @@ not_found: return 36; } +/* + * This "raw" version returns the reserved GPA bits without any adjustments for + * encryption technologies that usurp bits. The raw mask should be used if and + * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs. 
+ */ +u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) +{ + return rsvd_bits(cpuid_maxphyaddr(vcpu), 63); +} + /* when an old userspace process fills a new kernel module */ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index f673f45bdf52..2a0c5064497f 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -30,6 +30,7 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only); int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu); +u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu); static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index f472fdb6ae7e..a8502e02f479 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -75,7 +75,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) /* variable MTRRs */ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); - mask = (~0ULL) << cpuid_maxphyaddr(vcpu); + mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) @@ -351,14 +351,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (var_mtrr_range_is_valid(cur)) list_del(&mtrr_state->var_ranges[index].node); - /* Extend the mask with all 1 bits to the left, since those - * bits must implicitly be 0. The bits are then cleared - * when reading them. + /* + * Set all illegal GPA bits in the mask, since those bits must + * implicitly be 0. The bits are then cleared when reading them. */ if (!is_mtrr_mask) cur->base = data; else - cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu)); + cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu); /* add it to the list if it's enabled. */ if (var_mtrr_range_is_valid(cur)) { @@ -426,7 +426,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) else *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; - *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1; + *pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu); } return 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ef398c540d7f..d9f931c63293 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -407,7 +407,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); - u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | + u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff | (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) @@ -10078,7 +10078,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) fx_init(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); - vcpu->arch.reserved_gpa_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); + vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; -- cgit v1.2.3 From e36b250e502a6145fa95a3d93af83ba66fc6ae1b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 8 Feb 2021 06:29:28 -0500 Subject: i915: kvmgt: the KVM mmu_lock is now an rwlock Adjust the KVMGT page tracking callbacks. 
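The change is mechanical, but for readers unfamiliar with the API: rwlock_t allows any number of concurrent readers while a writer gets exclusive access, so callbacks that mutate the protect table now take the lock for writing. A generic sketch of the pattern (illustrative only, not KVMGT code):

  #include <linux/spinlock.h>

  static DEFINE_RWLOCK(example_lock);

  static void example_update(void)
  {
          write_lock(&example_lock);
          /* Exclusive: no readers or other writers while state is changed. */
          write_unlock(&example_lock);
  }

  static void example_query(void)
  {
          read_lock(&example_lock);
          /* Any number of readers may hold the lock here concurrently. */
          read_unlock(&example_lock);
  }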
Cc: Zhenyu Wang Cc: Zhi Wang Cc: intel-gvt-dev@lists.freedesktop.org Cc: intel-gfx@lists.freedesktop.org Signed-off-by: Paolo Bonzini --- drivers/gpu/drm/i915/gvt/kvmgt.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 60f1a386dd06..b4348256ae95 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1703,7 +1703,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) return -EINVAL; } - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvmgt_gfn_is_write_protected(info, gfn)) goto out; @@ -1712,7 +1712,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) kvmgt_protect_table_add(info, gfn); out: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } @@ -1737,7 +1737,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) return -EINVAL; } - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (!kvmgt_gfn_is_write_protected(info, gfn)) goto out; @@ -1746,7 +1746,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) kvmgt_protect_table_del(info, gfn); out: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } @@ -1772,7 +1772,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, struct kvmgt_guest_info *info = container_of(node, struct kvmgt_guest_info, track_node); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < slot->npages; i++) { gfn = slot->base_gfn + i; if (kvmgt_gfn_is_write_protected(info, gfn)) { @@ -1781,7 +1781,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, kvmgt_protect_table_del(info, gfn); } } - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) -- cgit v1.2.3 From 897218ff7cf19290ec2d69652ce673d8ed6fedeb Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 6 Feb 2021 09:53:33 -0500 Subject: KVM: x86: compile out TDP MMU on 32-bit systems The TDP MMU assumes that it can do atomic accesses to 64-bit PTEs. Rather than just disabling it, compile it out completely so that it is possible to use for example 64-bit xchg. To limit the number of stubs, wrap all accesses to tdp_mmu_enabled or tdp_mmu_page with a function. Calls to all other functions in tdp_mmu.c are eliminated and do not even reach the linker. Reviewed-by: Sean Christopherson Tested-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/Makefile | 3 ++- arch/x86/kvm/mmu/mmu.c | 36 ++++++++++++++++++------------------ arch/x86/kvm/mmu/mmu_internal.h | 2 ++ arch/x86/kvm/mmu/tdp_mmu.c | 29 +---------------------------- arch/x86/kvm/mmu/tdp_mmu.h | 32 ++++++++++++++++++++++++++++---- 6 files changed, 53 insertions(+), 51 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 717940e97f66..1ed1206c196d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1033,6 +1033,7 @@ struct kvm_arch { struct kvm_pmu_event_filter *pmu_event_filter; struct task_struct *nx_lpage_recovery_thread; +#ifdef CONFIG_X86_64 /* * Whether the TDP MMU is enabled for this VM. This contains a * snapshot of the TDP MMU module parameter from when the VM was @@ -1071,6 +1072,7 @@ struct kvm_arch { * the thread holds the MMU lock in write mode. 
*/ spinlock_t tdp_mmu_pages_lock; +#endif /* CONFIG_X86_64 */ }; struct kvm_vm_stat { diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index a50041235530..aeab168c5711 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -17,7 +17,8 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-y += x86.o emulate.o i8259.o irq.o lapic.o xen.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ - mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o + mmu/spte.o +kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \ vmx/evmcs.o vmx/nested.o vmx/posted_intr.o diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 86af58294272..bebb66c6d068 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1225,7 +1225,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, { struct kvm_rmap_head *rmap_head; - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, true); while (mask) { @@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, { struct kvm_rmap_head *rmap_head; - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, slot->base_gfn + gfn_offset, mask, false); while (mask) { @@ -1310,7 +1310,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, write_protected |= __rmap_write_protect(kvm, rmap_head, true); } - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) write_protected |= kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn); @@ -1522,7 +1522,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end); return r; @@ -1534,7 +1534,7 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte); return r; @@ -1589,7 +1589,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) int young = false; young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) young |= kvm_tdp_mmu_age_hva_range(kvm, start, end); return young; @@ -1600,7 +1600,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) int young = false; young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) young |= kvm_tdp_mmu_test_age_hva(kvm, hva); return young; @@ -3155,7 +3155,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); if (kvm_mmu_put_root(kvm, sp)) { - if (sp->tdp_mmu_page) + if (is_tdp_mmu_page(sp)) kvm_tdp_mmu_free_root(kvm, sp); else if (sp->role.invalid) kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); @@ -3249,7 +3249,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) hpa_t root; unsigned i; - if (vcpu->kvm->arch.tdp_mmu_enabled) { + if (is_tdp_mmu_enabled(vcpu->kvm)) { root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu); if (!VALID_PAGE(root)) @@ -5411,7 +5411,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) kvm_zap_obsolete_pages(kvm); - if (kvm->arch.tdp_mmu_enabled) + if 
(is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_zap_all(kvm); write_unlock(&kvm->mmu_lock); @@ -5474,7 +5474,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) } } - if (kvm->arch.tdp_mmu_enabled) { + if (is_tdp_mmu_enabled(kvm)) { flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end); if (flush) kvm_flush_remote_tlbs(kvm); @@ -5498,7 +5498,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, write_lock(&kvm->mmu_lock); flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, start_level, KVM_MAX_HUGEPAGE_LEVEL, false); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K); write_unlock(&kvm->mmu_lock); @@ -5564,7 +5564,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, kvm_mmu_zap_collapsible_spte, true); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot); write_unlock(&kvm->mmu_lock); } @@ -5591,7 +5591,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, write_lock(&kvm->mmu_lock); flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); write_unlock(&kvm->mmu_lock); @@ -5614,7 +5614,7 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, write_lock(&kvm->mmu_lock); flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, false); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M); write_unlock(&kvm->mmu_lock); @@ -5630,7 +5630,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm, write_lock(&kvm->mmu_lock); flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot); write_unlock(&kvm->mmu_lock); @@ -5658,7 +5658,7 @@ restart: kvm_mmu_commit_zap_page(kvm, &invalid_list); - if (kvm->arch.tdp_mmu_enabled) + if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_zap_all(kvm); write_unlock(&kvm->mmu_lock); @@ -5969,7 +5969,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) struct kvm_mmu_page, lpage_disallowed_link); WARN_ON_ONCE(!sp->lpage_disallowed); - if (sp->tdp_mmu_page) { + if (is_tdp_mmu_page(sp)) { kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level)); } else { diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 98db78a26957..9e38d3c5daad 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -56,10 +56,12 @@ struct kvm_mmu_page { /* Number of writes since the last time traversal visited this page. */ atomic_t write_flooding_count; +#ifdef CONFIG_X86_64 bool tdp_mmu_page; /* Used for freeing the page asyncronously if it is a TDP MMU page. 
*/ struct rcu_head rcu_head; +#endif }; extern struct kmem_cache *mmu_page_header_cache; diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index aa0845d5e1e4..ec99e50a7ba7 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -10,24 +10,13 @@ #include #include -#ifdef CONFIG_X86_64 static bool __read_mostly tdp_mmu_enabled = false; module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); -#endif - -static bool is_tdp_mmu_enabled(void) -{ -#ifdef CONFIG_X86_64 - return tdp_enabled && READ_ONCE(tdp_mmu_enabled); -#else - return false; -#endif /* CONFIG_X86_64 */ -} /* Initializes the TDP MMU for the VM, if enabled. */ void kvm_mmu_init_tdp_mmu(struct kvm *kvm) { - if (!is_tdp_mmu_enabled()) + if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled)) return; /* This should not be changed for the lifetime of the VM. */ @@ -96,22 +85,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, #define for_each_tdp_mmu_root(_kvm, _root) \ list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) -bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa) -{ - struct kvm_mmu_page *sp; - - if (!kvm->arch.tdp_mmu_enabled) - return false; - if (WARN_ON(!VALID_PAGE(hpa))) - return false; - - sp = to_shadow_page(hpa); - if (WARN_ON(!sp)) - return false; - - return sp->tdp_mmu_page && sp->root_count; -} - static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, bool can_yield); diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index cbbdbadd1526..b4b65e3699b3 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -5,10 +5,6 @@ #include -void kvm_mmu_init_tdp_mmu(struct kvm *kvm); -void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); - -bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root); hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root); @@ -47,4 +43,32 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level); +#ifdef CONFIG_X86_64 +void kvm_mmu_init_tdp_mmu(struct kvm *kvm); +void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); +static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; } +static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } +#else +static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {} +static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {} +static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; } +static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } +#endif + +static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa) +{ + struct kvm_mmu_page *sp; + + if (!is_tdp_mmu_enabled(kvm)) + return false; + if (WARN_ON(!VALID_PAGE(hpa))) + return false; + + sp = to_shadow_page(hpa); + if (WARN_ON(!sp)) + return false; + + return is_tdp_mmu_page(sp) && sp->root_count; +} + #endif /* __KVM_X86_MMU_TDP_MMU_H */ -- cgit v1.2.3 From 9fd6dad1261a541b3f5fa7dc5b152222306e6702 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 5 Feb 2021 05:07:11 -0500 Subject: mm: provide a saner PTE walking API for modules Currently, the follow_pfn function is exported for modules but follow_pte is not. However, follow_pfn is very easy to misuse, because it does not provide protections (so most of its callers assume the page is writable!) and because it returns after having already unlocked the page table lock. 
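The hazard is easiest to see with the two calling conventions side by side. A toy userspace model follows (a mutex stands in for the page-table lock and the values are made up; only the shape of the ptepp/ptlp out-parameters mirrors the new follow_pte() in the diff below):

    /* Toy model, not kernel code: why returning a copied value after
     * dropping the lock is weaker than returning the locked entry. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;  /* "page-table lock" */
    static unsigned long pte = 0x1007;  /* fake entry: pfn 1, low bits = flags */

    /* follow_pfn-style: copy the value, drop the lock, return the copy.
     * The caller never sees the permission bits and cannot trust the copy. */
    static unsigned long get_pfn(void)
    {
        unsigned long pfn;

        pthread_mutex_lock(&ptl);
        pfn = pte >> 12;
        pthread_mutex_unlock(&ptl);
        return pfn;
    }

    /* follow_pte-style: hand back a pointer to the entry plus the held lock;
     * the caller inspects the entry and unlocks when done. */
    static int get_pte(unsigned long **ptepp, pthread_mutex_t **ptlp)
    {
        pthread_mutex_lock(&ptl);
        *ptepp = &pte;
        *ptlp = &ptl;
        return 0;
    }

    int main(void)
    {
        unsigned long *ptep;
        pthread_mutex_t *lock;

        printf("pfn (unlocked copy): %lx\n", get_pfn());

        if (!get_pte(&ptep, &lock)) {
            printf("pte %lx, write bit %lu\n", *ptep, *ptep & 0x2);
            pthread_mutex_unlock(lock);
        }
        return 0;
    }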
Provide instead a simplified version of follow_pte that does not have the pmdpp and range arguments. The older version survives as follow_invalidate_pte() for use by fs/dax.c. Reviewed-by: Jason Gunthorpe Signed-off-by: Paolo Bonzini --- arch/s390/pci/pci_mmio.c | 4 ++-- fs/dax.c | 5 +++-- include/linux/mm.h | 6 ++++-- mm/memory.c | 41 ++++++++++++++++++++++++++++++++++++----- virt/kvm/kvm_main.c | 4 ++-- 5 files changed, 47 insertions(+), 13 deletions(-) diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 18f2d10c3176..474617b88648 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -170,7 +170,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl); if (ret) goto out_unlock_mmap; @@ -311,7 +311,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl); if (ret) goto out_unlock_mmap; diff --git a/fs/dax.c b/fs/dax.c index 26d5dcd2d69e..b3d27fdc6775 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -810,11 +810,12 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, address = pgoff_address(index, vma); /* - * Note because we provide range to follow_pte it will call + * follow_invalidate_pte() will use the range to call * mmu_notifier_invalidate_range_start() on our behalf before * taking any lock. */ - if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl)) + if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep, + &pmdp, &ptl)) continue; /* diff --git a/include/linux/mm.h b/include/linux/mm.h index ecdf8a8cd6ae..24b292fce8e5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1658,9 +1658,11 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); +int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, pte_t **ptepp, + pmd_t **pmdpp, spinlock_t **ptlp); int follow_pte(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, - spinlock_t **ptlp); + pte_t **ptepp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, diff --git a/mm/memory.c b/mm/memory.c index feff48e1465a..985dac0958dc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4709,9 +4709,9 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) } #endif /* __PAGETABLE_PMD_FOLDED */ -int follow_pte(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, - spinlock_t **ptlp) +int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, pte_t **ptepp, + pmd_t **pmdpp, spinlock_t **ptlp) { pgd_t *pgd; p4d_t *p4d; @@ -4776,6 +4776,34 @@ out: return -EINVAL; } +/** + * follow_pte - look up PTE at a user virtual address + * @mm: the mm_struct of the target address space + * @address: user virtual address + * @ptepp: location to store found PTE + * @ptlp: location to store the lock for the PTE + 
* + * On a successful return, the pointer to the PTE is stored in @ptepp; + * the corresponding lock is taken and its location is stored in @ptlp. + * The contents of the PTE are only stable until @ptlp is released; + * any further use, if any, must be protected against invalidation + * with MMU notifiers. + * + * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore + * should be taken for read. + * + * KVM uses this function. While it is arguably less bad than ``follow_pfn``, + * it is not a good general-purpose API. + * + * Return: zero on success, -ve otherwise. + */ +int follow_pte(struct mm_struct *mm, unsigned long address, + pte_t **ptepp, spinlock_t **ptlp) +{ + return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp); +} +EXPORT_SYMBOL_GPL(follow_pte); + /** * follow_pfn - look up PFN at a user virtual address * @vma: memory mapping @@ -4784,6 +4812,9 @@ out: * * Only IO mappings and raw PFN mappings are allowed. * + * This function does not allow the caller to read the permissions + * of the PTE. Do not use it. + * * Return: zero and the pfn at @pfn on success, -ve otherwise. */ int follow_pfn(struct vm_area_struct *vma, unsigned long address, @@ -4796,7 +4827,7 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address, if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return ret; - ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; *pfn = pte_pfn(*ptep); @@ -4817,7 +4848,7 @@ int follow_phys(struct vm_area_struct *vma, if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) goto out; - if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl)) + if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; pte = *ptep; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 48ccdf4e3d04..ee4ac2618ec5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1911,7 +1911,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, spinlock_t *ptl; int r; - r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl); + r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); if (r) { /* * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does @@ -1926,7 +1926,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, if (r) return r; - r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl); + r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); if (r) return r; } -- cgit v1.2.3 From a9545779ee9e9e103648f6f2552e73cfe808d0f4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 8 Feb 2021 12:19:40 -0800 Subject: KVM: Use kvm_pfn_t for local PFN variable in hva_to_pfn_remapped() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use kvm_pfn_t, a.k.a. u64, for the local 'pfn' variable when retrieving a so called "remapped" hva/pfn pair. In theory, the hva could resolve to a pfn in high memory on a 32-bit kernel. This bug was inadvertantly exposed by commit bd2fae8da794 ("KVM: do not assume PTE is writable after follow_pfn"), which added an error PFN value to the mix, causing gcc to comlain about overflowing the unsigned long. 
arch/x86/kvm/../../../virt/kvm/kvm_main.c: In function ‘hva_to_pfn_remapped’: include/linux/kvm_host.h:89:30: error: conversion from ‘long long unsigned int’ to ‘long unsigned int’ changes value from ‘9218868437227405314’ to ‘2’ [-Werror=overflow] 89 | #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) | ^ virt/kvm/kvm_main.c:1935:9: note: in expansion of macro ‘KVM_PFN_ERR_RO_FAULT’ Cc: stable@vger.kernel.org Fixes: add6a0cd1c5b ("KVM: MMU: try to fix up page faults before giving up") Signed-off-by: Sean Christopherson Message-Id: <20210208201940.1258328-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ee4ac2618ec5..001b9de4e727 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1906,7 +1906,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, bool write_fault, bool *writable, kvm_pfn_t *p_pfn) { - unsigned long pfn; + kvm_pfn_t pfn; pte_t *ptep; spinlock_t *ptl; int r; -- cgit v1.2.3 From 99df541dcc97ce4436528a4eeb23165d8c672a64 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 8 Feb 2021 23:23:26 +0000 Subject: KVM: x86/xen: Remove extra unlock in kvm_xen_hvm_set_attr() This accidentally ended up locking and then immediately unlocking kvm->lock at the beginning of the function. Fix it. Fixes: a76b9641ad1c ("KVM: x86/xen: add KVM_XEN_HVM_SET_ATTR/KVM_XEN_HVM_GET_ATTR") Signed-off-by: David Woodhouse Message-Id: <20210208232326.1830370-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 39a7ffcdcf22..54a4895d4440 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -105,8 +105,6 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) mutex_lock(&kvm->lock); - mutex_unlock(&kvm->lock); - switch (data->type) { case KVM_XEN_ATTR_TYPE_LONG_MODE: if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { -- cgit v1.2.3 From 448841f0b7b50f1b8d7a40138dfb0d882435c8bd Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 8 Feb 2021 12:15:02 -0800 Subject: KVM: x86/xen: Use hva_t for holding hypercall page address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use hva_t, a.k.a. unsigned long, for the local variable that holds the hypercall page address. On 32-bit KVM, gcc complains about using a u64 due to the implicit cast from a 64-bit value to a 32-bit pointer. arch/x86/kvm/xen.c: In function ‘kvm_xen_write_hypercall_page’: arch/x86/kvm/xen.c:300:22: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] 300 | page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE); Cc: Joao Martins Cc: David Woodhouse Fixes: 23200b7a30de ("KVM: x86/xen: intercept xen hypercalls if enabled") Signed-off-by: Sean Christopherson Message-Id: <20210208201502.1239867-1-seanjc@google.com> Acked-by: David Woodhouse Reviewed-by: Joao Martins Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 54a4895d4440..b414fed1576b 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -284,8 +284,12 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) return 1; } } else { - u64 blob_addr = lm ? 
kvm->arch.xen_hvm_config.blob_addr_64 - : kvm->arch.xen_hvm_config.blob_addr_32; + /* + * Note, truncation is a non-issue as 'lm' is guaranteed to be + * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes. + */ + hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 + : kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u8 *page; -- cgit v1.2.3 From 16d5163f338753fb93b1809e067da5d27d540fbc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 4 Feb 2021 17:24:57 -0800 Subject: KVM: x86: Remove misleading DR6/DR7 adjustments from RSM emulation Drop the DR6/7 volatile+fixed bits adjustments in RSM emulation, which are redundant and misleading. The necessary adjustments are made by kvm_set_dr(), which properly sets the fixed bits that are conditional on the vCPU model. Note, KVM incorrectly reads only bits 31:0 of the DR6/7 fields when emulating RSM on x86-64. On the plus side for this change, that bug makes removing "& DRx_VOLATILE" a nop. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210205012458.3872687-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/emulate.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 612ee691065f..b5f433d01caa 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2506,12 +2506,12 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, val = GET_SMSTATE(u32, smstate, 0x7fcc); - if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 6, val)) return X86EMUL_UNHANDLEABLE; val = GET_SMSTATE(u32, smstate, 0x7fc8); - if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 7, val)) return X86EMUL_UNHANDLEABLE; selector = GET_SMSTATE(u32, smstate, 0x7fc4); @@ -2566,12 +2566,12 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, val = GET_SMSTATE(u32, smstate, 0x7f68); - if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 6, val)) return X86EMUL_UNHANDLEABLE; val = GET_SMSTATE(u32, smstate, 0x7f60); - if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1)) + if (ctxt->ops->set_dr(ctxt, 7, val)) return X86EMUL_UNHANDLEABLE; cr0 = GET_SMSTATE(u64, smstate, 0x7f58); -- cgit v1.2.3 From 2644312052d54e2e7543c7d186899a36ed22f0bf Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 4 Feb 2021 17:24:58 -0800 Subject: KVM: x86: Restore all 64 bits of DR6 and DR7 during RSM on x86-64 Restore the full 64-bit values of DR6 and DR7 when emulating RSM on x86-64, as defined by both Intel's SDM and AMD's APM. Note, bits 63:32 of DR6 and DR7 are reserved, so this is a glorified nop unless the SMM handler is poking into SMRAM, which it most definitely shouldn't be doing since both Intel and AMD list the DR6 and DR7 fields as read-only. 
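The functional change is simply the width of the load from the SMM save area. A stand-alone sketch of that difference (the GET_SMSTATE macro and the 0x160 offset here are illustrative stand-ins, not KVM's definitions, and a little-endian host is assumed; the real offsets in the hunk below are 0x7f68 and 0x7f60):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in accessor: read a field of the given type at a byte offset. */
    #define GET_SMSTATE(type, buf, offset) \
        ({ type __val; memcpy(&__val, (buf) + (offset), sizeof(__val)); __val; })

    int main(void)
    {
        uint8_t smram[0x200] = { 0 };
        uint64_t dr7 = 0xffff0aaa00000400ULL;  /* arbitrary 64-bit DR7 image */

        memcpy(smram + 0x160, &dr7, sizeof(dr7));  /* 0x160: made-up offset */

        /* A u32 read silently drops bits 63:32 of the saved value. */
        printf("u32 read: %#llx\n",
               (unsigned long long)GET_SMSTATE(uint32_t, smram, 0x160));
        printf("u64 read: %#llx\n",
               (unsigned long long)GET_SMSTATE(uint64_t, smram, 0x160));
        return 0;
    }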
Fixes: 660a5d517aaa ("KVM: x86: save/load state on SMM switch") Signed-off-by: Sean Christopherson Message-Id: <20210205012458.3872687-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/emulate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b5f433d01caa..f7970ba6219f 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2564,12 +2564,12 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78); ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; - val = GET_SMSTATE(u32, smstate, 0x7f68); + val = GET_SMSTATE(u64, smstate, 0x7f68); if (ctxt->ops->set_dr(ctxt, 6, val)) return X86EMUL_UNHANDLEABLE; - val = GET_SMSTATE(u32, smstate, 0x7f60); + val = GET_SMSTATE(u64, smstate, 0x7f60); if (ctxt->ops->set_dr(ctxt, 7, val)) return X86EMUL_UNHANDLEABLE; -- cgit v1.2.3 From e6c804a848d604438bd9a083939569a7c9ff2bb5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 4 Feb 2021 16:57:42 -0800 Subject: KVM: SVM: Move AVIC vCPU kicking snippet to helper function Add a helper function to handle kicking non-running vCPUs when sending virtual IPIs. A future patch will change SVM's interception functions to take @vcpu instead of @svm, at which piont declaring and modifying 'vcpu' in a case statement is confusing, and potentially dangerous. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210205005750.3841462-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/avic.c | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 0ef84d57b72e..78bdcfac4e40 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -298,6 +298,23 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) return 0; } +static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source, + u32 icrl, u32 icrh) +{ + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + bool m = kvm_apic_match_dest(vcpu, source, + icrl & APIC_SHORT_MASK, + GET_APIC_DEST_FIELD(icrh), + icrl & APIC_DEST_MASK); + + if (m && !avic_vcpu_is_running(vcpu)) + kvm_vcpu_wake_up(vcpu); + } +} + int avic_incomplete_ipi_interception(struct vcpu_svm *svm) { u32 icrh = svm->vmcb->control.exit_info_1 >> 32; @@ -324,28 +341,14 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm) kvm_lapic_reg_write(apic, APIC_ICR2, icrh); kvm_lapic_reg_write(apic, APIC_ICR, icrl); break; - case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { - int i; - struct kvm_vcpu *vcpu; - struct kvm *kvm = svm->vcpu.kvm; - struct kvm_lapic *apic = svm->vcpu.arch.apic; - + case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: /* * At this point, we expect that the AVIC HW has already * set the appropriate IRR bits on the valid target * vcpus. So, we just need to kick the appropriate vcpu. 
*/ - kvm_for_each_vcpu(i, vcpu, kvm) { - bool m = kvm_apic_match_dest(vcpu, apic, - icrl & APIC_SHORT_MASK, - GET_APIC_DEST_FIELD(icrh), - icrl & APIC_DEST_MASK); - - if (m && !avic_vcpu_is_running(vcpu)) - kvm_vcpu_wake_up(vcpu); - } + avic_kick_target_vcpus(svm->vcpu.kvm, apic, icrl, icrh); break; - } case AVIC_IPI_FAILURE_INVALID_TARGET: WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", index, svm->vcpu.vcpu_id, icrh, icrl); -- cgit v1.2.3 From 6f7a343987fa24299db1a36b7af55bdbd5dd3b1b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 4 Feb 2021 16:57:43 -0800 Subject: KVM: SVM: Remove an unnecessary forward declaration Drop a defunct forward declaration of svm_complete_interrupts(). No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210205005750.3841462-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 4141caea857a..ef2f2b0f20d3 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -205,8 +205,6 @@ bool svm_gp_erratum_intercept = true; static u8 rsm_ins_bytes[] = "\x0f\xaa"; -static void svm_complete_interrupts(struct vcpu_svm *svm); - static unsigned long iopm_base; struct kvm_ldttss_desc { -- cgit v1.2.3 From 29d6ca41992a771e2441424099695c329d735c28 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 3 Feb 2021 03:42:41 -0500 Subject: KVM: x86: reading DR cannot fail kvm_get_dr and emulator_get_dr expect an in-range value for the register number so they cannot fail. Change the return type to void. Suggested-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/kvm_emulate.h | 2 +- arch/x86/kvm/vmx/vmx.c | 3 +-- arch/x86/kvm/x86.c | 9 ++++----- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1ed1206c196d..c451412a6230 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1562,7 +1562,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); -int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); +void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 43c93ffa76ed..0d359115429a 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -205,7 +205,7 @@ struct x86_emulate_ops { ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr); int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); int (*cpl)(struct x86_emulate_ctxt *ctxt); - int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); + void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt); void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 777177ea9a35..049fbbd0aa1a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5142,8 +5142,7 @@ static
int handle_dr(struct kvm_vcpu *vcpu) if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; - if (kvm_get_dr(vcpu, dr, &val)) - return 1; + kvm_get_dr(vcpu, dr, &val); kvm_register_write(vcpu, reg, val); } else if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d9f931c63293..dcb67429b75d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1181,7 +1181,7 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) } EXPORT_SYMBOL_GPL(kvm_set_dr); -int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) +void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { size_t size = ARRAY_SIZE(vcpu->arch.db); @@ -1198,7 +1198,6 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) *val = vcpu->arch.dr7; break; } - return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); @@ -6610,10 +6609,10 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); } -static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, - unsigned long *dest) +static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, + unsigned long *dest) { - return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); + kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, -- cgit v1.2.3 From 996ff5429e9898947016206ee9da387a93d63d9d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 14 Dec 2020 07:49:54 -0500 Subject: KVM: x86: move kvm_inject_gp up from kvm_set_dr to callers Push the injection of #GP up to the callers, so that they can just use kvm_complete_insn_gp. __kvm_set_dr is pretty much what the callers can use together with kvm_complete_insn_gp, so rename it to kvm_set_dr and drop the old kvm_set_dr wrapper. This also allows nested VMX code, which really wanted to use __kvm_set_dr, to use the right function. While at it, remove the kvm_require_dr() check from the SVM interception. The APM states: All normal exception checks take precedence over the SVM intercepts. which includes the CR4.DE=1 #UD. 
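The resulting pattern is that kvm_set_dr() only reports whether a #GP is due, and a single completion helper acts on that. A toy stand-alone model of the shape (stub functions, not KVM's code; the DR7 bits 63:32 check is only an illustrative stand-in for KVM's validity checks):

    #include <stdio.h>

    static void inject_gp(void)        { puts("inject #GP"); }
    static void skip_instruction(void) { puts("advance RIP"); }

    /* Mirrors the shape of kvm_set_dr(): 0 on success, nonzero if the
     * value would have to raise #GP. */
    static int set_dr(int dr, unsigned long long val)
    {
        if (dr == 7 && (val >> 32))  /* reserved upper bits must be zero */
            return 1;
        return 0;
    }

    /* Mirrors the shape of kvm_complete_insn_gp(): one place decides. */
    static int complete_insn_gp(int err)
    {
        if (err)
            inject_gp();
        else
            skip_instruction();
        return 1;
    }

    int main(void)
    {
        complete_insn_gp(set_dr(7, 0x400));        /* legal: skip */
        complete_insn_gp(set_dr(7, 1ULL << 40));   /* illegal: #GP */
        return 0;
    }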
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 13 +++++-------- arch/x86/kvm/vmx/vmx.c | 17 ++++++++++------- arch/x86/kvm/x86.c | 19 +++++-------------- 3 files changed, 20 insertions(+), 29 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index ef2f2b0f20d3..42d4710074a6 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2617,6 +2617,7 @@ static int dr_interception(struct vcpu_svm *svm) { int reg, dr; unsigned long val; + int err = 0; if (svm->vcpu.guest_debug == 0) { /* @@ -2634,20 +2635,16 @@ static int dr_interception(struct vcpu_svm *svm) reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; - - if (dr >= 16) { /* mov to DRn */ - if (!kvm_require_dr(&svm->vcpu, dr - 16)) - return 1; + if (dr >= 16) { /* mov to DRn */ + dr -= 16; val = kvm_register_read(&svm->vcpu, reg); - kvm_set_dr(&svm->vcpu, dr - 16, val); + err = kvm_set_dr(&svm->vcpu, dr, val); } else { - if (!kvm_require_dr(&svm->vcpu, dr)) - return 1; kvm_get_dr(&svm->vcpu, dr, &val); kvm_register_write(&svm->vcpu, reg, val); } - return kvm_skip_emulated_instruction(&svm->vcpu); + return kvm_complete_insn_gp(&svm->vcpu, err); } static int cr8_write_interception(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 049fbbd0aa1a..13898871e5b0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5095,6 +5095,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, dr7, reg; + int err = 1; exit_qualification = vmx_get_exit_qual(vcpu); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; @@ -5103,9 +5104,9 @@ static int handle_dr(struct kvm_vcpu *vcpu) if (!kvm_require_dr(vcpu, dr)) return 1; - /* Do not handle if the CPL > 0, will trigger GP on re-entry */ - if (!kvm_require_cpl(vcpu, 0)) - return 1; + if (kvm_x86_ops.get_cpl(vcpu) > 0) + goto out; + dr7 = vmcs_readl(GUEST_DR7); if (dr7 & DR7_GD) { /* @@ -5144,11 +5145,13 @@ static int handle_dr(struct kvm_vcpu *vcpu) kvm_get_dr(vcpu, dr, &val); kvm_register_write(vcpu, reg, val); - } else - if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) - return 1; + err = 0; + } else { + err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)); + } - return kvm_skip_emulated_instruction(vcpu); +out: + return kvm_complete_insn_gp(vcpu, err); } static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dcb67429b75d..baa90ae76ba5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1143,7 +1143,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) return fixed; } -static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) +int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { size_t size = ARRAY_SIZE(vcpu->arch.db); @@ -1156,13 +1156,13 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) case 4: case 6: if (!kvm_dr6_valid(val)) - return -1; /* #GP */ + return 1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); break; case 5: default: /* 7 */ if (!kvm_dr7_valid(val)) - return -1; /* #GP */ + return 1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; kvm_update_dr7(vcpu); break; @@ -1170,15 +1170,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) return 0; } - -int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) -{ - if (__kvm_set_dr(vcpu, dr, val)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - 
return 0; -} EXPORT_SYMBOL_GPL(kvm_set_dr); void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) @@ -6619,7 +6610,7 @@ static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { - return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); + return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) @@ -8664,7 +8655,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) dt.address = dt.size = 0; static_call(kvm_x86_set_idt)(vcpu, &dt); - __kvm_set_dr(vcpu, 7, DR7_FIXED_1); + kvm_set_dr(vcpu, 7, DR7_FIXED_1); cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; cs.base = vcpu->arch.smbase; -- cgit v1.2.3 From 281d9cd9b471a28382ac79be9b5cd59b72ae5c87 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 27 Jan 2021 18:57:31 +0100 Subject: selftests: kvm: Raise the default timeout to 120 seconds With the updated maximum number of user memslots (32) set_memory_region_test sometimes takes longer than the default 45 seconds to finish. Raise the value to an arbitrary 120 seconds. Signed-off-by: Vitaly Kuznetsov Message-Id: <20210127175731.2020089-6-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/settings | 1 + 1 file changed, 1 insertion(+) create mode 100644 tools/testing/selftests/kvm/settings diff --git a/tools/testing/selftests/kvm/settings b/tools/testing/selftests/kvm/settings new file mode 100644 index 000000000000..6091b45d226b --- /dev/null +++ b/tools/testing/selftests/kvm/settings @@ -0,0 +1 @@ +timeout=120 -- cgit v1.2.3 From 4fc096a99e01dd06dc55bef76ade7f8d76653245 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 28 Jan 2021 13:01:31 -0500 Subject: KVM: Raise the maximum number of user memslots Current KVM_USER_MEM_SLOTS limits are arch specific (512 on Power, 509 on x86, 32 on s390, 16 on MIPS) but they don't really need to be. Memory slots are allocated dynamically in KVM when added so the only real limitation is 'id_to_index' array which is 'short'. We don't have any other KVM_MEM_SLOTS_NUM/KVM_USER_MEM_SLOTS-sized statically defined structures. Low KVM_USER_MEM_SLOTS can be a limiting factor for some configurations. In particular, when QEMU tries to start a Windows guest with Hyper-V SynIC enabled and e.g. 256 vCPUs the limit is hit as SynIC requires two pages per vCPU and the guest is free to pick any GFN for each of them, this fragments memslots as QEMU wants to have a separate memslot for each of these pages (which are supposed to act as 'overlay' pages). 
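A quick stand-alone check of the arithmetic above (x86 values assumed: KVM_PRIVATE_MEM_SLOTS is 3 and the old x86 limit was 509, per the diff below):

    #include <limits.h>
    #include <stdio.h>

    #define KVM_PRIVATE_MEM_SLOTS  3
    #define KVM_MEM_SLOTS_NUM      SHRT_MAX
    #define KVM_USER_MEM_SLOTS     (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

    int main(void)
    {
        int vcpus = 256;
        int synic_slots = 2 * vcpus;  /* two overlay pages per vCPU */

        printf("new user slots: %d (id_to_index is a short, so %d is the cap)\n",
               KVM_USER_MEM_SLOTS, KVM_MEM_SLOTS_NUM);
        printf("SynIC overlay slots for %d vCPUs: %d (old x86 limit was 509)\n",
               vcpus, synic_slots);
        return 0;
    }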
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210127175731.2020089-3-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_host.h | 1 - arch/mips/include/asm/kvm_host.h | 1 - arch/powerpc/include/asm/kvm_host.h | 1 - arch/s390/include/asm/kvm_host.h | 1 - arch/x86/include/asm/kvm_host.h | 2 -- include/linux/kvm_host.h | 5 ++--- 6 files changed, 2 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8fcfab0c2567..1b8a3d825276 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,7 +30,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#define KVM_USER_MEM_SLOTS 512 #define KVM_HALT_POLL_NS_DEFAULT 500000 #include diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 24f3d0f9996b..3a5612e7304c 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -83,7 +83,6 @@ #define KVM_MAX_VCPUS 16 -#define KVM_USER_MEM_SLOTS 16 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 0 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index d67a470e95a3..2b9b6855ec86 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -28,7 +28,6 @@ #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS -#define KVM_USER_MEM_SLOTS 512 #include diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 74f9a036bab2..6bcfc5614bbc 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -28,7 +28,6 @@ #define KVM_S390_BSCA_CPU_SLOTS 64 #define KVM_S390_ESCA_CPU_SLOTS 248 #define KVM_MAX_VCPUS 255 -#define KVM_USER_MEM_SLOTS 32 /* * These seem to be used for allocating ->chip in the routing table, which we diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c451412a6230..b6de568e7476 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -40,10 +40,8 @@ #define KVM_MAX_VCPUS 288 #define KVM_SOFT_MAX_VCPUS 240 #define KVM_MAX_VCPU_ID 1023 -#define KVM_USER_MEM_SLOTS 509 /* memory slots that are not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 3 -#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #define KVM_HALT_POLL_NS_DEFAULT 200000 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f417447129b9..e126ebda36d0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -425,9 +425,8 @@ struct kvm_irq_routing_table { #define KVM_PRIVATE_MEM_SLOTS 0 #endif -#ifndef KVM_MEM_SLOTS_NUM -#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) -#endif +#define KVM_MEM_SLOTS_NUM SHRT_MAX +#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS) #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 32f00fd9ef39d8bd32ed7a365fc9a134ebbc646f Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:02 +0100 Subject: selftests: kvm: Move kvm_get_supported_hv_cpuid() to common code kvm_get_supported_hv_cpuid() may come handy in all Hyper-V related tests. Split it off hyperv_cpuid test, create system-wide and vcpu versions. 
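The system-wide helper below caches the KVM_GET_SUPPORTED_HV_CPUID result in a static pointer, so repeated calls do not reopen /dev/kvm, while the per-vCPU variant allocates a fresh copy each time. A stand-alone sketch of that caching shape (toy types, not the selftest code):

    #include <stdio.h>
    #include <stdlib.h>

    struct cpuid_blob { int nent; };

    static struct cpuid_blob *get_supported_blob(void)
    {
        static struct cpuid_blob *cached;

        if (cached)
            return cached;       /* system-wide data: fetch only once */

        cached = malloc(sizeof(*cached));
        if (!cached)
            abort();
        cached->nent = 20;       /* pretend this came from an ioctl */
        return cached;
    }

    int main(void)
    {
        printf("first call:  %p\n", (void *)get_supported_blob());
        printf("second call: %p\n", (void *)get_supported_blob());  /* same pointer */
        return 0;
    }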
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-2-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/include/x86_64/processor.h | 3 ++ tools/testing/selftests/kvm/lib/x86_64/processor.c | 33 ++++++++++++++++++++++ tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 31 ++------------------ 3 files changed, 39 insertions(+), 28 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 582dfb23f30c..42292d676eff 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -406,6 +406,9 @@ bool set_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *ent); uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3); +struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void); +struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); + /* * Basic CPU control in CR0 */ diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 5a15264c2ef2..18483df2b331 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1300,3 +1300,36 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, : "b"(a0), "c"(a1), "d"(a2), "S"(a3)); return r; } + +struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void) +{ + static struct kvm_cpuid2 *cpuid; + int ret; + int kvm_fd; + + if (cpuid) + return cpuid; + + cpuid = allocate_kvm_cpuid2(); + kvm_fd = open(KVM_DEV_PATH, O_RDONLY); + if (kvm_fd < 0) + exit(KSFT_SKIP); + + ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid); + TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n", + ret, errno); + + close(kvm_fd); + return cpuid; +} + +struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid) +{ + static struct kvm_cpuid2 *cpuid; + + cpuid = allocate_kvm_cpuid2(); + + vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid); + + return cpuid; +} diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c index 88a595b7fbdd..7e2d2d17d2ed 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c @@ -125,30 +125,6 @@ void test_hv_cpuid_e2big(struct kvm_vm *vm, bool system) " it should have: %d %d", system ? 
"KVM" : "vCPU", ret, errno); } - -struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm, bool system) -{ - int nent = 20; /* should be enough */ - static struct kvm_cpuid2 *cpuid; - - cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2)); - - if (!cpuid) { - perror("malloc"); - abort(); - } - - cpuid->nent = nent; - - if (!system) - vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid); - else - kvm_ioctl(vm, KVM_GET_SUPPORTED_HV_CPUID, cpuid); - - return cpuid; -} - - int main(int argc, char *argv[]) { struct kvm_vm *vm; @@ -167,7 +143,7 @@ int main(int argc, char *argv[]) /* Test vCPU ioctl version */ test_hv_cpuid_e2big(vm, false); - hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, false); + hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, VCPU_ID); test_hv_cpuid(hv_cpuid_entries, false); free(hv_cpuid_entries); @@ -177,7 +153,7 @@ int main(int argc, char *argv[]) goto do_sys; } vcpu_enable_evmcs(vm, VCPU_ID); - hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, false); + hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, VCPU_ID); test_hv_cpuid(hv_cpuid_entries, true); free(hv_cpuid_entries); @@ -190,9 +166,8 @@ do_sys: test_hv_cpuid_e2big(vm, true); - hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, true); + hv_cpuid_entries = kvm_get_supported_hv_cpuid(); test_hv_cpuid(hv_cpuid_entries, nested_vmx_supported()); - free(hv_cpuid_entries); out: kvm_vm_free(vm); -- cgit v1.2.3 From a75b40a4dd57e624dc89b81abaf6ebd206bf62a0 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:03 +0100 Subject: selftests: kvm: Properly set Hyper-V CPUIDs in evmcs_test Generally, when Hyper-V emulation is enabled, VMM is supposed to set Hyper-V CPUID identifications so the guest knows that Hyper-V features are available. evmcs_test doesn't currently do that but so far Hyper-V emulation in KVM was enabled unconditionally. As we are about to change that, proper Hyper-V CPUID identification should be set in selftests as well. 
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-3-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/evmcs_test.c | 39 ++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index 37b8a78f6b74..39a3cb2bd103 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -78,6 +78,42 @@ void guest_code(struct vmx_pages *vmx_pages) GUEST_ASSERT(vmlaunch()); } +struct kvm_cpuid2 *guest_get_cpuid(void) +{ + static struct kvm_cpuid2 *cpuid_full; + struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; + int i, nent = 0; + + if (cpuid_full) + return cpuid_full; + + cpuid_sys = kvm_get_supported_cpuid(); + cpuid_hv = kvm_get_supported_hv_cpuid(); + + cpuid_full = malloc(sizeof(*cpuid_full) + + (cpuid_sys->nent + cpuid_hv->nent) * + sizeof(struct kvm_cpuid_entry2)); + if (!cpuid_full) { + perror("malloc"); + abort(); + } + + /* Need to skip KVM CPUID leaves 0x400000xx */ + for (i = 0; i < cpuid_sys->nent; i++) { + if (cpuid_sys->entries[i].function >= 0x40000000 && + cpuid_sys->entries[i].function < 0x40000100) + continue; + cpuid_full->entries[nent] = cpuid_sys->entries[i]; + nent++; + } + + memcpy(&cpuid_full->entries[nent], cpuid_hv->entries, + cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2)); + cpuid_full->nent = nent + cpuid_hv->nent; + + return cpuid_full; +} + int main(int argc, char *argv[]) { vm_vaddr_t vmx_pages_gva = 0; @@ -99,6 +135,7 @@ int main(int argc, char *argv[]) exit(KSFT_SKIP); } + vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid()); vcpu_enable_evmcs(vm, VCPU_ID); run = vcpu_state(vm, VCPU_ID); @@ -142,7 +179,7 @@ int main(int argc, char *argv[]) /* Restore state in a new VM. */ kvm_vm_restart(vm, O_RDWR); vm_vcpu_add(vm, VCPU_ID); - vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid()); vcpu_enable_evmcs(vm, VCPU_ID); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); -- cgit v1.2.3 From cb5b916172c6d47f8243212b0c00196ede219d95 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:04 +0100 Subject: KVM: x86: hyper-v: Drop unused kvm_hv_vapic_assist_page_enabled() kvm_hv_vapic_assist_page_enabled() seems to be unused since its introduction in commit 10388a07164c1 ("KVM: Add HYPER-V apic access MSRs"), drop it. 
Reported-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-4-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index ff4a7cc7d694..74016d21738d 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -125,11 +125,6 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data); int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); -static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; -} - int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len); void kvm_lapic_exit(void); -- cgit v1.2.3 From ef3f3980dedffe124727f7baa1932c8491a911e4 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:05 +0100 Subject: KVM: x86: hyper-v: Rename vcpu_to_hv_vcpu() to to_hv_vcpu() vcpu_to_hv_vcpu()'s argument is almost always 'vcpu' so there's no need to have an additional prefix. Also, this makes the code more consistent with vmx/svm where to_vmx()/to_svm() are being used. No functional change intended. Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-5-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 24 ++++++++++++------------ arch/x86/kvm/hyperv.h | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index af7278abe16b..46ccbb1e5e88 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -142,10 +142,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) return NULL; vcpu = kvm_get_vcpu(kvm, vpidx); - if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) + if (vcpu && to_hv_vcpu(vcpu)->vp_index == vpidx) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) - if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) + if (to_hv_vcpu(vcpu)->vp_index == vpidx) return vcpu; return NULL; } @@ -166,7 +166,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) { struct kvm *kvm = vcpu->kvm; struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv_stimer *stimer; int gsi, idx; @@ -317,7 +317,7 @@ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) return 1; trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, - vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data); + to_hv_vcpu(vcpu)->vp_index, msr, data); switch (msr) { case HV_X64_MSR_SYNDBG_CONTROL: syndbg->control.control = data; @@ -379,7 +379,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) } trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, - vcpu_to_hv_vcpu(vcpu)->vp_index, msr, + to_hv_vcpu(vcpu)->vp_index, msr, *pdata); return 0; @@ -538,7 +538,7 @@ static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); set_bit(stimer->index, - vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); + to_hv_vcpu(vcpu)->stimer_pending_bitmap); kvm_make_request(KVM_REQ_HV_STIMER, vcpu); if (vcpu_kick) kvm_vcpu_kick(vcpu); @@ -553,7 +553,7 @@ static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer) hrtimer_cancel(&stimer->timer); clear_bit(stimer->index, - 
vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); + to_hv_vcpu(vcpu)->stimer_pending_bitmap); stimer->msg_pending = false; stimer->exp_time = 0; } @@ -802,7 +802,7 @@ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer) void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv_stimer *stimer; u64 time_now, exp_time; int i; @@ -832,7 +832,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); int i; for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) @@ -883,7 +883,7 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index) void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); int i; synic_init(&hv_vcpu->synic); @@ -895,7 +895,7 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); } @@ -1501,7 +1501,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); kvm_for_each_vcpu(i, vcpu, kvm) { - if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index, + if (test_bit(to_hv_vcpu(vcpu)->vp_index, (unsigned long *)vp_bitmap)) __set_bit(i, vcpu_bitmap); } diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 6d7def2b0aad..f5e0d3d1d54a 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -50,7 +50,7 @@ /* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */ #define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2) -static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu) +static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu) { return &vcpu->arch.hyperv; } @@ -100,7 +100,7 @@ bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu, int timer_index) { - return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index]; + return &to_hv_vcpu(vcpu)->stimer[timer_index]; } static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer) -- cgit v1.2.3 From e0121fa29a7f4fc56cf75f5c6a80c41c7c73f593 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:06 +0100 Subject: KVM: x86: hyper-v: Rename vcpu_to_synic()/synic_to_vcpu() vcpu_to_synic()'s argument is almost always 'vcpu' so there's no need to have an additional prefix. Also, as this is used outside of hyper-v emulation code, add '_hv_' part to make it clear what this is. This makes the naming more consistent with to_hv_vcpu(). Rename synic_to_vcpu() to hv_synic_to_vcpu() for consistency. No functional change intended.
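A stand-alone illustration of the accessor pairs this series standardizes on, going from the vCPU to the embedded state and back (toy types, not KVM's; the real helpers operate on struct kvm_vcpu and struct kvm_vcpu_hv_synic):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct synic { int active; };
    struct vcpu  { int id; struct synic synic; };

    /* to_X(): vCPU to embedded state. */
    static struct synic *to_synic(struct vcpu *vcpu) { return &vcpu->synic; }

    /* X_to_vcpu(): embedded state back to its vCPU. */
    static struct vcpu *synic_to_vcpu(struct synic *synic)
    {
        return container_of(synic, struct vcpu, synic);
    }

    int main(void)
    {
        struct vcpu v = { .id = 7 };

        printf("round trip id: %d\n", synic_to_vcpu(to_synic(&v))->id);
        return 0;
    }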
Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-6-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 30 +++++++++++++++--------------- arch/x86/kvm/hyperv.h | 4 ++-- arch/x86/kvm/lapic.c | 4 ++-- arch/x86/kvm/x86.c | 2 +- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 46ccbb1e5e88..b9e1f0609ae4 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -129,7 +129,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, synic_update_vector(synic, vector); /* Load SynIC vectors into EOI exit bitmap */ - kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic)); + kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic)); return 0; } @@ -158,14 +158,14 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx) vcpu = get_vcpu_by_vpidx(kvm, vpidx); if (!vcpu) return NULL; - synic = vcpu_to_synic(vcpu); + synic = to_hv_synic(vcpu); return (synic->active) ? synic : NULL; } static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) { struct kvm *kvm = vcpu->kvm; - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv_stimer *stimer; int gsi, idx; @@ -190,7 +190,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; @@ -205,7 +205,7 @@ static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 data, bool host) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); int ret; if (!synic->active && !host) @@ -422,7 +422,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata, static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); struct kvm_lapic_irq irq; int ret, vector; @@ -458,7 +458,7 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint) void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) { - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); int i; trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); @@ -635,7 +635,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, union hv_stimer_config new_config = {.as_uint64 = config}, old_config = {.as_uint64 = stimer->config.as_uint64}; struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if (!synic->active && !host) return 1; @@ -659,7 +659,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, bool host) { struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if (!synic->active && !host) return 1; @@ -695,7 +695,7 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount) static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 
sint, struct hv_message *src_msg, bool no_retry) { - struct kvm_vcpu *vcpu = synic_to_vcpu(synic); + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); int msg_off = offsetof(struct hv_message_page, sint_message[sint]); gfn_t msg_page_gfn; struct hv_message_header hv_hdr; @@ -764,7 +764,7 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) payload->expiration_time = stimer->exp_time; payload->delivery_time = get_time_ref_counter(vcpu->kvm); - return synic_deliver_msg(vcpu_to_synic(vcpu), + return synic_deliver_msg(to_hv_synic(vcpu), stimer->config.sintx, msg, no_retry); } @@ -902,7 +902,7 @@ void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu) int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages) { - struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); /* * Hyper-V SynIC auto EOI SINT's are @@ -1309,7 +1309,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_SIMP: case HV_X64_MSR_EOM: case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: - return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host); + return synic_set_msr(to_hv_synic(vcpu), msr, data, host); case HV_X64_MSR_STIMER0_CONFIG: case HV_X64_MSR_STIMER1_CONFIG: case HV_X64_MSR_STIMER2_CONFIG: @@ -1421,7 +1421,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_SIMP: case HV_X64_MSR_EOM: case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: - return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host); + return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host); case HV_X64_MSR_STIMER0_CONFIG: case HV_X64_MSR_STIMER1_CONFIG: case HV_X64_MSR_STIMER2_CONFIG: @@ -1811,7 +1811,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) fallthrough; /* maybe userspace knows this conn_id */ case HVCALL_POST_MESSAGE: /* don't bother userspace if it has no way to handle it */ - if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { + if (unlikely(rep || !to_hv_synic(vcpu)->active)) { ret = HV_STATUS_INVALID_HYPERCALL_INPUT; break; } diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index f5e0d3d1d54a..d47b3f045a25 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -63,12 +63,12 @@ static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu) return container_of(arch, struct kvm_vcpu, arch); } -static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu) +static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu) { return &vcpu->arch.hyperv.synic; } -static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) +static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) { return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic)); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 9e0f78c0a256..847fe11a49f5 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1245,7 +1245,7 @@ static int apic_set_eoi(struct kvm_lapic *apic) apic_clear_isr(vector, apic); apic_update_ppr(apic); - if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap)) + if (test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap)) kvm_hv_synic_send_eoi(apic->vcpu, vector); kvm_ioapic_send_eoi(apic, vector); @@ -2512,7 +2512,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) */ apic_clear_irr(vector, apic); - if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) { + if (test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) { /* * For auto-EOI interrupts, there might be another 
pending * interrupt above PPR, so check whether to raise another diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index baa90ae76ba5..4d410c722728 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8804,7 +8804,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) return; bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, - vcpu_to_synic(vcpu)->vec_bitmap, 256); + to_hv_synic(vcpu)->vec_bitmap, 256); static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); } -- cgit v1.2.3 From aafa97fd1c01bc82c1f288bf1f27e8e1bdc36a3e Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:07 +0100 Subject: KVM: x86: hyper-v: Rename vcpu_to_stimer()/stimer_to_vcpu() vcpu_to_stimer()'s argument is almost always 'vcpu' so there's no need to have an additional prefix. Also, this makes the naming more consistent with to_hv_vcpu()/to_hv_synic(). Rename stimer_to_vcpu() to hv_stimer_to_vcpu() for consistency. No functional change intended. Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-7-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 36 ++++++++++++++++++------------------ arch/x86/kvm/hyperv.h | 6 +++--- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b9e1f0609ae4..b02adaa0bb53 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -535,7 +535,7 @@ static u64 get_time_ref_counter(struct kvm *kvm) static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, bool vcpu_kick) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); set_bit(stimer->index, to_hv_vcpu(vcpu)->stimer_pending_bitmap); @@ -546,9 +546,9 @@ static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); - trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index); hrtimer_cancel(&stimer->timer); @@ -563,7 +563,7 @@ static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer) struct kvm_vcpu_hv_stimer *stimer; stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer); - trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index); stimer_mark_pending(stimer, true); @@ -580,7 +580,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) u64 time_now; ktime_t ktime_now; - time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm); + time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm); ktime_now = ktime_get(); if (stimer->config.periodic) { @@ -597,7 +597,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) stimer->exp_time = time_now + stimer->count; trace_kvm_hv_stimer_start_periodic( - stimer_to_vcpu(stimer)->vcpu_id, + hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, time_now, stimer->exp_time); @@ -619,7 +619,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) return 0; } - trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, time_now, stimer->count); @@ -634,13 +634,13 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, { union
hv_stimer_config new_config = {.as_uint64 = config}, old_config = {.as_uint64 = stimer->config.as_uint64}; - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if (!synic->active && !host) return 1; - trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, config, host); stimer_cleanup(stimer); @@ -658,13 +658,13 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, bool host) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if (!synic->active && !host) return 1; - trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, count, host); stimer_cleanup(stimer); @@ -751,7 +751,7 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); struct hv_message *msg = &stimer->msg; struct hv_timer_message_payload *payload = (struct hv_timer_message_payload *)&msg->u.payload; @@ -771,7 +771,7 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer) { - struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); struct kvm_lapic_irq irq = { .delivery_mode = APIC_DM_FIXED, .vector = stimer->config.apic_vector @@ -791,7 +791,7 @@ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer) r = stimer_send_msg(stimer); else r = stimer_notify_direct(stimer); - trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id, + trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, direct, r); if (!r) { stimer->msg_pending = false; @@ -1316,7 +1316,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_STIMER3_CONFIG: { int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; - return stimer_set_config(vcpu_to_stimer(vcpu, timer_index), + return stimer_set_config(to_hv_stimer(vcpu, timer_index), data, host); } case HV_X64_MSR_STIMER0_COUNT: @@ -1325,7 +1325,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_STIMER3_COUNT: { int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; - return stimer_set_count(vcpu_to_stimer(vcpu, timer_index), + return stimer_set_count(to_hv_stimer(vcpu, timer_index), data, host); } case HV_X64_MSR_TSC_FREQUENCY: @@ -1428,7 +1428,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_STIMER3_CONFIG: { int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; - return stimer_get_config(vcpu_to_stimer(vcpu, timer_index), + return stimer_get_config(to_hv_stimer(vcpu, timer_index), pdata); } case HV_X64_MSR_STIMER0_COUNT: @@ -1437,7 +1437,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_STIMER3_COUNT: { int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; - return stimer_get_count(vcpu_to_stimer(vcpu, timer_index), + return stimer_get_count(to_hv_stimer(vcpu, timer_index), pdata); } case HV_X64_MSR_TSC_FREQUENCY: 
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index d47b3f045a25..48fcabacd339 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -97,13 +97,13 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, struct hv_vp_assist_page *assist_page); -static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu, - int timer_index) +static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu, + int timer_index) { return &to_hv_vcpu(vcpu)->stimer[timer_index]; } -static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer) +static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer) { struct kvm_vcpu_hv *hv_vcpu; -- cgit v1.2.3 From f69b55efef8406113c52f7494d2669ba9d4958c8 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:08 +0100 Subject: KVM: x86: hyper-v: Rename vcpu_to_hv_syndbg() to to_hv_syndbg() vcpu_to_hv_syndbg()'s argument is always 'vcpu' so there's no need to have an additional prefix. Also, this makes the code more consistent with vmx/svm where to_vmx()/to_svm() are being used. No functional change intended. Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-8-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 8 ++++---- arch/x86/kvm/hyperv.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b02adaa0bb53..b7e3fab01189 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -294,7 +294,7 @@ static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu) static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr) { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; @@ -311,7 +311,7 @@ static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr) static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) return 1; @@ -350,7 +350,7 @@ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) return 1; @@ -1873,7 +1873,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) } fallthrough; case HVCALL_RESET_DEBUG_SESSION: { - struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); + struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu)) { ret = HV_STATUS_INVALID_HYPERCALL_CODE; diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 48fcabacd339..220849cc12e5 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -73,7 +73,7 @@ static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic)); } -static inline struct kvm_hv_syndbg *vcpu_to_hv_syndbg(struct kvm_vcpu *vcpu) +static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu) { return &vcpu->kvm->arch.hyperv.hv_syndbg; } -- cgit v1.2.3 From 05f04ae4ffcc77cfcda86adc4e2c13aa72143c03 
Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:09 +0100 Subject: KVM: x86: hyper-v: Introduce to_kvm_hv() helper Spelling '&kvm->arch.hyperv' correctly is hard. Also, this makes the code more consistent with vmx/svm where to_kvm_vmx()/to_kvm_svm() are already being used. Opportunistically change kvm_hv_msr_{get,set}_crash_{data,ctl}() and kvm_hv_msr_set_crash_data() to take 'kvm' instead of 'vcpu' as these MSRs are partition wide. No functional change intended. Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-9-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 107 ++++++++++++++++++++++++++----------------------- arch/x86/kvm/hyperv.h | 5 +++ arch/x86/kvm/vmx/vmx.c | 3 +- arch/x86/kvm/x86.c | 2 +- 4 files changed, 64 insertions(+), 53 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b7e3fab01189..d8504fc43825 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -283,8 +283,7 @@ static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu) static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu) { - struct kvm *kvm = vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) hv->hv_syndbg.control.status = @@ -515,7 +514,7 @@ static void synic_init(struct kvm_vcpu_hv_synic *synic) static u64 get_time_ref_counter(struct kvm *kvm) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); struct kvm_vcpu *vcpu; u64 tsc; @@ -940,10 +939,9 @@ static bool kvm_hv_msr_partition_wide(u32 msr) return r; } -static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, - u32 index, u64 *pdata) +static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); size_t size = ARRAY_SIZE(hv->hv_crash_param); if (WARN_ON_ONCE(index >= size)) @@ -953,41 +951,26 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, return 0; } -static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata) +static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); *pdata = hv->hv_crash_ctl; return 0; } -static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host) +static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; - - if (host) - hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; - - if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) { + struct kvm_hv *hv = to_kvm_hv(kvm); - vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", - hv->hv_crash_param[0], - hv->hv_crash_param[1], - hv->hv_crash_param[2], - hv->hv_crash_param[3], - hv->hv_crash_param[4]); - - /* Send notification about crash to user space */ - kvm_make_request(KVM_REQ_HV_CRASH, vcpu); - } + hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; return 0; } -static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, - u32 index, u64 data) +static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data) { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); size_t size = ARRAY_SIZE(hv->hv_crash_param); if (WARN_ON_ONCE(index >= size)) @@ -1069,7 +1052,7 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock, void 
kvm_hv_setup_tsc_page(struct kvm *kvm, struct pvclock_vcpu_time_info *hv_clock) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); u32 tsc_seq; u64 gfn; @@ -1079,7 +1062,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) return; - mutex_lock(&kvm->arch.hyperv.hv_lock); + mutex_lock(&hv->hv_lock); if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) goto out_unlock; @@ -1123,14 +1106,14 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)); out_unlock: - mutex_unlock(&kvm->arch.hyperv.hv_lock); + mutex_unlock(&hv->hv_lock); } static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { struct kvm *kvm = vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); switch (msr) { case HV_X64_MSR_GUEST_OS_ID: @@ -1186,11 +1169,25 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); break; case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: - return kvm_hv_msr_set_crash_data(vcpu, + return kvm_hv_msr_set_crash_data(kvm, msr - HV_X64_MSR_CRASH_P0, data); case HV_X64_MSR_CRASH_CTL: - return kvm_hv_msr_set_crash_ctl(vcpu, data, host); + if (host) + return kvm_hv_msr_set_crash_ctl(kvm, data); + + if (data & HV_CRASH_CTL_CRASH_NOTIFY) { + vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", + hv->hv_crash_param[0], + hv->hv_crash_param[1], + hv->hv_crash_param[2], + hv->hv_crash_param[3], + hv->hv_crash_param[4]); + + /* Send notification about crash to user space */ + kvm_make_request(KVM_REQ_HV_CRASH, vcpu); + } + break; case HV_X64_MSR_RESET: if (data == 1) { vcpu_debug(vcpu, "hyper-v reset requested\n"); @@ -1238,7 +1235,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) switch (msr) { case HV_X64_MSR_VP_INDEX: { - struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); int vcpu_idx = kvm_vcpu_get_idx(vcpu); u32 new_vp_index = (u32)data; @@ -1348,7 +1345,7 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, { u64 data = 0; struct kvm *kvm = vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); switch (msr) { case HV_X64_MSR_GUEST_OS_ID: @@ -1364,11 +1361,11 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, data = hv->hv_tsc_page; break; case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: - return kvm_hv_msr_get_crash_data(vcpu, + return kvm_hv_msr_get_crash_data(kvm, msr - HV_X64_MSR_CRASH_P0, pdata); case HV_X64_MSR_CRASH_CTL: - return kvm_hv_msr_get_crash_ctl(vcpu, pdata); + return kvm_hv_msr_get_crash_ctl(kvm, pdata); case HV_X64_MSR_RESET: data = 0; break; @@ -1456,12 +1453,14 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); + if (kvm_hv_msr_partition_wide(msr)) { int r; - mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_lock(&hv->hv_lock); r = kvm_hv_set_msr_pw(vcpu, msr, data, host); - mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_unlock(&hv->hv_lock); return r; } else return kvm_hv_set_msr(vcpu, msr, data, host); @@ -1469,12 +1468,14 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); + if (kvm_hv_msr_partition_wide(msr)) { int r; - mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_lock(&hv->hv_lock); r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host); - mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); + mutex_unlock(&hv->hv_lock); return r; } else return kvm_hv_get_msr(vcpu, msr, pdata, host); @@ -1484,7 +1485,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask, u64 *vp_bitmap, unsigned long *vcpu_bitmap) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); struct kvm_vcpu *vcpu; int i, bank, sbank = 0; @@ -1686,7 +1687,7 @@ ret_success: bool kvm_hv_hypercall_enabled(struct kvm *kvm) { - return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0; + return to_kvm_hv(kvm)->hv_guest_os_id != 0; } static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) @@ -1716,6 +1717,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) { + struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); struct eventfd_ctx *eventfd; if (unlikely(!fast)) { @@ -1744,7 +1746,7 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ rcu_read_lock(); - eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); + eventfd = idr_find(&hv->conn_to_evt, param); rcu_read_unlock(); if (!eventfd) return HV_STATUS_INVALID_PORT_ID; @@ -1903,23 +1905,26 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) void kvm_hv_init_vm(struct kvm *kvm) { - mutex_init(&kvm->arch.hyperv.hv_lock); - idr_init(&kvm->arch.hyperv.conn_to_evt); + struct kvm_hv *hv = to_kvm_hv(kvm); + + mutex_init(&hv->hv_lock); + idr_init(&hv->conn_to_evt); } void kvm_hv_destroy_vm(struct kvm *kvm) { + struct kvm_hv *hv = to_kvm_hv(kvm); struct eventfd_ctx *eventfd; int i; - idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i) + idr_for_each_entry(&hv->conn_to_evt, eventfd, i) eventfd_ctx_put(eventfd); - idr_destroy(&kvm->arch.hyperv.conn_to_evt); + idr_destroy(&hv->conn_to_evt); } static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); struct eventfd_ctx *eventfd; int ret; @@ -1943,7 +1948,7 @@ static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd) static int kvm_hv_eventfd_deassign(struct kvm *kvm, 
u32 conn_id) { - struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_hv *hv = to_kvm_hv(kvm); struct eventfd_ctx *eventfd; mutex_lock(&hv->hv_lock); diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 220849cc12e5..fdb321ba9c3f 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -50,6 +50,11 @@ /* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */ #define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2) +static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm) +{ + return &kvm->arch.hyperv; +} + static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu) { return &vcpu->arch.hyperv; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 13898871e5b0..8f75374b221b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -50,6 +50,7 @@ #include "capabilities.h" #include "cpuid.h" #include "evmcs.h" +#include "hyperv.h" #include "irq.h" #include "kvm_cache_regs.h" #include "lapic.h" @@ -552,7 +553,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) { struct hv_enlightened_vmcs *evmcs; struct hv_partition_assist_pg **p_hv_pa_pg = - &vcpu->kvm->arch.hyperv.hv_pa_pg; + &to_kvm_hv(vcpu->kvm)->hv_pa_pg; /* * Synthetic VM-Exit is not enabled in current code and so All * evmcs in singe VM shares same assist page. diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4d410c722728..1f93879c7364 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10433,7 +10433,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_free_vm(struct kvm *kvm) { - kfree(kvm->arch.hyperv.hv_pa_pg); + kfree(to_kvm_hv(kvm)->hv_pa_pg); vfree(kvm); } -- cgit v1.2.3 From 72167a9d7da2c295caf1d4a2d58128406786505d Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:10 +0100 Subject: KVM: x86: hyper-v: Stop shadowing global 'current_vcpu' variable 'current_vcpu' variable in KVM is a per-cpu pointer to the currently scheduled vcpu. kvm_hv_flush_tlb()/kvm_hv_send_ipi() functions used to have local 'vcpu' variable to iterate over vCPUs but it's gone now and there's no need to use anything but the standard 'vcpu' as an argument. 
Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-10-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index d8504fc43825..b49e8ca6be06 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1509,11 +1509,10 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( return vcpu_bitmap; } -static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, - u16 rep_cnt, bool ex) +static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex) { - struct kvm *kvm = current_vcpu->kvm; - struct kvm_vcpu_hv *hv_vcpu = ¤t_vcpu->arch.hyperv; + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; @@ -1611,10 +1610,10 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, } } -static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, +static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa, bool ex, bool fast) { - struct kvm *kvm = current_vcpu->kvm; + struct kvm *kvm = vcpu->kvm; struct hv_send_ipi_ex send_ipi_ex; struct hv_send_ipi send_ipi; u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; -- cgit v1.2.3 From 9ff5e0304e949a8a4e584c8c2b11fad9b2e0b133 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:11 +0100 Subject: KVM: x86: hyper-v: Always use to_hv_vcpu() accessor to get to 'struct kvm_vcpu_hv' As a preparation to allocating Hyper-V context dynamically, make it clear who's the user of the said context. No functional change intended. 
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-11-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 14 ++++++++------ arch/x86/kvm/hyperv.h | 4 +++- arch/x86/kvm/lapic.h | 2 ++ arch/x86/kvm/vmx/vmx.c | 8 +++++--- arch/x86/kvm/x86.c | 4 +++- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b49e8ca6be06..1482cad2770b 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -191,7 +191,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) { struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; hv_vcpu->exit.u.synic.msr = msr; @@ -294,7 +294,7 @@ static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu) static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; hv_vcpu->exit.u.syndbg.msr = msr; @@ -840,7 +840,9 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) { - if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) return false; return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; } @@ -1231,7 +1233,7 @@ static u64 current_task_runtime_100ns(void) static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); switch (msr) { case HV_X64_MSR_VP_INDEX: { @@ -1394,7 +1396,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { u64 data = 0; - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); switch (msr) { case HV_X64_MSR_VP_INDEX: @@ -1512,7 +1514,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex) { struct kvm *kvm = vcpu->kvm; - struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index fdb321ba9c3f..be1e3f5d1df6 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -119,7 +119,9 @@ static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stim static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) { - return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap, + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + return !bitmap_empty(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); } diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 74016d21738d..997c45a5963a 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -6,6 +6,8 @@ #include +#include "hyperv.h" + #define KVM_APIC_INIT 0 #define KVM_APIC_SIPI 1 #define KVM_APIC_LVT_NUM 6 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8f75374b221b..f83199f4d77f 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ 
b/arch/x86/kvm/vmx/vmx.c @@ -6809,12 +6809,14 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); /* All fields are clean at this point */ - if (static_branch_unlikely(&enable_evmcs)) + if (static_branch_unlikely(&enable_evmcs)) { + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + current_evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; - if (static_branch_unlikely(&enable_evmcs)) - current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index; + current_evmcs->hv_vp_id = hv_vcpu->vp_index; + } /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ if (vmx->host_debugctlmsr) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1f93879c7364..40bd2e3b8f91 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8955,8 +8955,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto out; } if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + vcpu->run->exit_reason = KVM_EXIT_HYPERV; - vcpu->run->hyperv = vcpu->arch.hyperv.exit; + vcpu->run->hyperv = hv_vcpu->exit; r = 0; goto out; } -- cgit v1.2.3 From f2bc14b69c38b60f201fdf90c08cb2dc8966f331 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:12 +0100 Subject: KVM: x86: hyper-v: Prepare to meet unallocated Hyper-V context Currently, Hyper-V context is part of 'struct kvm_vcpu_arch' and is always available. As a preparation to allocating it dynamically, check that it is not NULL at call sites which can normally proceed without it i.e. the behavior is identical to the situation when Hyper-V emulation is not being used by the guest. When Hyper-V context for a particular vCPU is not allocated, we may still need to get 'vp_index' from there. E.g. in a hypothetical situation when Hyper-V emulation was enabled on one CPU and wasn't on another, Hyper-V style send-IPI hypercall may still be used. Luckily, vp_index is always initialized to kvm_vcpu_get_idx() and can only be changed when Hyper-V context is present. Introduce kvm_hv_get_vpindex() helper for simplification. No functional change intended. 
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-12-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 17 ++++++++++------- arch/x86/kvm/hyperv.h | 10 ++++++++++ arch/x86/kvm/lapic.c | 5 +++-- arch/x86/kvm/vmx/vmx.c | 4 +--- arch/x86/kvm/x86.c | 7 +++++-- 5 files changed, 29 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 1482cad2770b..10e7ed2aa8fe 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -142,10 +142,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) return NULL; vcpu = kvm_get_vcpu(kvm, vpidx); - if (vcpu && to_hv_vcpu(vcpu)->vp_index == vpidx) + if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) - if (to_hv_vcpu(vcpu)->vp_index == vpidx) + if (kvm_hv_get_vpindex(vcpu) == vpidx) return vcpu; return NULL; } @@ -377,9 +377,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) break; } - trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, - to_hv_vcpu(vcpu)->vp_index, msr, - *pdata); + trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata); return 0; } @@ -806,6 +804,9 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) u64 time_now, exp_time; int i; + if (!hv_vcpu) + return; + for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { stimer = &hv_vcpu->stimer[i]; @@ -842,6 +843,9 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + if (!hv_vcpu) + return false; + if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) return false; return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; @@ -1504,8 +1508,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); kvm_for_each_vcpu(i, vcpu, kvm) { - if (test_bit(to_hv_vcpu(vcpu)->vp_index, - (unsigned long *)vp_bitmap)) + if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap)) __set_bit(i, vcpu_bitmap); } return vcpu_bitmap; diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index be1e3f5d1df6..57e53a88f5eb 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -83,6 +83,13 @@ static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu) return &vcpu->kvm->arch.hyperv.hv_syndbg; } +static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + return hv_vcpu ? 
hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu); +} + int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host); @@ -121,6 +128,9 @@ static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + if (!hv_vcpu) + return false; + return !bitmap_empty(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 847fe11a49f5..45d40bfacb7c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1245,7 +1245,8 @@ static int apic_set_eoi(struct kvm_lapic *apic) apic_clear_isr(vector, apic); apic_update_ppr(apic); - if (test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap)) + if (to_hv_vcpu(apic->vcpu) && + test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap)) kvm_hv_synic_send_eoi(apic->vcpu, vector); kvm_ioapic_send_eoi(apic, vector); @@ -2512,7 +2513,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) */ apic_clear_irr(vector, apic); - if (test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) { + if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) { /* * For auto-EOI interrupts, there might be another pending * interrupt above PPR, so check whether to raise another diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f83199f4d77f..e0a3a9be654b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6810,12 +6810,10 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) /* All fields are clean at this point */ if (static_branch_unlikely(&enable_evmcs)) { - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); - current_evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; - current_evmcs->hv_vp_id = hv_vcpu->vp_index; + current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu); } /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 40bd2e3b8f91..7caf95dadc93 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8803,8 +8803,11 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) if (!kvm_apic_hw_enabled(vcpu->arch.apic)) return; - bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, - to_hv_synic(vcpu)->vec_bitmap, 256); + if (to_hv_vcpu(vcpu)) + bitmap_or((ulong *)eoi_exit_bitmap, + vcpu->arch.ioapic_handled_vectors, + to_hv_synic(vcpu)->vec_bitmap, 256); + static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); } -- cgit v1.2.3 From 4592b7eaa87d3525825d4ab2a35308bcec9e5ff9 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:13 +0100 Subject: KVM: x86: hyper-v: Allocate 'struct kvm_vcpu_hv' dynamically Hyper-V context is only needed for guests which use Hyper-V emulation in KVM (e.g. Windows/Hyper-V guests). 'struct kvm_vcpu_hv' is, however, quite big, it accounts for more than 1/4 of the total 'struct kvm_vcpu_arch' which is also quite big already. This all looks like a waste. Allocate 'struct kvm_vcpu_hv' dynamically. This patch does not bring any (intentional) functional change as we still allocate the context unconditionally but it paves the way to doing that only when needed. 
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-13-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/hyperv.c | 16 ++++++++++++++-- arch/x86/kvm/hyperv.h | 13 ++++++------- arch/x86/kvm/x86.c | 7 +++++-- 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b6de568e7476..95eb6e93a4b7 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -519,6 +519,7 @@ struct kvm_vcpu_hv_synic { /* Hyper-V per vcpu emulation context */ struct kvm_vcpu_hv { + struct kvm_vcpu *vcpu; u32 vp_index; u64 hv_vapic; s64 runtime_offset; @@ -735,7 +736,7 @@ struct kvm_vcpu_arch { /* used for guest single stepping over the given code position */ unsigned long singlestep_rip; - struct kvm_vcpu_hv hyperv; + struct kvm_vcpu_hv *hyperv; struct kvm_vcpu_xen xen; cpumask_var_t wbinvd_dirty_mask; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 10e7ed2aa8fe..333bd200f186 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -837,6 +837,9 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) stimer_cleanup(&hv_vcpu->stimer[i]); + + kfree(hv_vcpu); + vcpu->arch.hyperv = NULL; } bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) @@ -886,16 +889,25 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index) stimer_prepare_msg(stimer); } -void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) +int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) { - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + struct kvm_vcpu_hv *hv_vcpu; int i; + hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT); + if (!hv_vcpu) + return -ENOMEM; + + vcpu->arch.hyperv = hv_vcpu; + hv_vcpu->vcpu = vcpu; + synic_init(&hv_vcpu->synic); bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) stimer_init(&hv_vcpu->stimer[i], i); + + return 0; } void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 57e53a88f5eb..4bc2ab0caa92 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -57,20 +57,19 @@ static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm) static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu) { - return &vcpu->arch.hyperv; + return vcpu->arch.hyperv; } static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu) { - struct kvm_vcpu_arch *arch; - - arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv); - return container_of(arch, struct kvm_vcpu, arch); + return hv_vcpu->vcpu; } static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu) { - return &vcpu->arch.hyperv.synic; + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + + return &hv_vcpu->synic; } static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) @@ -101,7 +100,7 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint); void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector); int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages); -void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); +int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7caf95dadc93..a75d01523c4f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ 
-10083,11 +10083,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.pending_external_vector = -1; vcpu->arch.preempted_in_kernel = false; - kvm_hv_vcpu_init(vcpu); + if (kvm_hv_vcpu_init(vcpu)) + goto free_guest_fpu; r = static_call(kvm_x86_vcpu_create)(vcpu); if (r) - goto free_guest_fpu; + goto free_hv_vcpu; vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; @@ -10098,6 +10099,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu_put(vcpu); return 0; +free_hv_vcpu: + kvm_hv_vcpu_uninit(vcpu); free_guest_fpu: kvm_free_guest_fpu(vcpu); free_user_fpu: -- cgit v1.2.3 From 8f014550dfb114cc7f42a517d20d2cf887a0b771 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:14 +0100 Subject: KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional Hyper-V emulation is enabled in KVM unconditionally. This is bad at least from security standpoint as it is an extra attack surface. Ideally, there should be a per-VM capability explicitly enabled by VMM but currently it is not the case and we can't mandate one without breaking backwards compatibility. We can, however, check guest visible CPUIDs and only enable Hyper-V emulation when "Hv#1" interface was exposed in HYPERV_CPUID_INTERFACE. Note, VMMs are free to act in any sequence they like, e.g. they can try to set MSRs first and CPUIDs later so we still need to allow the host to read/write Hyper-V specific MSRs unconditionally. Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-14-vkuznets@redhat.com> [Add selftest vcpu_set_hv_cpuid API to avoid breaking xen_vmcall_test. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/cpuid.c | 2 ++ arch/x86/kvm/hyperv.c | 27 ++++++++++++--- arch/x86/kvm/hyperv.h | 3 +- arch/x86/kvm/x86.c | 2 +- arch/x86/kvm/xen.c | 2 +- .../selftests/kvm/include/x86_64/processor.h | 1 + tools/testing/selftests/kvm/lib/x86_64/processor.c | 35 +++++++++++++++++++ tools/testing/selftests/kvm/x86_64/evmcs_test.c | 40 ++-------------------- .../testing/selftests/kvm/x86_64/xen_vmcall_test.c | 2 +- 10 files changed, 69 insertions(+), 46 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 95eb6e93a4b7..84499aad01a4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -736,6 +736,7 @@ struct kvm_vcpu_arch { /* used for guest single stepping over the given code position */ unsigned long singlestep_rip; + bool hyperv_enabled; struct kvm_vcpu_hv *hyperv; struct kvm_vcpu_xen xen; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index f47bbd4b0837..c8f2592ccc99 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -179,6 +179,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(guest_cpuid_has, vcpu); + kvm_hv_set_cpuid(vcpu); + /* Invoke the vendor callback only after the above state is updated. 
*/ static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 333bd200f186..11d16fe52324 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -37,6 +37,9 @@ #include "trace.h" #include "irq.h" +/* "Hv#1" signature */ +#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648 + #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, @@ -1473,6 +1476,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); + if (!host && !vcpu->arch.hyperv_enabled) + return 1; + if (kvm_hv_msr_partition_wide(msr)) { int r; @@ -1488,6 +1494,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); + if (!host && !vcpu->arch.hyperv_enabled) + return 1; + if (kvm_hv_msr_partition_wide(msr)) { int r; @@ -1701,9 +1710,20 @@ ret_success: return HV_STATUS_SUCCESS; } -bool kvm_hv_hypercall_enabled(struct kvm *kvm) +void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *entry; + + entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0); + if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) + vcpu->arch.hyperv_enabled = true; + else + vcpu->arch.hyperv_enabled = false; +} + +bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu) { - return to_kvm_hv(kvm)->hv_guest_os_id != 0; + return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id; } static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) @@ -2036,8 +2056,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, break; case HYPERV_CPUID_INTERFACE: - memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); - ent->eax = signature[0]; + ent->eax = HYPERV_CPUID_SIGNATURE_EAX; break; case HYPERV_CPUID_VERSION: diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 4bc2ab0caa92..f28e7fb88765 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -92,7 +92,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu) int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host); -bool kvm_hv_hypercall_enabled(struct kvm *kvm); +bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu); int kvm_hv_hypercall(struct kvm_vcpu *vcpu); void kvm_hv_irq_routing_update(struct kvm *kvm); @@ -141,6 +141,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm, void kvm_hv_init_vm(struct kvm *kvm); void kvm_hv_destroy_vm(struct kvm *kvm); +void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu); int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args); int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a75d01523c4f..1e5c304d14ab 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8162,7 +8162,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (kvm_xen_hypercall_enabled(vcpu->kvm)) return kvm_xen_hypercall(vcpu); - if (kvm_hv_hypercall_enabled(vcpu->kvm)) + if (kvm_hv_hypercall_enabled(vcpu)) return kvm_hv_hypercall(vcpu); nr = kvm_rax_read(vcpu); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index b414fed1576b..b08a688e56f9 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -369,7 +369,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) /* Hyper-V hypercalls get bit 31 set in 
EAX */ if ((input & 0x80000000) && - kvm_hv_hypercall_enabled(vcpu->kvm)) + kvm_hv_hypercall_enabled(vcpu)) return kvm_hv_hypercall(vcpu); longmode = is_64_bit_mode(vcpu); diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 42292d676eff..0b30b4e15c38 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -407,6 +407,7 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3); struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void); +void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); /* diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 18483df2b331..de0c76177d02 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1323,6 +1323,41 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void) return cpuid; } +void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid) +{ + static struct kvm_cpuid2 *cpuid_full; + struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; + int i, nent = 0; + + if (!cpuid_full) { + cpuid_sys = kvm_get_supported_cpuid(); + cpuid_hv = kvm_get_supported_hv_cpuid(); + + cpuid_full = malloc(sizeof(*cpuid_full) + + (cpuid_sys->nent + cpuid_hv->nent) * + sizeof(struct kvm_cpuid_entry2)); + if (!cpuid_full) { + perror("malloc"); + abort(); + } + + /* Need to skip KVM CPUID leaves 0x400000xx */ + for (i = 0; i < cpuid_sys->nent; i++) { + if (cpuid_sys->entries[i].function >= 0x40000000 && + cpuid_sys->entries[i].function < 0x40000100) + continue; + cpuid_full->entries[nent] = cpuid_sys->entries[i]; + nent++; + } + + memcpy(&cpuid_full->entries[nent], cpuid_hv->entries, + cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2)); + cpuid_full->nent = nent + cpuid_hv->nent; + } + + vcpu_set_cpuid(vm, vcpuid, cpuid_full); +} + struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid) { static struct kvm_cpuid2 *cpuid; diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index 39a3cb2bd103..ca22ee6d19cb 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -78,42 +78,6 @@ void guest_code(struct vmx_pages *vmx_pages) GUEST_ASSERT(vmlaunch()); } -struct kvm_cpuid2 *guest_get_cpuid(void) -{ - static struct kvm_cpuid2 *cpuid_full; - struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; - int i, nent = 0; - - if (cpuid_full) - return cpuid_full; - - cpuid_sys = kvm_get_supported_cpuid(); - cpuid_hv = kvm_get_supported_hv_cpuid(); - - cpuid_full = malloc(sizeof(*cpuid_full) + - (cpuid_sys->nent + cpuid_hv->nent) * - sizeof(struct kvm_cpuid_entry2)); - if (!cpuid_full) { - perror("malloc"); - abort(); - } - - /* Need to skip KVM CPUID leaves 0x400000xx */ - for (i = 0; i < cpuid_sys->nent; i++) { - if (cpuid_sys->entries[i].function >= 0x40000000 && - cpuid_sys->entries[i].function < 0x40000100) - continue; - cpuid_full->entries[nent] = cpuid_sys->entries[i]; - nent++; - } - - memcpy(&cpuid_full->entries[nent], cpuid_hv->entries, - cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2)); - cpuid_full->nent = nent + cpuid_hv->nent; - - return cpuid_full; -} - int main(int argc, char *argv[]) { vm_vaddr_t vmx_pages_gva = 0; @@ -135,7 +99,7 @@ int main(int argc, char *argv[]) 
exit(KSFT_SKIP); } - vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid()); + vcpu_set_hv_cpuid(vm, VCPU_ID); vcpu_enable_evmcs(vm, VCPU_ID); run = vcpu_state(vm, VCPU_ID); @@ -179,7 +143,7 @@ int main(int argc, char *argv[]) /* Restore state in a new VM. */ kvm_vm_restart(vm, O_RDWR); vm_vcpu_add(vm, VCPU_ID); - vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid()); + vcpu_set_hv_cpuid(vm, VCPU_ID); vcpu_enable_evmcs(vm, VCPU_ID); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c index 6e7b069322fd..86653361c695 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c @@ -92,7 +92,7 @@ int main(int argc, char *argv[]) } vm = vm_create_default(VCPU_ID, 0, (void *) guest_code); - vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + vcpu_set_hv_cpuid(vm, VCPU_ID); struct kvm_xen_hvm_config hvmc = { .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, -- cgit v1.2.3 From fc08b628d7c96d9a6d6bca488c3fa9c92bee6cc8 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:15 +0100 Subject: KVM: x86: hyper-v: Allocate Hyper-V context lazily Hyper-V context is only needed for guests which use Hyper-V emulation in KVM (e.g. Windows/Hyper-V guests) so we don't actually need to allocate it in kvm_arch_vcpu_create(), we can postpone the action until Hyper-V specific MSRs are accessed or SynIC is enabled. Once allocated, let's keep the context alive for the lifetime of the vCPU as an attempt to free it would require additional synchronization with other vCPUs and normally it is not supposed to happen. Note, Hyper-V style hypercall enablement is done by writing to HV_X64_MSR_GUEST_OS_ID so we don't need to worry about allocating Hyper-V context from kvm_hv_hypercall(). 
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-15-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 33 +++++++++++++++++++++++++-------- arch/x86/kvm/hyperv.h | 2 -- arch/x86/kvm/x86.c | 9 +-------- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 11d16fe52324..7d2dae92d638 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -838,6 +838,9 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); int i; + if (!hv_vcpu) + return; + for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) stimer_cleanup(&hv_vcpu->stimer[i]); @@ -892,7 +895,7 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index) stimer_prepare_msg(stimer); } -int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) +static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu; int i; @@ -910,19 +913,23 @@ int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) stimer_init(&hv_vcpu->stimer[i], i); + hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); + return 0; } -void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu) +int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages) { - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + struct kvm_vcpu_hv_synic *synic; + int r; - hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); -} + if (!to_hv_vcpu(vcpu)) { + r = kvm_hv_vcpu_init(vcpu); + if (r) + return r; + } -int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages) -{ - struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); + synic = to_hv_synic(vcpu); /* * Hyper-V SynIC auto EOI SINT's are @@ -1479,6 +1486,11 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) if (!host && !vcpu->arch.hyperv_enabled) return 1; + if (!to_hv_vcpu(vcpu)) { + if (kvm_hv_vcpu_init(vcpu)) + return 1; + } + if (kvm_hv_msr_partition_wide(msr)) { int r; @@ -1497,6 +1509,11 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) if (!host && !vcpu->arch.hyperv_enabled) return 1; + if (!to_hv_vcpu(vcpu)) { + if (kvm_hv_vcpu_init(vcpu)) + return 1; + } + if (kvm_hv_msr_partition_wide(msr)) { int r; diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index f28e7fb88765..4e0f886eb2a9 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -100,8 +100,6 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint); void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector); int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages); -int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1e5c304d14ab..3fa140383f5d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10083,12 +10083,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.pending_external_vector = -1; vcpu->arch.preempted_in_kernel = false; - if (kvm_hv_vcpu_init(vcpu)) - goto free_guest_fpu; - r = static_call(kvm_x86_vcpu_create)(vcpu); if (r) - goto free_hv_vcpu; + goto free_guest_fpu; vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; @@ -10099,8 +10096,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu_put(vcpu); return 0; -free_hv_vcpu: - 
kvm_hv_vcpu_uninit(vcpu); free_guest_fpu: kvm_free_guest_fpu(vcpu); free_user_fpu: @@ -10124,8 +10119,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - kvm_hv_vcpu_postcreate(vcpu); - if (mutex_lock_killable(&vcpu->mutex)) return; vcpu_load(vcpu); -- cgit v1.2.3 From b9ce0f86d91942596bce0267bfa7db3bbd7b434f Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 26 Jan 2021 14:48:16 +0100 Subject: KVM: x86: hyper-v: Drop hv_vcpu_to_vcpu() helper hv_vcpu_to_vcpu() helper is only used by other helpers and is not very complex, we can drop it without much regret. Signed-off-by: Vitaly Kuznetsov Message-Id: <20210126134816.1880136-16-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.h | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index 4e0f886eb2a9..e951af1fcb2c 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -60,11 +60,6 @@ static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu) return vcpu->arch.hyperv; } -static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu) -{ - return hv_vcpu->vcpu; -} - static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); @@ -74,7 +69,9 @@ static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu) static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic) { - return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic)); + struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic); + + return hv_vcpu->vcpu; } static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu) @@ -118,7 +115,7 @@ static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stim hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv, stimer[0]); - return hv_vcpu_to_vcpu(hv_vcpu); + return hv_vcpu->vcpu; } static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 8f5c44f953d36f8c1aea6d57eb3251e3640f4dad Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 8 Feb 2021 19:51:32 +0100 Subject: KVM: x86/mmu: Make HVA handler retpoline-friendly When retpolines are enabled they have high overhead in the inner loop inside kvm_handle_hva_range() that iterates over the provided memory area. Let's mark this function and its TDP MMU equivalent __always_inline so compiler will be able to change the call to the actual handler function inside each of them into a direct one. This significantly improves performance on the unmap test on the existing kernel memslot code (tested on a Xeon 8167M machine): 30 slots in use: Test Before After Improvement Unmap 0.0353s 0.0334s 5% Unmap 2M 0.00104s 0.000407s 61% 509 slots in use: Test Before After Improvement Unmap 0.0742s 0.0740s None Unmap 2M 0.00221s 0.00159s 28% Looks like having an indirect call in these functions (and, so, a retpoline) might have interfered with unrolling of the whole loop in the CPU. Signed-off-by: Maciej S. 
Szmigiero Message-Id: <732d3fe9eb68aa08402a638ab0309199fa89ae56.1612810129.git.maciej.szmigiero@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 21 +++++++++++---------- arch/x86/kvm/mmu/tdp_mmu.c | 16 +++++++++++----- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index bebb66c6d068..e507568cd55d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1457,16 +1457,17 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) slot_rmap_walk_okay(_iter_); \ slot_rmap_walk_next(_iter_)) -static int kvm_handle_hva_range(struct kvm *kvm, - unsigned long start, - unsigned long end, - unsigned long data, - int (*handler)(struct kvm *kvm, - struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, - gfn_t gfn, - int level, - unsigned long data)) +static __always_inline int +kvm_handle_hva_range(struct kvm *kvm, + unsigned long start, + unsigned long end, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, + gfn_t gfn, + int level, + unsigned long data)) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index ec99e50a7ba7..71e100a5670f 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -879,11 +879,17 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, return ret; } -static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end, unsigned long data, - int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t start, - gfn_t end, unsigned long data)) +static __always_inline int +kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, + unsigned long start, + unsigned long end, + unsigned long data, + int (*handler)(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_mmu_page *root, + gfn_t start, + gfn_t end, + unsigned long data)) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; -- cgit v1.2.3 From 0c165b3c01fefa78f440d5759cef47e8754a7a32 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 8 Feb 2021 23:23:25 +0000 Subject: KVM: x86/xen: Allow reset of Xen attributes In order to support Xen SHUTDOWN_soft_reset (for guest kexec, etc.) the VMM needs to be able to tear everything down and return the Xen features to a clean slate. 
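From the VMM side, a soft reset then amounts to writing the "invalid" value back through the attribute interface. A rough userspace sketch follows; the KVM_XEN_HVM_SET_ATTR ioctl and the all-ones invalid GFN are assumptions taken from the handlers in the diff below, and the per-vCPU attributes (vcpu_info, vcpu_time_info) are reset the same way by passing GPA_INVALID:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: tear the Xen shared-info page back down to a clean slate. */
static void xen_soft_reset_shared_info(int vm_fd)
{
        struct kvm_xen_hvm_attr attr = {
                .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
                /* all-ones GFN (GPA_INVALID) now means "no shared info page" */
                .u.shared_info.gfn = ~0ULL,
        };

        if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr))
                perror("KVM_XEN_HVM_SET_ATTR");
}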
Signed-off-by: David Woodhouse Message-Id: <20210208232326.1830370-1-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index b08a688e56f9..af8f6562fce4 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -116,12 +116,17 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) break; case KVM_XEN_ATTR_TYPE_SHARED_INFO: + if (data->u.shared_info.gfn == GPA_INVALID) { + kvm->arch.xen.shinfo_set = false; + r = 0; + break; + } r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); break; case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: - if (data->u.vector < 0x10) + if (data->u.vector && data->u.vector < 0x10) r = -EINVAL; else { kvm->arch.xen.upcall_vector = data->u.vector; @@ -150,10 +155,11 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) break; case KVM_XEN_ATTR_TYPE_SHARED_INFO: - if (kvm->arch.xen.shinfo_set) { + if (kvm->arch.xen.shinfo_set) data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); - r = 0; - } + else + data->u.shared_info.gfn = GPA_INVALID; + r = 0; break; case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: @@ -182,6 +188,11 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) BUILD_BUG_ON(sizeof(struct vcpu_info) != sizeof(struct compat_vcpu_info)); + if (data->u.gpa == GPA_INVALID) { + vcpu->arch.xen.vcpu_info_set = false; + break; + } + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache, data->u.gpa, @@ -193,6 +204,11 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) break; case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: + if (data->u.gpa == GPA_INVALID) { + vcpu->arch.xen.vcpu_time_info_set = false; + break; + } + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache, data->u.gpa, @@ -220,17 +236,19 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) switch (data->type) { case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: - if (vcpu->arch.xen.vcpu_info_set) { + if (vcpu->arch.xen.vcpu_info_set) data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; - r = 0; - } + else + data->u.gpa = GPA_INVALID; + r = 0; break; case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: - if (vcpu->arch.xen.vcpu_time_info_set) { + if (vcpu->arch.xen.vcpu_time_info_set) data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; - r = 0; - } + else + data->u.gpa = GPA_INVALID; + r = 0; break; default: -- cgit v1.2.3 From 9294b8a12585f8b4ccb9c060b54bab0bd13f24b9 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 9 Feb 2021 05:07:45 -0500 Subject: Documentation: kvm: fix warning Documentation/virt/kvm/api.rst:4927: WARNING: Title underline too short. 4.130 KVM_XEN_VCPU_GET_ATTR -------------------------- Fixes: e1f68169a4f8 ("KVM: Add documentation for Xen hypercall and shared_info updates") Reported-by: Stephen Rothwell Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 5919e9a94f25..482508ec7cc4 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4924,7 +4924,7 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO for a given vCPU. This is typically used for guest vsyscall support. 
4.130 KVM_XEN_VCPU_GET_ATTR --------------------------- +--------------------------- :Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO :Architectures: x86 -- cgit v1.2.3 From afe75049303f75c73a97d635a4d353c7d571d1a3 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Wed, 16 Dec 2020 16:12:16 +0530 Subject: KVM: PPC: Book3S HV: Allow nested guest creation when L0 hv_guest_state > L1 On powerpc, L1 hypervisor takes help of L0 using H_ENTER_NESTED hcall to load L2 guest state in cpu. L1 hypervisor prepares the L2 state in struct hv_guest_state and passes a pointer to it via hcall. Using that pointer, L0 reads/writes that state directly from/to L1 memory. Thus L0 must be aware of hv_guest_state layout of L1. Currently it uses version field to achieve this. i.e. If L0 hv_guest_state.version != L1 hv_guest_state.version, L0 won't allow nested kvm guest. This restriction can be loosened up a bit. L0 can be taught to understand older layout of hv_guest_state, if we restrict the new members to be added only at the end, i.e. we can allow nested guest even when L0 hv_guest_state.version > L1 hv_guest_state.version. Though, the other way around is not possible. Signed-off-by: Ravi Bangoria Reviewed-by: Fabiano Rosas Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/hvcall.h | 17 ++++++++++-- arch/powerpc/kvm/book3s_hv_nested.c | 55 ++++++++++++++++++++++++++++++------- 2 files changed, 60 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index c98f5141e3fc..cb4d07e2c0cc 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -535,9 +535,12 @@ struct h_cpu_char_result { u64 behaviour; }; -/* Register state for entering a nested guest with H_ENTER_NESTED */ +/* + * Register state for entering a nested guest with H_ENTER_NESTED. + * New member must be added at the end. 
+ */ struct hv_guest_state { - u64 version; /* version of this structure layout */ + u64 version; /* version of this structure layout, must be first */ u32 lpid; u32 vcpu_token; /* These registers are hypervisor privileged (at least for writing) */ @@ -571,6 +574,16 @@ struct hv_guest_state { /* Latest version of hv_guest_state structure */ #define HV_GUEST_STATE_VERSION 1 +static inline int hv_guest_state_size(unsigned int version) +{ + switch (version) { + case 1: + return offsetofend(struct hv_guest_state, ppr); + default: + return -1; + } +} + /* * From the document "H_GetPerformanceCounterInfo Interface" v1.07 * diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 33b58549a9aa..5b8043cf4668 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -215,12 +215,51 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr) } } +static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu, + struct hv_guest_state *l2_hv, + struct pt_regs *l2_regs, + u64 hv_ptr, u64 regs_ptr) +{ + int size; + + if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version, + sizeof(l2_hv->version))) + return -1; + + if (kvmppc_need_byteswap(vcpu)) + l2_hv->version = swab64(l2_hv->version); + + size = hv_guest_state_size(l2_hv->version); + if (size < 0) + return -1; + + return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) || + kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs, + sizeof(struct pt_regs)); +} + +static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu, + struct hv_guest_state *l2_hv, + struct pt_regs *l2_regs, + u64 hv_ptr, u64 regs_ptr) +{ + int size; + + size = hv_guest_state_size(l2_hv->version); + if (size < 0) + return -1; + + return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) || + kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs, + sizeof(struct pt_regs)); +} + long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) { long int err, r; struct kvm_nested_guest *l2; struct pt_regs l2_regs, saved_l1_regs; - struct hv_guest_state l2_hv, saved_l1_hv; + struct hv_guest_state l2_hv = {0}, saved_l1_hv; struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 hv_ptr, regs_ptr; u64 hdec_exp; @@ -235,17 +274,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) hv_ptr = kvmppc_get_gpr(vcpu, 4); regs_ptr = kvmppc_get_gpr(vcpu, 5); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv, - sizeof(struct hv_guest_state)) || - kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs, - sizeof(struct pt_regs)); + err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, + hv_ptr, regs_ptr); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (err) return H_PARAMETER; if (kvmppc_need_byteswap(vcpu)) byteswap_hv_regs(&l2_hv); - if (l2_hv.version != HV_GUEST_STATE_VERSION) + if (l2_hv.version > HV_GUEST_STATE_VERSION) return H_P2; if (kvmppc_need_byteswap(vcpu)) @@ -325,10 +362,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) byteswap_pt_regs(&l2_regs); } vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv, - sizeof(struct hv_guest_state)) || - kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs, - sizeof(struct pt_regs)); + err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, + hv_ptr, regs_ptr); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (err) return H_AUTHORITY; -- cgit v1.2.3 From 122954ed7db313c53698c35005e93e8e04135428 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Wed, 16 Dec 2020 16:12:17 +0530 
Subject: KVM: PPC: Book3S HV: Rename current DAWR macros and variables Power10 is introducing a second DAWR (Data Address Watchpoint Register). Use real register names (with suffix 0) from ISA for current macros and variables used by kvm. One exception is KVM_REG_PPC_DAWR. Keep it as it is because it's uapi so changing it will break userspace. Signed-off-by: Ravi Bangoria Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 4 ++-- arch/powerpc/kernel/asm-offsets.c | 4 ++-- arch/powerpc/kvm/book3s_hv.c | 24 ++++++++++++------------ arch/powerpc/kvm/book3s_hv_nested.c | 8 ++++---- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 20 ++++++++++---------- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 2b9b6855ec86..cf52081c9282 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -583,8 +583,8 @@ struct kvm_vcpu_arch { u32 ctrl; u32 dabrx; ulong dabr; - ulong dawr; - ulong dawrx; + ulong dawr0; + ulong dawrx0; ulong ciabr; ulong cfar; ulong ppr; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index b12d7c049bfe..410b8c47443f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -526,8 +526,8 @@ int main(void) OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl); OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr); OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); - OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr); - OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx); + OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0); + OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0); OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6f612d240392..07531ac6ba5d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -782,8 +782,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, return H_UNSUPPORTED_FLAG_START; if (value2 & DABRX_HYP) return H_P4; - vcpu->arch.dawr = value1; - vcpu->arch.dawrx = value2; + vcpu->arch.dawr0 = value1; + vcpu->arch.dawrx0 = value2; return H_SUCCESS; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: /* KVM does not support mflags=2 (AIL=2) */ @@ -1759,10 +1759,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.vcore->vtb); break; case KVM_REG_PPC_DAWR: - *val = get_reg_val(id, vcpu->arch.dawr); + *val = get_reg_val(id, vcpu->arch.dawr0); break; case KVM_REG_PPC_DAWRX: - *val = get_reg_val(id, vcpu->arch.dawrx); + *val = get_reg_val(id, vcpu->arch.dawrx0); break; case KVM_REG_PPC_CIABR: *val = get_reg_val(id, vcpu->arch.ciabr); @@ -1991,10 +1991,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.vcore->vtb = set_reg_val(id, *val); break; case KVM_REG_PPC_DAWR: - vcpu->arch.dawr = set_reg_val(id, *val); + vcpu->arch.dawr0 = set_reg_val(id, *val); break; case KVM_REG_PPC_DAWRX: - vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; + vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; break; case KVM_REG_PPC_CIABR: vcpu->arch.ciabr = set_reg_val(id, *val); @@ -3449,8 +3449,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, int trap; unsigned long host_hfscr = mfspr(SPRN_HFSCR); unsigned long host_ciabr = mfspr(SPRN_CIABR); - unsigned long host_dawr = mfspr(SPRN_DAWR0); - unsigned long host_dawrx = mfspr(SPRN_DAWRX0); + unsigned 
long host_dawr0 = mfspr(SPRN_DAWR0); + unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0); unsigned long host_psscr = mfspr(SPRN_PSSCR); unsigned long host_pidr = mfspr(SPRN_PID); @@ -3489,8 +3489,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_SPURR, vcpu->arch.spurr); if (dawr_enabled()) { - mtspr(SPRN_DAWR0, vcpu->arch.dawr); - mtspr(SPRN_DAWRX0, vcpu->arch.dawrx); + mtspr(SPRN_DAWR0, vcpu->arch.dawr0); + mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0); } mtspr(SPRN_CIABR, vcpu->arch.ciabr); mtspr(SPRN_IC, vcpu->arch.ic); @@ -3542,8 +3542,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); mtspr(SPRN_HFSCR, host_hfscr); mtspr(SPRN_CIABR, host_ciabr); - mtspr(SPRN_DAWR0, host_dawr); - mtspr(SPRN_DAWRX0, host_dawrx); + mtspr(SPRN_DAWR0, host_dawr0); + mtspr(SPRN_DAWRX0, host_dawrx0); mtspr(SPRN_PID, host_pidr); /* diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 5b8043cf4668..d22e45b97f88 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -33,8 +33,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) hr->dpdes = vc->dpdes; hr->hfscr = vcpu->arch.hfscr; hr->tb_offset = vc->tb_offset; - hr->dawr0 = vcpu->arch.dawr; - hr->dawrx0 = vcpu->arch.dawrx; + hr->dawr0 = vcpu->arch.dawr0; + hr->dawrx0 = vcpu->arch.dawrx0; hr->ciabr = vcpu->arch.ciabr; hr->purr = vcpu->arch.purr; hr->spurr = vcpu->arch.spurr; @@ -151,8 +151,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) vc->pcr = hr->pcr | PCR_MASK; vc->dpdes = hr->dpdes; vcpu->arch.hfscr = hr->hfscr; - vcpu->arch.dawr = hr->dawr0; - vcpu->arch.dawrx = hr->dawrx0; + vcpu->arch.dawr0 = hr->dawr0; + vcpu->arch.dawrx0 = hr->dawrx0; vcpu->arch.ciabr = hr->ciabr; vcpu->arch.purr = hr->purr; vcpu->arch.spurr = hr->spurr; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index cd9995ee8441..75804062f2c5 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -52,8 +52,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define STACK_SLOT_PID (SFS-32) #define STACK_SLOT_IAMR (SFS-40) #define STACK_SLOT_CIABR (SFS-48) -#define STACK_SLOT_DAWR (SFS-56) -#define STACK_SLOT_DAWRX (SFS-64) +#define STACK_SLOT_DAWR0 (SFS-56) +#define STACK_SLOT_DAWRX0 (SFS-64) #define STACK_SLOT_HFSCR (SFS-72) #define STACK_SLOT_AMR (SFS-80) #define STACK_SLOT_UAMOR (SFS-88) @@ -711,8 +711,8 @@ BEGIN_FTR_SECTION mfspr r7, SPRN_DAWRX0 mfspr r8, SPRN_IAMR std r5, STACK_SLOT_CIABR(r1) - std r6, STACK_SLOT_DAWR(r1) - std r7, STACK_SLOT_DAWRX(r1) + std r6, STACK_SLOT_DAWR0(r1) + std r7, STACK_SLOT_DAWRX0(r1) std r8, STACK_SLOT_IAMR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) @@ -801,8 +801,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) lbz r5, 0(r5) cmpdi r5, 0 beq 1f - ld r5, VCPU_DAWR(r4) - ld r6, VCPU_DAWRX(r4) + ld r5, VCPU_DAWR0(r4) + ld r6, VCPU_DAWRX0(r4) mtspr SPRN_DAWR0, r5 mtspr SPRN_DAWRX0, r6 1: @@ -1759,8 +1759,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* Restore host values of some registers */ BEGIN_FTR_SECTION ld r5, STACK_SLOT_CIABR(r1) - ld r6, STACK_SLOT_DAWR(r1) - ld r7, STACK_SLOT_DAWRX(r1) + ld r6, STACK_SLOT_DAWR0(r1) + ld r7, STACK_SLOT_DAWRX0(r1) mtspr SPRN_CIABR, r5 /* * If the DAWR doesn't work, it's ok to write these here as @@ -2574,8 +2574,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 
rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW rlwimi r5, r4, 2, DAWRX_WT clrrdi r4, r4, 3 - std r4, VCPU_DAWR(r3) - std r5, VCPU_DAWRX(r3) + std r4, VCPU_DAWR0(r3) + std r5, VCPU_DAWRX0(r3) /* * If came in through the real mode hcall handler then it is necessary * to write the registers since the return path won't. Otherwise it is -- cgit v1.2.3 From bd1de1a0e6eff4bde5ceae969673b85b8446fd6a Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Wed, 16 Dec 2020 16:12:18 +0530 Subject: KVM: PPC: Book3S HV: Add infrastructure to support 2nd DAWR KVM code assumes single DAWR everywhere. Add code to support 2nd DAWR. DAWR is a hypervisor resource and thus H_SET_MODE hcall is used to set/ unset it. Introduce new case H_SET_MODE_RESOURCE_SET_DAWR1 for 2nd DAWR. Also, KVM will support 2nd DAWR only if CPU_FTR_DAWR1 is set. Signed-off-by: Ravi Bangoria Signed-off-by: Paul Mackerras --- Documentation/virt/kvm/api.rst | 2 ++ arch/powerpc/include/asm/hvcall.h | 8 +++++- arch/powerpc/include/asm/kvm_host.h | 3 +++ arch/powerpc/include/uapi/asm/kvm.h | 2 ++ arch/powerpc/kernel/asm-offsets.c | 2 ++ arch/powerpc/kvm/book3s_hv.c | 43 +++++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_nested.c | 7 +++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 23 +++++++++++++++++ tools/arch/powerpc/include/uapi/asm/kvm.h | 2 ++ 9 files changed, 91 insertions(+), 1 deletion(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 482508ec7cc4..a783a8da818b 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -2276,6 +2276,8 @@ registers, find a list below: PPC KVM_REG_PPC_PSSCR 64 PPC KVM_REG_PPC_DEC_EXPIRY 64 PPC KVM_REG_PPC_PTCR 64 + PPC KVM_REG_PPC_DAWR1 64 + PPC KVM_REG_PPC_DAWRX1 64 PPC KVM_REG_PPC_TM_GPR0 64 ... PPC KVM_REG_PPC_TM_GPR31 64 diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index cb4d07e2c0cc..ed6086d57b22 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -569,16 +569,22 @@ struct hv_guest_state { u64 pidr; u64 cfar; u64 ppr; + /* Version 1 ends here */ + u64 dawr1; + u64 dawrx1; + /* Version 2 ends here */ }; /* Latest version of hv_guest_state structure */ -#define HV_GUEST_STATE_VERSION 1 +#define HV_GUEST_STATE_VERSION 2 static inline int hv_guest_state_size(unsigned int version) { switch (version) { case 1: return offsetofend(struct hv_guest_state, ppr); + case 2: + return offsetofend(struct hv_guest_state, dawrx1); default: return -1; } diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index cf52081c9282..05fb00d37609 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -306,6 +306,7 @@ struct kvm_arch { u8 svm_enabled; bool threads_indep; bool nested_enable; + bool dawr1_enabled; pgd_t *pgtable; u64 process_table; struct dentry *debugfs_dir; @@ -585,6 +586,8 @@ struct kvm_vcpu_arch { ulong dabr; ulong dawr0; ulong dawrx0; + ulong dawr1; + ulong dawrx1; ulong ciabr; ulong cfar; ulong ppr; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index c3af3f324c5a..9f18fa090f1f 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -644,6 +644,8 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1) #define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2) #define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) +#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 
| 0xc4) +#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 410b8c47443f..4771a8511216 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -528,6 +528,8 @@ int main(void) OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0); OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0); + OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1); + OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1); OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 07531ac6ba5d..b37bddfead1b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -785,6 +785,22 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, vcpu->arch.dawr0 = value1; vcpu->arch.dawrx0 = value2; return H_SUCCESS; + case H_SET_MODE_RESOURCE_SET_DAWR1: + if (!kvmppc_power8_compatible(vcpu)) + return H_P2; + if (!ppc_breakpoint_available()) + return H_P2; + if (!cpu_has_feature(CPU_FTR_DAWR1)) + return H_P2; + if (!vcpu->kvm->arch.dawr1_enabled) + return H_FUNCTION; + if (mflags) + return H_UNSUPPORTED_FLAG_START; + if (value2 & DABRX_HYP) + return H_P4; + vcpu->arch.dawr1 = value1; + vcpu->arch.dawrx1 = value2; + return H_SUCCESS; case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: /* KVM does not support mflags=2 (AIL=2) */ if (mflags != 0 && mflags != 3) @@ -1764,6 +1780,12 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_DAWRX: *val = get_reg_val(id, vcpu->arch.dawrx0); break; + case KVM_REG_PPC_DAWR1: + *val = get_reg_val(id, vcpu->arch.dawr1); + break; + case KVM_REG_PPC_DAWRX1: + *val = get_reg_val(id, vcpu->arch.dawrx1); + break; case KVM_REG_PPC_CIABR: *val = get_reg_val(id, vcpu->arch.ciabr); break; @@ -1996,6 +2018,12 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_DAWRX: vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; break; + case KVM_REG_PPC_DAWR1: + vcpu->arch.dawr1 = set_reg_val(id, *val); + break; + case KVM_REG_PPC_DAWRX1: + vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; + break; case KVM_REG_PPC_CIABR: vcpu->arch.ciabr = set_reg_val(id, *val); /* Don't allow setting breakpoints in hypervisor code */ @@ -3453,6 +3481,13 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0); unsigned long host_psscr = mfspr(SPRN_PSSCR); unsigned long host_pidr = mfspr(SPRN_PID); + unsigned long host_dawr1 = 0; + unsigned long host_dawrx1 = 0; + + if (cpu_has_feature(CPU_FTR_DAWR1)) { + host_dawr1 = mfspr(SPRN_DAWR1); + host_dawrx1 = mfspr(SPRN_DAWRX1); + } /* * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0, @@ -3491,6 +3526,10 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, if (dawr_enabled()) { mtspr(SPRN_DAWR0, vcpu->arch.dawr0); mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0); + if (cpu_has_feature(CPU_FTR_DAWR1)) { + mtspr(SPRN_DAWR1, vcpu->arch.dawr1); + mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1); + } } mtspr(SPRN_CIABR, vcpu->arch.ciabr); mtspr(SPRN_IC, vcpu->arch.ic); @@ -3544,6 +3583,10 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_CIABR, host_ciabr); mtspr(SPRN_DAWR0, host_dawr0); 
mtspr(SPRN_DAWRX0, host_dawrx0); + if (cpu_has_feature(CPU_FTR_DAWR1)) { + mtspr(SPRN_DAWR1, host_dawr1); + mtspr(SPRN_DAWRX1, host_dawrx1); + } mtspr(SPRN_PID, host_pidr); /* diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index d22e45b97f88..0cd0e7aad588 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -49,6 +49,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) hr->pidr = vcpu->arch.pid; hr->cfar = vcpu->arch.cfar; hr->ppr = vcpu->arch.ppr; + hr->dawr1 = vcpu->arch.dawr1; + hr->dawrx1 = vcpu->arch.dawrx1; } static void byteswap_pt_regs(struct pt_regs *regs) @@ -91,6 +93,8 @@ static void byteswap_hv_regs(struct hv_guest_state *hr) hr->pidr = swab64(hr->pidr); hr->cfar = swab64(hr->cfar); hr->ppr = swab64(hr->ppr); + hr->dawr1 = swab64(hr->dawr1); + hr->dawrx1 = swab64(hr->dawrx1); } static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, @@ -138,6 +142,7 @@ static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) /* Don't let data address watchpoint match in hypervisor state */ hr->dawrx0 &= ~DAWRX_HYP; + hr->dawrx1 &= ~DAWRX_HYP; /* Don't let completed instruction address breakpt match in HV state */ if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) @@ -167,6 +172,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) vcpu->arch.pid = hr->pidr; vcpu->arch.cfar = hr->cfar; vcpu->arch.ppr = hr->ppr; + vcpu->arch.dawr1 = hr->dawr1; + vcpu->arch.dawrx1 = hr->dawrx1; } void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 75804062f2c5..65161c062dc5 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -57,6 +57,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define STACK_SLOT_HFSCR (SFS-72) #define STACK_SLOT_AMR (SFS-80) #define STACK_SLOT_UAMOR (SFS-88) +#define STACK_SLOT_DAWR1 (SFS-96) +#define STACK_SLOT_DAWRX1 (SFS-104) /* the following is used by the P9 short path */ #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ @@ -715,6 +717,12 @@ BEGIN_FTR_SECTION std r7, STACK_SLOT_DAWRX0(r1) std r8, STACK_SLOT_IAMR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +BEGIN_FTR_SECTION + mfspr r6, SPRN_DAWR1 + mfspr r7, SPRN_DAWRX1 + std r6, STACK_SLOT_DAWR1(r1) + std r7, STACK_SLOT_DAWRX1(r1) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1) mfspr r5, SPRN_AMR std r5, STACK_SLOT_AMR(r1) @@ -805,6 +813,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) ld r6, VCPU_DAWRX0(r4) mtspr SPRN_DAWR0, r5 mtspr SPRN_DAWRX0, r6 +BEGIN_FTR_SECTION + ld r5, VCPU_DAWR1(r4) + ld r6, VCPU_DAWRX1(r4) + mtspr SPRN_DAWR1, r5 + mtspr SPRN_DAWRX1, r6 +END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) 1: ld r7, VCPU_CIABR(r4) ld r8, VCPU_TAR(r4) @@ -1769,6 +1783,12 @@ BEGIN_FTR_SECTION mtspr SPRN_DAWR0, r6 mtspr SPRN_DAWRX0, r7 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +BEGIN_FTR_SECTION + ld r6, STACK_SLOT_DAWR1(r1) + ld r7, STACK_SLOT_DAWRX1(r1) + mtspr SPRN_DAWR1, r6 + mtspr SPRN_DAWRX1, r7 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1) BEGIN_FTR_SECTION ld r5, STACK_SLOT_TID(r1) ld r6, STACK_SLOT_PSSCR(r1) @@ -3343,6 +3363,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_IAMR, r0 mtspr SPRN_CIABR, r0 mtspr SPRN_DAWRX0, r0 +BEGIN_FTR_SECTION + mtspr SPRN_DAWRX1, r0 +END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) BEGIN_MMU_FTR_SECTION b 4f diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h 
b/tools/arch/powerpc/include/uapi/asm/kvm.h index c3af3f324c5a..9f18fa090f1f 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -644,6 +644,8 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1) #define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2) #define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) +#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4) +#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs -- cgit v1.2.3 From d9a47edabc4f948102753fa9d41f2dc1dbeb28be Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Wed, 16 Dec 2020 16:12:19 +0530 Subject: KVM: PPC: Book3S HV: Introduce new capability for 2nd DAWR Introduce KVM_CAP_PPC_DAWR1 which can be used by QEMU to query whether KVM supports 2nd DAWR or not. The capability is by default disabled even when the underlying CPU supports 2nd DAWR. QEMU needs to check and enable it manually to use the feature. Signed-off-by: Ravi Bangoria Signed-off-by: Paul Mackerras --- Documentation/virt/kvm/api.rst | 10 ++++++++++ arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 12 ++++++++++++ arch/powerpc/kvm/powerpc.c | 10 ++++++++++ include/uapi/linux/kvm.h | 1 + tools/include/uapi/linux/kvm.h | 1 + 6 files changed, 35 insertions(+) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a783a8da818b..45fd862ac128 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6215,6 +6215,16 @@ the bus lock vm exit can be preempted by a higher priority VM exit, the exit notifications to userspace can be KVM_EXIT_BUS_LOCK or other reasons. KVM_RUN_BUS_LOCK flag is used to distinguish between them. +7.22 KVM_CAP_PPC_DAWR1 +---------------------- + +:Architectures: ppc +:Parameters: none +:Returns: 0 on success, -EINVAL when CPU doesn't support 2nd DAWR + +This capability can be used to check / enable 2nd DAWR feature provided +by POWER10 processor. + 8. Other capabilities. 
====================== diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0a056c64c317..13c39d24dda5 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -314,6 +314,7 @@ struct kvmppc_ops { int size); int (*enable_svm)(struct kvm *kvm); int (*svm_off)(struct kvm *kvm); + int (*enable_dawr1)(struct kvm *kvm); }; extern struct kvmppc_ops *kvmppc_hv_ops; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index b37bddfead1b..dffbfd1ab47b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5642,6 +5642,17 @@ out: return ret; } +static int kvmhv_enable_dawr1(struct kvm *kvm) +{ + if (!cpu_has_feature(CPU_FTR_DAWR1)) + return -ENODEV; + + /* kvm == NULL means the caller is testing if the capability exists */ + if (kvm) + kvm->arch.dawr1_enabled = true; + return 0; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5685,6 +5696,7 @@ static struct kvmppc_ops kvm_ops_hv = { .store_to_eaddr = kvmhv_store_to_eaddr, .enable_svm = kvmhv_enable_svm, .svm_off = kvmhv_svm_off, + .enable_dawr1 = kvmhv_enable_dawr1, }; static int kvm_init_subcore_bitmap(void) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index cf52d26f49cd..5914c32d9ef3 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -678,6 +678,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = hv_enabled && kvmppc_hv_ops->enable_svm && !kvmppc_hv_ops->enable_svm(NULL); break; + case KVM_CAP_PPC_DAWR1: + r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 && + !kvmppc_hv_ops->enable_dawr1(NULL)); + break; #endif default: r = 0; @@ -2187,6 +2191,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, break; r = kvm->arch.kvm_ops->enable_svm(kvm); break; + case KVM_CAP_PPC_DAWR1: + r = -EINVAL; + if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) + break; + r = kvm->arch.kvm_ops->enable_dawr1(kvm); + break; #endif default: r = -EINVAL; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 63f8f6e95648..8b281f722e5b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1077,6 +1077,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_SYS_HYPERV_CPUID 191 #define KVM_CAP_DIRTY_LOG_RING 192 #define KVM_CAP_X86_BUS_LOCK_EXIT 193 +#define KVM_CAP_PPC_DAWR1 194 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 886802b8ffba..9c284a9c3ac5 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -1056,6 +1056,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 #define KVM_CAP_SYS_HYPERV_CPUID 191 #define KVM_CAP_DIRTY_LOG_RING 192 +#define KVM_CAP_PPC_DAWR1 194 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From b1b1697ae0cc82544a03b69df49a69a9ac307b9c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 18 Jan 2021 16:28:06 +1000 Subject: KVM: PPC: Book3S HV: Remove support for running HPT guest on RPT host without mixed mode support This reverts much of commit c01015091a770 ("KVM: PPC: Book3S HV: Run HPT guests on POWER9 radix hosts"), which was required to run HPT guests on RPT hosts on early POWER9 CPUs without support for "mixed mode", which meant the host could not run with MMU on while guests were running. 
This code has some corner case bugs, e.g., when the guest hits a machine check or HMI the primary locks up waiting for secondaries to switch LPCR to host, which they never do. This could all be fixed in software, but most CPUs in production have mixed mode support, and those that don't are believed to be all in installations that don't use this capability. So simplify things and remove support. Signed-off-by: Nicholas Piggin Tested-by: Fabiano Rosas Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_book3s_asm.h | 11 --- arch/powerpc/kernel/asm-offsets.c | 3 - arch/powerpc/kvm/book3s_hv.c | 56 ++++------------ arch/powerpc/kvm/book3s_hv_builtin.c | 108 +----------------------------- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 80 ++++++---------------- 5 files changed, 32 insertions(+), 226 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 078f4648ea27..b6d31bff5209 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -74,16 +74,6 @@ struct kvm_split_mode { u8 do_nap; u8 napped[MAX_SMT_THREADS]; struct kvmppc_vcore *vc[MAX_SUBCORES]; - /* Bits for changing lpcr on P9 */ - unsigned long lpcr_req; - unsigned long lpidr_req; - unsigned long host_lpcr; - u32 do_set; - u32 do_restore; - union { - u32 allphases; - u8 phase[4]; - } lpcr_sync; }; /* @@ -110,7 +100,6 @@ struct kvmppc_host_state { u8 hwthread_state; u8 host_ipi; u8 ptid; /* thread number within subcore when split */ - u8 tid; /* thread number within whole core */ u8 fake_suspend; struct kvm_vcpu *kvm_vcpu; struct kvmppc_vcore *kvm_vcore; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 4771a8511216..b690c70f061c 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -670,7 +670,6 @@ int main(void) HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); HSTATE_FIELD(HSTATE_PTID, ptid); - HSTATE_FIELD(HSTATE_TID, tid); HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend); HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]); HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]); @@ -700,8 +699,6 @@ int main(void) OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar); OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap); OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped); - OFFSET(KVM_SPLIT_DO_SET, kvm_split_mode, do_set); - OFFSET(KVM_SPLIT_DO_RESTORE, kvm_split_mode, do_restore); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index dffbfd1ab47b..89c686c17f06 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -134,7 +134,7 @@ static inline bool nesting_enabled(struct kvm *kvm) } /* If set, the threads on each CPU core have to be in the same MMU mode */ -static bool no_mixing_hpt_and_radix; +static bool no_mixing_hpt_and_radix __read_mostly; static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); @@ -2890,11 +2890,6 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) return false; - /* Some POWER9 chips require all threads to be in the same MMU mode */ - if (no_mixing_hpt_and_radix && - kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm)) - return false; - if (n_threads < cip->max_subcore_threads) n_threads = cip->max_subcore_threads; if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) @@ -2933,6 +2928,9 @@ static void 
prepare_threads(struct kvmppc_vcore *vc) for_each_runnable_thread(i, vcpu, vc) { if (signal_pending(vcpu->arch.run_task)) vcpu->arch.ret = -EINTR; + else if (no_mixing_hpt_and_radix && + kvm_is_radix(vc->kvm) != radix_enabled()) + vcpu->arch.ret = -EINVAL; else if (vcpu->arch.vpa.update_pending || vcpu->arch.slb_shadow.update_pending || vcpu->arch.dtl.update_pending) @@ -3138,7 +3136,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) int controlled_threads; int trap; bool is_power8; - bool hpt_on_radix; /* * Remove from the list any threads that have a signal pending @@ -3171,11 +3168,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * this is a HPT guest on a radix host machine where the * CPU threads may not be in different MMU modes. */ - hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() && - !kvm_is_radix(vc->kvm); - if (((controlled_threads > 1) && - ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) || - (hpt_on_radix && vc->kvm->arch.threads_indep)) { + if ((controlled_threads > 1) && + ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { for_each_runnable_thread(i, vcpu, vc) { vcpu->arch.ret = -EBUSY; kvmppc_remove_runnable(vc, vcpu); @@ -3243,7 +3237,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S) && !cpu_has_feature(CPU_FTR_ARCH_300); - if (split > 1 || hpt_on_radix) { + if (split > 1) { sip = &split_info; memset(&split_info, 0, sizeof(split_info)); for (sub = 0; sub < core_info.n_subcores; ++sub) @@ -3265,13 +3259,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) split_info.subcore_size = subcore_size; } else { split_info.subcore_size = 1; - if (hpt_on_radix) { - /* Use the split_info for LPCR/LPIDR changes */ - split_info.lpcr_req = vc->lpcr; - split_info.lpidr_req = vc->kvm->arch.lpid; - split_info.host_lpcr = vc->kvm->arch.host_lpcr; - split_info.do_set = 1; - } } /* order writes to split_info before kvm_split_mode pointer */ @@ -3281,7 +3268,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) for (thr = 0; thr < controlled_threads; ++thr) { struct paca_struct *paca = paca_ptrs[pcpu + thr]; - paca->kvm_hstate.tid = thr; paca->kvm_hstate.napping = 0; paca->kvm_hstate.kvm_split_mode = sip; } @@ -3355,10 +3341,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * When doing micro-threading, poke the inactive threads as well. * This gets them to the nap instruction after kvm_do_nap, * which reduces the time taken to unsplit later. - * For POWER9 HPT guest on radix host, we need all the secondary - * threads woken up so they can do the LPCR/LPIDR change. 
*/ - if (cmd_bit || hpt_on_radix) { + if (cmd_bit) { split_info.do_nap = 1; /* ask secondaries to nap when done */ for (thr = 1; thr < threads_per_subcore; ++thr) if (!(active & (1 << thr))) @@ -3419,19 +3403,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) cpu_relax(); ++loops; } - } else if (hpt_on_radix) { - /* Wait for all threads to have seen final sync */ - for (thr = 1; thr < controlled_threads; ++thr) { - struct paca_struct *paca = paca_ptrs[pcpu + thr]; - - while (paca->kvm_hstate.kvm_split_mode) { - HMT_low(); - barrier(); - } - HMT_medium(); - } + split_info.do_nap = 0; } - split_info.do_nap = 0; kvmppc_set_host_core(pcpu); @@ -4216,7 +4189,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, kvmppc_clear_host_core(pcpu); - local_paca->kvm_hstate.tid = 0; local_paca->kvm_hstate.napping = 0; local_paca->kvm_hstate.kvm_split_mode = NULL; kvmppc_start_thread(vcpu, vc); @@ -4401,15 +4373,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) do { /* - * The early POWER9 chips that can't mix radix and HPT threads - * on the same core also need the workaround for the problem - * where the TLB would prefetch entries in the guest exit path - * for radix guests using the guest PIDR value and LPID 0. - * The workaround is in the old path (kvmppc_run_vcpu()) - * but not the new path (kvmhv_run_single_vcpu()). + * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu + * path, which also handles hash and dependent threads mode. */ if (kvm->arch.threads_indep && kvm_is_radix(kvm) && - !no_mixing_hpt_and_radix) + !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, vcpu->arch.vcore->lpcr); else diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 8053efdf7ea7..f3d3183249fe 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -277,8 +277,7 @@ void kvmhv_commence_exit(int trap) struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; int ptid = local_paca->kvm_hstate.ptid; struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode; - int me, ee, i, t; - int cpu0; + int me, ee, i; /* Set our bit in the threads-exiting-guest map in the 0xff00 bits of vcore->entry_exit_map */ @@ -320,22 +319,6 @@ void kvmhv_commence_exit(int trap) if ((ee >> 8) == 0) kvmhv_interrupt_vcore(vc, ee); } - - /* - * On POWER9 when running a HPT guest on a radix host (sip != NULL), - * we have to interrupt inactive CPU threads to get them to - * restore the host LPCR value. - */ - if (sip->lpcr_req) { - if (cmpxchg(&sip->do_restore, 0, 1) == 0) { - vc = local_paca->kvm_hstate.kvm_vcore; - cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid; - for (t = 1; t < threads_per_core; ++t) { - if (sip->napped[t]) - kvmhv_rm_send_ipi(cpu0 + t); - } - } - } } struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv; @@ -667,95 +650,6 @@ void kvmppc_bad_interrupt(struct pt_regs *regs) panic("Bad KVM trap"); } -/* - * Functions used to switch LPCR HR and UPRT bits on all threads - * when entering and exiting HPT guests on a radix host. 
- */ - -#define PHASE_REALMODE 1 /* in real mode */ -#define PHASE_SET_LPCR 2 /* have set LPCR */ -#define PHASE_OUT_OF_GUEST 4 /* have finished executing in guest */ -#define PHASE_RESET_LPCR 8 /* have reset LPCR to host value */ - -#define ALL(p) (((p) << 24) | ((p) << 16) | ((p) << 8) | (p)) - -static void wait_for_sync(struct kvm_split_mode *sip, int phase) -{ - int thr = local_paca->kvm_hstate.tid; - - sip->lpcr_sync.phase[thr] |= phase; - phase = ALL(phase); - while ((sip->lpcr_sync.allphases & phase) != phase) { - HMT_low(); - barrier(); - } - HMT_medium(); -} - -void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip) -{ - int num_sets; - unsigned long rb, set; - - /* wait for every other thread to get to real mode */ - wait_for_sync(sip, PHASE_REALMODE); - - /* Set LPCR and LPIDR */ - mtspr(SPRN_LPCR, sip->lpcr_req); - mtspr(SPRN_LPID, sip->lpidr_req); - isync(); - - /* - * P10 will flush all the congruence class with a single tlbiel - */ - if (cpu_has_feature(CPU_FTR_ARCH_31)) - num_sets = 1; - else - num_sets = POWER9_TLB_SETS_RADIX; - - /* Invalidate the TLB on thread 0 */ - if (local_paca->kvm_hstate.tid == 0) { - sip->do_set = 0; - asm volatile("ptesync" : : : "memory"); - for (set = 0; set < num_sets; ++set) { - rb = TLBIEL_INVAL_SET_LPID + - (set << TLBIEL_INVAL_SET_SHIFT); - asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : : - "r" (rb), "r" (0)); - } - asm volatile("ptesync" : : : "memory"); - } - - /* indicate that we have done so and wait for others */ - wait_for_sync(sip, PHASE_SET_LPCR); - /* order read of sip->lpcr_sync.allphases vs. sip->do_set */ - smp_rmb(); -} - -/* - * Called when a thread that has been in the guest needs - * to reload the host LPCR value - but only on POWER9 when - * running a HPT guest on a radix host. - */ -void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip) -{ - /* we're out of the guest... */ - wait_for_sync(sip, PHASE_OUT_OF_GUEST); - - mtspr(SPRN_LPID, 0); - mtspr(SPRN_LPCR, sip->host_lpcr); - isync(); - - if (local_paca->kvm_hstate.tid == 0) { - sip->do_restore = 0; - smp_wmb(); /* order store of do_restore vs. 
phase */ - } - - wait_for_sync(sip, PHASE_RESET_LPCR); - smp_mb(); - local_paca->kvm_hstate.kvm_split_mode = NULL; -} - static void kvmppc_end_cede(struct kvm_vcpu *vcpu) { vcpu->arch.ceded = 0; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 65161c062dc5..565464e3f9a5 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -87,19 +87,6 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) RFI_TO_KERNEL kvmppc_call_hv_entry: -BEGIN_FTR_SECTION - /* On P9, do LPCR setting, if necessary */ - ld r3, HSTATE_SPLIT_MODE(r13) - cmpdi r3, 0 - beq 46f - lwz r4, KVM_SPLIT_DO_SET(r3) - cmpwi r4, 0 - beq 46f - bl kvmhv_p9_set_lpcr - nop -46: -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_hv_entry @@ -363,11 +350,11 @@ kvm_secondary_got_guest: LOAD_REG_ADDR(r6, decrementer_max) ld r6, 0(r6) mtspr SPRN_HDEC, r6 +BEGIN_FTR_SECTION /* and set per-LPAR registers, if doing dynamic micro-threading */ ld r6, HSTATE_SPLIT_MODE(r13) cmpdi r6, 0 beq 63f -BEGIN_FTR_SECTION ld r0, KVM_SPLIT_RPR(r6) mtspr SPRN_RPR, r0 ld r0, KVM_SPLIT_PMMAR(r6) @@ -375,16 +362,7 @@ BEGIN_FTR_SECTION ld r0, KVM_SPLIT_LDBAR(r6) mtspr SPRN_LDBAR, r0 isync -FTR_SECTION_ELSE - /* On P9 we use the split_info for coordinating LPCR changes */ - lwz r4, KVM_SPLIT_DO_SET(r6) - cmpwi r4, 0 - beq 1f - mr r3, r6 - bl kvmhv_p9_set_lpcr - nop -1: -ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 63: /* Order load of vcpu after load of vcore */ lwsync @@ -454,19 +432,15 @@ kvm_no_guest: mtcr r5 blr -53: HMT_LOW +53: +BEGIN_FTR_SECTION + HMT_LOW ld r5, HSTATE_KVM_VCORE(r13) cmpdi r5, 0 bne 60f ld r3, HSTATE_SPLIT_MODE(r13) cmpdi r3, 0 beq kvm_no_guest - lwz r0, KVM_SPLIT_DO_SET(r3) - cmpwi r0, 0 - bne kvmhv_do_set - lwz r0, KVM_SPLIT_DO_RESTORE(r3) - cmpwi r0, 0 - bne kvmhv_do_restore lbz r0, KVM_SPLIT_DO_NAP(r3) cmpwi r0, 0 beq kvm_no_guest @@ -474,24 +448,19 @@ kvm_no_guest: b kvm_unsplit_nap 60: HMT_MEDIUM b kvm_secondary_got_guest +FTR_SECTION_ELSE + HMT_LOW + ld r5, HSTATE_KVM_VCORE(r13) + cmpdi r5, 0 + beq kvm_no_guest + HMT_MEDIUM + b kvm_secondary_got_guest +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 54: li r0, KVM_HWTHREAD_IN_KVM stb r0, HSTATE_HWTHREAD_STATE(r13) b kvm_no_guest -kvmhv_do_set: - /* Set LPCR, LPIDR etc. on P9 */ - HMT_MEDIUM - bl kvmhv_p9_set_lpcr - nop - b kvm_no_guest - -kvmhv_do_restore: - HMT_MEDIUM - bl kvmhv_p9_restore_lpcr - nop - b kvm_no_guest - /* * Here the primary thread is trying to return the core to * whole-core mode, so we need to nap. 
@@ -529,7 +498,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* Set kvm_split_mode.napped[tid] = 1 */ ld r3, HSTATE_SPLIT_MODE(r13) li r0, 1 - lbz r4, HSTATE_TID(r13) + lhz r4, PACAPACAINDEX(r13) + clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */ addi r4, r4, KVM_SPLIT_NAPPED stbx r0, r3, r4 /* Check the do_nap flag again after setting napped[] */ @@ -1958,24 +1928,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 19: lis r8,0x7fff /* MAX_INT@h */ mtspr SPRN_HDEC,r8 -16: -BEGIN_FTR_SECTION - /* On POWER9 with HPT-on-radix we need to wait for all other threads */ - ld r3, HSTATE_SPLIT_MODE(r13) - cmpdi r3, 0 - beq 47f - lwz r8, KVM_SPLIT_DO_RESTORE(r3) - cmpwi r8, 0 - beq 47f - bl kvmhv_p9_restore_lpcr - nop - b 48f -47: -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - ld r8,KVM_HOST_LPCR(r4) +16: ld r8,KVM_HOST_LPCR(r4) mtspr SPRN_LPCR,r8 isync -48: + #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING /* Finish timing, if we have a vcpu */ ld r4, HSTATE_KVM_VCPU(r13) @@ -2799,8 +2755,10 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) beq kvm_end_cede cmpwi r0, NAPPING_NOVCPU beq kvm_novcpu_wakeup +BEGIN_FTR_SECTION cmpwi r0, NAPPING_UNSPLIT beq kvm_unsplit_wakeup +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) twi 31,0,0 /* Nap state must not be zero */ 33: mr r4, r3 -- cgit v1.2.3 From 68ad28a4cdd478fa2ae37951b911ab664011098b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 18 Jan 2021 16:28:07 +1000 Subject: KVM: PPC: Book3S HV: Fix radix guest SLB side channel The slbmte instruction is legal in radix mode, including radix guest mode. This means radix guests can load the SLB with arbitrary data. KVM host does not clear the SLB when exiting a guest if it was a radix guest, which would allow a rogue radix guest to use the SLB as a side channel to communicate with other guests. Fix this by ensuring the SLB is cleared when coming out of a radix guest. Only the first 4 entries are a concern, because radix guests always run with LPCR[UPRT]=1, which limits the reach of slbmte. slbia is not used (except in a non-performance-critical path) because it can clear cached translations. Signed-off-by: Nicholas Piggin Reviewed-by: Fabiano Rosas Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 39 ++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 565464e3f9a5..174e7b6eba34 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1171,6 +1171,20 @@ EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9) mr r4, r3 b fast_guest_entry_c guest_exit_short_path: + /* + * Malicious or buggy radix guests may have inserted SLB entries + * (only 0..3 because radix always runs with UPRT=1), so these must + * be cleared here to avoid side-channels. slbmte is used rather + * than slbia, as it won't clear cached translations. 
+ */ + li r0,0 + slbmte r0,r0 + li r4,1 + slbmte r0,r4 + li r4,2 + slbmte r0,r4 + li r4,3 + slbmte r0,r4 li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) @@ -1483,7 +1497,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ lbz r0, KVM_RADIX(r5) li r5, 0 cmpwi r0, 0 - bne 3f /* for radix, save 0 entries */ + bne 0f /* for radix, save 0 entries */ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ mtctr r0 li r6,0 @@ -1504,12 +1518,9 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ slbmte r0,r0 slbia ptesync -3: stw r5,VCPU_SLB_MAX(r9) + stw r5,VCPU_SLB_MAX(r9) /* load host SLB entries */ -BEGIN_MMU_FTR_SECTION - b 0f -END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) ld r8,PACA_SLBSHADOWPTR(r13) .rept SLB_NUM_BOLTED @@ -1522,7 +1533,17 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) slbmte r6,r5 1: addi r8,r8,16 .endr -0: + b guest_bypass + +0: /* Sanitise radix guest SLB, see guest_exit_short_path comment. */ + li r0,0 + slbmte r0,r0 + li r4,1 + slbmte r0,r4 + li r4,2 + slbmte r0,r4 + li r4,3 + slbmte r0,r4 guest_bypass: stw r12, STACK_SLOT_TRAP(r1) @@ -3325,12 +3346,14 @@ BEGIN_FTR_SECTION mtspr SPRN_DAWRX1, r0 END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) + /* Clear hash and radix guest SLB, see guest_exit_short_path comment. */ + slbmte r0, r0 + slbia + BEGIN_MMU_FTR_SECTION b 4f END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) - slbmte r0, r0 - slbia ptesync ld r8, PACA_SLBSHADOWPTR(r13) .rept SLB_NUM_BOLTED -- cgit v1.2.3 From 078ebe35fc0a8ab847809765be34937e656e70a6 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 18 Jan 2021 16:28:08 +1000 Subject: KVM: PPC: Book3S HV: No need to clear radix host SLB before loading HPT guest Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 174e7b6eba34..e413d8e34dd8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -902,15 +902,19 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) cmpdi r3, 512 /* 1 microsecond */ blt hdec_soon - /* For hash guest, clear out and reload the SLB */ ld r6, VCPU_KVM(r4) lbz r0, KVM_RADIX(r6) cmpwi r0, 0 bne 9f + + /* For hash guest, clear out and reload the SLB */ +BEGIN_MMU_FTR_SECTION + /* Radix host won't have populated the SLB, so no need to clear */ li r6, 0 slbmte r6, r6 slbia ptesync +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */ lwz r5,VCPU_SLB_MAX(r4) -- cgit v1.2.3 From 7a7f94a3a9c7a5d25ec172c94a42509dce61eb0b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 18 Jan 2021 16:28:09 +1000 Subject: KVM: PPC: Book3S HV: Use POWER9 SLBIA IH=6 variant to clear SLB IH=6 may preserve hypervisor real-mode ERAT entries and is the recommended SLBIA hint for switching partitions. 
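An illustrative C sketch of the same idea, not part of the patch: the helper name is hypothetical, and it assumes the PPC_SLBIA() macro from asm/ppc-opcode.h (the macro the patch itself uses on the assembly side) to emit slbia with the IH field set.

	/*
	 * Hypothetical helper, for illustration only: invalidate the SLB with
	 * IH=6, the hint recommended when switching partitions because it may
	 * preserve hypervisor real-mode ERAT entries.
	 */
	static inline void slb_clear_for_partition_switch(void)
	{
		asm volatile("slbmte %0,%0\n\t"		/* rewrite entry 0 as invalid */
			     PPC_SLBIA(6)		/* slbia with IH=6 */
			     "\n\tptesync"
			     : : "r" (0UL) : "memory");
	}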
Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index e413d8e34dd8..426bdc59fde2 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -912,7 +912,7 @@ BEGIN_MMU_FTR_SECTION /* Radix host won't have populated the SLB, so no need to clear */ li r6, 0 slbmte r6, r6 - slbia + PPC_SLBIA(6) ptesync END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) @@ -1520,7 +1520,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ /* Finally clear out the SLB */ li r0,0 slbmte r0,r0 - slbia + PPC_SLBIA(6) ptesync stw r5,VCPU_SLB_MAX(r9) @@ -3352,7 +3352,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) /* Clear hash and radix guest SLB, see guest_exit_short_path comment. */ slbmte r0, r0 - slbia + PPC_SLBIA(6) BEGIN_MMU_FTR_SECTION b 4f -- cgit v1.2.3 From 63e9f2357309439f09509aa924faa3a9de84a38e Mon Sep 17 00:00:00 2001 From: Yang Li Date: Tue, 2 Feb 2021 11:30:53 +0800 Subject: KVM: PPC: remove unneeded semicolon Eliminate the following coccicheck warning: ./arch/powerpc/kvm/booke.c:701:2-3: Unneeded semicolon Reported-by: Abaci Robot Signed-off-by: Yang Li Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/booke.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 288a9820ec01..f38ae3e54b37 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -698,7 +698,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); r = 1; - }; + } return r; } -- cgit v1.2.3 From 25edcc50d76c834479d11fcc7de46f3da4d95121 Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Thu, 4 Feb 2021 17:05:17 -0300 Subject: KVM: PPC: Book3S HV: Save and restore FSCR in the P9 path The Facility Status and Control Register is a privileged SPR that defines the availability of some features in problem state. Since it can be written by the guest, we must restore it to the previous host value after guest exit. This restoration is currently done by taking the value from current->thread.fscr, which in the P9 path is not enough anymore because the guest could context switch the QEMU thread, causing the guest-current value to be saved into the thread struct. The above situation manifested when running a QEMU linked against a libc with System Call Vectored support, which causes scv instructions to be run by QEMU early during the guest boot (during SLOF), at which point the FSCR is 0 due to guest entry. After a few scv calls (1 to a couple hundred), the context switching happens and the QEMU thread runs with the guest value, resulting in a Facility Unavailable interrupt. This patch saves and restores the host value of FSCR in the inner guest entry loop in a way independent of current->thread.fscr. The old way of doing it is still kept in place because it works for the old entry path. 
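A minimal C sketch of the pattern the change below applies, not the actual kvmhv_p9_guest_entry() code: the function name is hypothetical and the guest-run step is elided; it only shows saving the host FSCR up front and restoring it on exit without going through current->thread.fscr.

	static void p9_fscr_save_restore_sketch(struct kvm_vcpu *vcpu)
	{
		/* Snapshot the host value before any guest state is loaded. */
		unsigned long host_fscr = mfspr(SPRN_FSCR);

		/* ... load vcpu->arch.fscr, run the guest, handle the exit ... */

		/* Restore only if a different value is currently in the register. */
		if (host_fscr != vcpu->arch.fscr)
			mtspr(SPRN_FSCR, host_fscr);
	}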
Signed-off-by: Fabiano Rosas Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 89c686c17f06..f6d470157fcb 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3611,6 +3611,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long host_tidr = mfspr(SPRN_TIDR); unsigned long host_iamr = mfspr(SPRN_IAMR); unsigned long host_amr = mfspr(SPRN_AMR); + unsigned long host_fscr = mfspr(SPRN_FSCR); s64 dec; u64 tb; int trap, save_pmu; @@ -3751,6 +3752,9 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, if (host_amr != vcpu->arch.amr) mtspr(SPRN_AMR, host_amr); + if (host_fscr != vcpu->arch.fscr) + mtspr(SPRN_FSCR, host_fscr); + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); store_fp_state(&vcpu->arch.fp); #ifdef CONFIG_ALTIVEC -- cgit v1.2.3 From a722076e947023a0d6ffca79661324c45ff30641 Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Fri, 5 Feb 2021 13:41:54 -0300 Subject: KVM: PPC: Don't always report hash MMU capability for P9 < DD2.2 These machines don't support running both MMU types at the same time, so remove the KVM_CAP_PPC_MMU_HASH_V3 capability when the host is using Radix MMU. [paulus@ozlabs.org - added defensive check on kvmppc_hv_ops->hash_v3_possible] Signed-off-by: Fabiano Rosas Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 10 ++++++++++ arch/powerpc/kvm/powerpc.c | 4 ++-- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 13c39d24dda5..df4bda867bab 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -315,6 +315,7 @@ struct kvmppc_ops { int (*enable_svm)(struct kvm *kvm); int (*svm_off)(struct kvm *kvm); int (*enable_dawr1)(struct kvm *kvm); + bool (*hash_v3_possible)(void); }; extern struct kvmppc_ops *kvmppc_hv_ops; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index f6d470157fcb..f09708da216e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5625,6 +5625,15 @@ static int kvmhv_enable_dawr1(struct kvm *kvm) return 0; } +static bool kvmppc_hash_v3_possible(void) +{ + if (radix_enabled() && no_mixing_hpt_and_radix) + return false; + + return cpu_has_feature(CPU_FTR_ARCH_300) && + cpu_has_feature(CPU_FTR_HVMODE); +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5669,6 +5678,7 @@ static struct kvmppc_ops kvm_ops_hv = { .enable_svm = kvmhv_enable_svm, .svm_off = kvmhv_svm_off, .enable_dawr1 = kvmhv_enable_dawr1, + .hash_v3_possible = kvmppc_hash_v3_possible, }; static int kvm_init_subcore_bitmap(void) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 5914c32d9ef3..6c083a9b3545 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -611,8 +611,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = !!(hv_enabled && radix_enabled()); break; case KVM_CAP_PPC_MMU_HASH_V3: - r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) && - cpu_has_feature(CPU_FTR_HVMODE)); + r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible && + kvmppc_hv_ops->hash_v3_possible()); break; case KVM_CAP_PPC_NESTED_HV: r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && -- cgit v1.2.3 From 
ab950e1acd2175eae825cfcbac621c0625ad2a86 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 11 Feb 2021 17:09:45 +1100 Subject: KVM: PPC: Book3S HV: Ensure radix guest has no SLB entries Commit 68ad28a4cdd4 ("KVM: PPC: Book3S HV: Fix radix guest SLB side channel") changed the older guest entry path, with the side effect that vcpu->arch.slb_max no longer gets cleared for a radix guest. This means that a HPT guest which loads some SLB entries, switches to radix mode, runs the guest using the old guest entry path (e.g., because the indep_threads_mode module parameter has been set to false), and then switches back to HPT mode would now see the old SLB entries being present, whereas previously it would have seen no SLB entries. To avoid changing guest-visible behaviour, this adds a store instruction to clear vcpu->arch.slb_max for a radix guest using the old guest entry path. Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 426bdc59fde2..0bc1c8248050 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1539,8 +1539,12 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ .endr b guest_bypass -0: /* Sanitise radix guest SLB, see guest_exit_short_path comment. */ +0: /* + * Sanitise radix guest SLB, see guest_exit_short_path comment. + * We clear vcpu->arch.slb_max to match earlier behaviour. + */ li r0,0 + stw r0,VCPU_SLB_MAX(r9) slbmte r0,r0 li r4,1 slbmte r0,r4 -- cgit v1.2.3 From 72476aaa469179222b92c380de60c76b4cb9a318 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Feb 2021 15:38:30 +1000 Subject: KVM: PPC: Book3S HV: Fix host radix SLB optimisation with hash guests Commit 68ad28a4cdd4 ("KVM: PPC: Book3S HV: Fix radix guest SLB side channel") incorrectly removed the radix host instruction patch to skip re-loading the host SLB entries when exiting from a hash guest. Restore it. Fixes: 68ad28a4cdd4 ("KVM: PPC: Book3S HV: Fix radix guest SLB side channel") Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 0bc1c8248050..5e634db4809b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1525,6 +1525,9 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ stw r5,VCPU_SLB_MAX(r9) /* load host SLB entries */ +BEGIN_MMU_FTR_SECTION + b guest_bypass +END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) ld r8,PACA_SLBSHADOWPTR(r13) .rept SLB_NUM_BOLTED -- cgit v1.2.3 From d8d0da4eee5c4e86ea08abde6975848376b4ac13 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 10 Feb 2021 13:16:31 -0500 Subject: locking/arch: Move qrwlock.h include after qspinlock.h include/asm-generic/qrwlock.h was trying to get arch_spin_is_locked via asm-generic/qspinlock.h. However, this does not work because architectures might be using queued rwlocks but not queued spinlocks (csky), or because they might be defining their own queued_* macros before including asm/qspinlock.h. To fix this, ensure that asm/spinlock.h always includes qrwlock.h after defining arch_spin_is_locked (either directly for csky, or via asm/qspinlock.h for other architectures). The only inclusion elsewhere is in kernel/locking/qrwlock.c. 
That one is really unnecessary because the file is only compiled in SMP configurations (config QUEUED_RWLOCKS depends on SMP) and in that case linux/spinlock.h already includes asm/qrwlock.h if needed, via asm/spinlock.h. Reported-by: Guenter Roeck Signed-off-by: Waiman Long Fixes: 26128cb6c7e6 ("locking/rwlocks: Add contention detection for rwlocks") Tested-by: Guenter Roeck Reviewed-by: Ben Gardon [Add arch/sparc and kernel/locking parts per discussion with Waiman. - Paolo] Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/spinlock.h | 2 +- arch/mips/include/asm/spinlock.h | 2 +- arch/sparc/include/asm/spinlock_64.h | 2 +- arch/xtensa/include/asm/spinlock.h | 2 +- include/asm-generic/qrwlock.h | 3 ++- kernel/locking/qrwlock.c | 1 - 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 9083d6992603..0525c0b089ed 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -5,8 +5,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#include #include +#include /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 8a88eb265516..6ce2117e49f6 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -10,7 +10,6 @@ #define _ASM_SPINLOCK_H #include -#include #include @@ -27,5 +26,6 @@ static inline void queued_spin_unlock(struct qspinlock *lock) } #include +#include #endif /* _ASM_SPINLOCK_H */ diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 7fc82a233f49..3a9a0b0c7465 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -11,8 +11,8 @@ #include #include -#include #include +#include #endif /* !(__ASSEMBLY__) */ diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 584b0de6f2ca..41c449ece2d8 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h @@ -12,8 +12,8 @@ #define _XTENSA_SPINLOCK_H #include -#include #include +#include #define smp_mb__after_spinlock() smp_mb() diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 0020d3b820a7..7ae0ece07b4e 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -14,7 +14,8 @@ #include #include -#include + +/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */ /* * Writer states & reader shift and bias. diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index fe9ca92faa2a..4786dd271b45 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c @@ -12,7 +12,6 @@ #include #include #include -#include /** * queued_read_lock_slowpath - acquire read lock of a queue rwlock -- cgit v1.2.3 From 2e215216d66ffae2b3f4dab595e548ad5dcc57a8 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 10 Feb 2021 07:59:58 +0000 Subject: KVM: SVM: Make symbol 'svm_gp_erratum_intercept' static The sparse tool complains as follows: arch/x86/kvm/svm/svm.c:204:6: warning: symbol 'svm_gp_erratum_intercept' was not declared. Should it be static? This symbol is not used outside of svm.c, so this commit marks it static. 
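Purely for illustration (the one-line change itself follows below): giving the flag internal linkage is all that is needed, since nothing outside svm.c references it.

	/* File-local flag; static keeps it out of the global namespace and quiets sparse. */
	static bool svm_gp_erratum_intercept = true;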
Fixes: 82a11e9c6fa2b ("KVM: SVM: Add emulation support for #GP triggered by SVM instructions") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Message-Id: <20210210075958.1096317-1-weiyongjun1@huawei.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 42d4710074a6..adb3619a3c16 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -201,7 +201,7 @@ module_param(sev_es, int, 0444); bool __read_mostly dump_invalid_vmcb; module_param(dump_invalid_vmcb, bool, 0644); -bool svm_gp_erratum_intercept = true; +static bool svm_gp_erratum_intercept = true; static u8 rsm_ins_bytes[] = "\x0f\xaa"; -- cgit v1.2.3 From 47bc726fe8d1910872dc3d7e7ec70f8b9e6043b7 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 10 Feb 2021 03:17:19 +0000 Subject: KVM: selftests: Add operand to vmsave/vmload/vmrun in svm.c Building the KVM selftests with LLVM's integrated assembler fails with: $ CFLAGS=-fintegrated-as make -C tools/testing/selftests/kvm CC=clang lib/x86_64/svm.c:77:16: error: too few operands for instruction asm volatile ("vmsave\n\t" : : "a" (vmcb_gpa) : "memory"); ^ :1:2: note: instantiated into assembly here vmsave ^ lib/x86_64/svm.c:134:3: error: too few operands for instruction "vmload\n\t" ^ :1:2: note: instantiated into assembly here vmload ^ This is because LLVM IAS does not currently support calling vmsave, vmload, or vmload without an explicit %rax operand. Add an explicit operand to vmsave, vmload, and vmrum in svm.c. Fixing this was suggested by Sean Christopherson. Tested: building without this error in clang 11. The following patch (not queued yet) needs to be applied to solve the other remaining error: "selftests: kvm: remove reassignment of non-absolute variables". 
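An illustrative sketch, not the selftest code itself: a tiny wrapper (the name is made up here) shows the explicit-operand form that both GNU as and LLVM's integrated assembler accept, assuming the selftest's usual includes for uint64_t.

	static inline void sketch_vmsave(uint64_t vmcb_gpa)
	{
		/* Spell out the %rax operand instead of relying on the implicit form. */
		asm volatile("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
	}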
Suggested-by: Sean Christopherson Link: https://lore.kernel.org/kvm/X+Df2oQczVBmwEzi@google.com/ Reviewed-by: Jim Mattson Signed-off-by: Ricardo Koller Message-Id: <20210210031719.769837-1-ricarkol@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/x86_64/svm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c index 3a5c72ed2b79..827fe6028dd4 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/svm.c +++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c @@ -74,7 +74,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa); memset(vmcb, 0, sizeof(*vmcb)); - asm volatile ("vmsave\n\t" : : "a" (vmcb_gpa) : "memory"); + asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory"); vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr); vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr); vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr); @@ -131,19 +131,19 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa) { asm volatile ( - "vmload\n\t" + "vmload %[vmcb_gpa]\n\t" "mov rflags, %%r15\n\t" // rflags "mov %%r15, 0x170(%[vmcb])\n\t" "mov guest_regs, %%r15\n\t" // rax "mov %%r15, 0x1f8(%[vmcb])\n\t" LOAD_GPR_C - "vmrun\n\t" + "vmrun %[vmcb_gpa]\n\t" SAVE_GPR_C "mov 0x170(%[vmcb]), %%r15\n\t" // rflags "mov %%r15, rflags\n\t" "mov 0x1f8(%[vmcb]), %%r15\n\t" // rax "mov %%r15, guest_regs\n\t" - "vmsave\n\t" + "vmsave %[vmcb_gpa]\n\t" : : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa) : "r15", "memory"); } -- cgit v1.2.3 From 346b59f220a200f15e8d4ba7581347f6e058b69a Mon Sep 17 00:00:00 2001 From: Peter Shier Date: Tue, 9 Feb 2021 17:17:47 -0800 Subject: KVM: selftests: Add missing header file needed by xAPIC IPI tests Fixes: 678e90a349a4 ("KVM: selftests: Test IPI to halted vCPU in xAPIC while backing page moves") Cc: Andrew Jones Cc: Jim Mattson Signed-off-by: Peter Shier Signed-off-by: Sean Christopherson Message-Id: <20210210011747.240913-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/include/numaif.h | 55 ++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 tools/testing/selftests/kvm/include/numaif.h diff --git a/tools/testing/selftests/kvm/include/numaif.h b/tools/testing/selftests/kvm/include/numaif.h new file mode 100644 index 000000000000..b020547403fd --- /dev/null +++ b/tools/testing/selftests/kvm/include/numaif.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * tools/testing/selftests/kvm/include/numaif.h + * + * Copyright (C) 2020, Google LLC. + * + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Header file that provides access to NUMA API functions not explicitly + * exported to user space. 
+ */ + +#ifndef SELFTEST_KVM_NUMAIF_H +#define SELFTEST_KVM_NUMAIF_H + +#define __NR_get_mempolicy 239 +#define __NR_migrate_pages 256 + +/* System calls */ +long get_mempolicy(int *policy, const unsigned long *nmask, + unsigned long maxnode, void *addr, int flags) +{ + return syscall(__NR_get_mempolicy, policy, nmask, + maxnode, addr, flags); +} + +long migrate_pages(int pid, unsigned long maxnode, + const unsigned long *frommask, + const unsigned long *tomask) +{ + return syscall(__NR_migrate_pages, pid, maxnode, frommask, tomask); +} + +/* Policies */ +#define MPOL_DEFAULT 0 +#define MPOL_PREFERRED 1 +#define MPOL_BIND 2 +#define MPOL_INTERLEAVE 3 + +#define MPOL_MAX MPOL_INTERLEAVE + +/* Flags for get_mem_policy */ +#define MPOL_F_NODE (1<<0) /* return next il node or node of address */ + /* Warning: MPOL_F_NODE is unsupported and + * subject to change. Don't use. + */ +#define MPOL_F_ADDR (1<<1) /* look up vma using address */ +#define MPOL_F_MEMS_ALLOWED (1<<2) /* query nodes allowed in cpuset */ + +/* Flags for mbind */ +#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */ +#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */ +#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */ + +#endif /* SELFTEST_KVM_NUMAIF_H */ -- cgit v1.2.3 From 2f3b90fd90b59d1bde088f57ec3d25f81d63bd59 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 10 Feb 2021 10:26:05 -0800 Subject: KVM: selftests: Ignore recently added Xen tests' build output Add the new Xen test binaries to KVM selftest's .gitnore. Signed-off-by: Sean Christopherson Message-Id: <20210210182609.435200-2-seanjc@google.com> Reviewed-by: David Woodhouse Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 1b32c97f8c82..3a84394829ea 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -26,6 +26,8 @@ /x86_64/vmx_set_nested_state_test /x86_64/vmx_tsc_adjust_test /x86_64/xapic_ipi_test +/x86_64/xen_shinfo_test +/x86_64/xen_vmcall_test /x86_64/xss_msr_test /x86_64/vmx_pmu_msrs_test /demand_paging_test -- cgit v1.2.3 From a685d99208621b39dd0764e2bfdddfd19bb080f2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 10 Feb 2021 10:26:06 -0800 Subject: KVM: selftests: Fix size of memslots created by Xen tests For better or worse, the memslot APIs take the number of pages, not the size in bytes. The Xen tests need 2 pages, not 8192 pages. 
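An illustrative fragment mirroring the fix below (it assumes the vm handle and the SHINFO_* constants from the test): the size argument of vm_userspace_mem_region_add() is a page count, so a two-page slot is requested as 2, not 2 * getpagesize().

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    SHINFO_REGION_GPA, SHINFO_REGION_SLOT,
				    2 /* npages, not bytes */, 0);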
Fixes: 8d4e7e80838f ("KVM: x86: declare Xen HVM shared info capability and add test case") Cc: David Woodhouse Signed-off-by: Sean Christopherson Message-Id: <20210210182609.435200-3-seanjc@google.com> Reviewed-by: David Woodhouse Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c | 3 +-- tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index bdb3feb86b5b..cb3963957b3b 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -79,8 +79,7 @@ int main(int argc, char *argv[]) /* Map a region for the shared_info page */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, - SHINFO_REGION_GPA, SHINFO_REGION_SLOT, - 2 * getpagesize(), 0); + SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0); virt_map(vm, SHINFO_REGION_GPA, SHINFO_REGION_GPA, 2, 0); struct kvm_xen_hvm_config hvmc = { diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c index 86653361c695..8389e0bfd711 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c @@ -102,8 +102,7 @@ int main(int argc, char *argv[]) /* Map a region for the hypercall pages */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, - HCALL_REGION_GPA, HCALL_REGION_SLOT, - 2 * getpagesize(), 0); + HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0); virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2, 0); for (;;) { -- cgit v1.2.3 From fc79ef3e7b7590e42fe01bc9d9e2635413905716 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 10 Feb 2021 10:26:07 -0800 Subject: KVM: selftests: Fix hex vs. decimal snafu in Xen test The Xen shinfo selftest uses '40' when setting the GPA of the vCPU info struct, but checks for the result at '0x40'. Arbitrarily use the hex version to resolve the bug. Fixes: 8d4e7e80838f ("KVM: x86: declare Xen HVM shared info capability and add test case") Cc: David Woodhouse Signed-off-by: Sean Christopherson Message-Id: <20210210182609.435200-4-seanjc@google.com> Reviewed-by: David Woodhouse Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index cb3963957b3b..b2a3be9eba8e 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -102,7 +102,7 @@ int main(int argc, char *argv[]) struct kvm_xen_vcpu_attr vi = { .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, - .u.gpa = SHINFO_REGION_GPA + 40, + .u.gpa = SHINFO_REGION_GPA + 0x40, }; vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi); -- cgit v1.2.3 From f1b83973a164073d9b2e5e3d2c556e7ef6743cf6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 10 Feb 2021 10:26:08 -0800 Subject: KVM: selftests: Don't bother mapping GVA for Xen shinfo test Don't bother mapping the Xen shinfo pages into the guest, they don't need to be accessed using the GVAs and passing a define with "GPA" in the name to addr_gva2hpa() is confusing. 
Cc: David Woodhouse Signed-off-by: Sean Christopherson Message-Id: <20210210182609.435200-5-seanjc@google.com> Reviewed-by: David Woodhouse Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index b2a3be9eba8e..9246ea310587 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -80,7 +80,6 @@ int main(int argc, char *argv[]) /* Map a region for the shared_info page */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0); - virt_map(vm, SHINFO_REGION_GPA, SHINFO_REGION_GPA, 2, 0); struct kvm_xen_hvm_config hvmc = { .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, @@ -147,9 +146,9 @@ int main(int argc, char *argv[]) struct pvclock_wall_clock *wc; struct pvclock_vcpu_time_info *ti, *ti2; - wc = addr_gva2hva(vm, SHINFO_REGION_GPA + 0xc00); - ti = addr_gva2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20); - ti2 = addr_gva2hva(vm, PVTIME_ADDR); + wc = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0xc00); + ti = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20); + ti2 = addr_gpa2hva(vm, PVTIME_ADDR); vm_ts.tv_sec = wc->sec; vm_ts.tv_nsec = wc->nsec; -- cgit v1.2.3 From 7137b7ae6f20076a1e05546db61f9fb3d3f9e97d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 10 Feb 2021 10:26:09 -0800 Subject: KVM: x86/xen: Explicitly pad struct compat_vcpu_info to 64 bytes Add a 2 byte pad to struct compat_vcpu_info so that the sum size of its fields is actually 64 bytes. The effective size without the padding is also 64 bytes due to the compiler aligning evtchn_pending_sel to a 4-byte boundary, but depending on compiler alignment is subtle and unnecessary. Opportunistically replace spaces with tables in the other fields. Cc: David Woodhouse Signed-off-by: Sean Christopherson Message-Id: <20210210182609.435200-6-seanjc@google.com> Reviewed-by: David Woodhouse Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 4b32489c0cec..b66a921776f4 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -49,11 +49,12 @@ struct compat_arch_vcpu_info { }; struct compat_vcpu_info { - uint8_t evtchn_upcall_pending; - uint8_t evtchn_upcall_mask; - uint32_t evtchn_pending_sel; - struct compat_arch_vcpu_info arch; - struct pvclock_vcpu_time_info time; + uint8_t evtchn_upcall_pending; + uint8_t evtchn_upcall_mask; + uint16_t pad; + uint32_t evtchn_pending_sel; + struct compat_arch_vcpu_info arch; + struct pvclock_vcpu_time_info time; }; /* 64 bytes (x86) */ struct compat_arch_shared_info { -- cgit v1.2.3
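A closing illustrative sketch, not part of the patch: with the padding explicit, the "64 bytes (x86)" assumption can also be checked at build time. This assumes the struct definitions from arch/x86/kvm/xen.h are in scope and uses the kernel's static_assert() from linux/build_bug.h.

	/* Fail the build if the compat ABI layout ever drifts from 64 bytes. */
	static_assert(sizeof(struct compat_vcpu_info) == 64,
		      "compat_vcpu_info must remain 64 bytes");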