author    Linus Torvalds <torvalds@linux-foundation.org>  2022-05-26 14:20:14 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-05-26 14:20:14 -0700
commit    bf9095424d027e942e1d1ee74977e17b7df8e455
tree      57659cf68b7df09005bc5ada4d315d66472cebf3  /arch/riscv/include
parent    98931dd95fd489fcbfa97da563505a6f071d7c77
parent    ffd1925a596ce68bed7d81c61cb64bc35f788a9d
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"S390:
- ultravisor communication device driver
- fix TEID on terminating storage key ops
RISC-V:
- Added Sv57x4 support for G-stage page table
- Added range based local HFENCE functions
- Added remote HFENCE functions based on VCPU requests
- Added ISA extension registers in ONE_REG interface (a usage sketch
follows this message)
- Updated KVM RISC-V maintainers entry to cover selftests support
ARM:
- Add support for the ARMv8.6 WFxT extension
- Guard pages for the EL2 stacks
- Trap and emulate AArch32 ID registers to hide unsupported features
- Ability to select and save/restore the set of hypercalls exposed to
the guest
- Support for PSCI-initiated suspend in collaboration with userspace
- GICv3 register-based LPI invalidation support
- Move host PMU event merging into the vcpu data structure
- GICv3 ITS save/restore fixes
- The usual set of small-scale cleanups and fixes
x86:
- New ioctls to get/set TSC frequency for a whole VM
- Allow userspace to opt out of hypercall patching
- Only do MSR filtering for MSRs accessed by rdmsr/wrmsr
AMD SEV improvements:
- Add KVM_EXIT_SHUTDOWN metadata for SEV-ES
- V_TSC_AUX support
Nested virtualization improvements for AMD:
- Support for "nested nested" optimizations (nested vVMLOAD/VMSAVE,
nested vGIF)
- Allow AVIC to co-exist with a nested guest running
- Fixes for LBR virtualization when a nested guest is running, and
nested LBR virtualization support
- PAUSE filtering for nested hypervisors
Guest support:
- Decoupling of vcpu_is_preempted from PV spinlocks"
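The new RISC-V ISA extension registers mentioned above (and defined in the
uapi header change further down) are accessed from userspace through the
generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. The following is a minimal,
illustrative sketch, not code from this merge: it assumes an RV64 host, an
already-created vCPU file descriptor (vcpu_fd), and a hypothetical helper
name; error handling is trimmed.

/*
 * Illustrative only: probe whether a given ISA extension is exposed to
 * the guest via the KVM_REG_RISCV_ISA_EXT register type added below.
 * <linux/kvm.h> pulls in <asm/kvm.h>, which provides KVM_REG_RISCV_ISA_EXT
 * and the KVM_RISCV_ISA_EXT_* IDs. KVM_REG_SIZE_U64 assumes RV64, where
 * unsigned long is 64 bits wide.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int riscv_isa_ext_enabled(int vcpu_fd, __u64 ext_id)
{
	unsigned long value = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			KVM_REG_RISCV_ISA_EXT | ext_id,
		.addr = (unsigned long)&value,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;	/* e.g. extension unknown to this kernel */
	return value != 0;	/* 1 if enabled for the guest, 0 if not */
}

A VMM would call this as, for example, riscv_isa_ext_enabled(vcpu_fd,
KVM_RISCV_ISA_EXT_C); writing the same register with KVM_SET_ONE_REG is the
corresponding way to request that an extension be enabled or disabled,
subject to what the host supports.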
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (199 commits)
KVM: x86: Fix the intel_pt PMI handling wrongly considered from guest
KVM: selftests: x86: Sync the new name of the test case to .gitignore
Documentation: kvm: reorder ARM-specific section about KVM_SYSTEM_EVENT_SUSPEND
x86, kvm: use correct GFP flags for preemption disabled
KVM: LAPIC: Drop pending LAPIC timer injection when canceling the timer
x86/kvm: Alloc dummy async #PF token outside of raw spinlock
KVM: x86: avoid calling x86 emulator without a decoded instruction
KVM: SVM: Use kzalloc for sev ioctl interfaces to prevent kernel data leak
x86/fpu: KVM: Set the base guest FPU uABI size to sizeof(struct kvm_xsave)
s390/uv_uapi: depend on CONFIG_S390
KVM: selftests: x86: Fix test failure on arch lbr capable platforms
KVM: LAPIC: Trace LAPIC timer expiration on every vmentry
KVM: s390: selftest: Test suppression indication on key prot exception
KVM: s390: Don't indicate suppression on dirtying, failing memop
selftests: drivers/s390x: Add uvdevice tests
drivers/s390/char: Add Ultravisor io device
MAINTAINERS: Update KVM RISC-V entry to cover selftests support
RISC-V: KVM: Introduce ISA extension register
RISC-V: KVM: Cleanup stale TLB entries when host CPU changes
RISC-V: KVM: Add remote HFENCE functions based on VCPU requests
...
Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/csr.h       |   1
-rw-r--r--  arch/riscv/include/asm/kvm_host.h  | 124
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h  |  20
3 files changed, 122 insertions(+), 23 deletions(-)
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index e935f27b10fd..cc40521e438b 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -117,6 +117,7 @@
 #define HGATP_MODE_SV32X4	_AC(1, UL)
 #define HGATP_MODE_SV39X4	_AC(8, UL)
 #define HGATP_MODE_SV48X4	_AC(9, UL)
+#define HGATP_MODE_SV57X4	_AC(10, UL)
 
 #define HGATP32_MODE_SHIFT	31
 #define HGATP32_VMID_SHIFT	22
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index cd4bbcecb0fb..319c8aeb42af 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -12,12 +12,12 @@
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <linux/spinlock.h>
 #include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>
 
-#define KVM_MAX_VCPUS			\
-	((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
+#define KVM_MAX_VCPUS			1024
 
 #define KVM_HALT_POLL_NS_DEFAULT	500000
 
@@ -27,6 +27,31 @@
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
 #define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
+#define KVM_REQ_FENCE_I			\
+	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
+#define KVM_REQ_HFENCE_VVMA_ALL		\
+	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE			\
+	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
+enum kvm_riscv_hfence_type {
+	KVM_RISCV_HFENCE_UNKNOWN = 0,
+	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
+	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
+	KVM_RISCV_HFENCE_VVMA_GVA,
+};
+
+struct kvm_riscv_hfence {
+	enum kvm_riscv_hfence_type type;
+	unsigned long asid;
+	unsigned long order;
+	gpa_t addr;
+	gpa_t size;
+};
+
+#define KVM_RISCV_VCPU_MAX_HFENCE	64
 
 struct kvm_vm_stat {
 	struct kvm_vm_stat_generic generic;
@@ -54,10 +79,10 @@ struct kvm_vmid {
 };
 
 struct kvm_arch {
-	/* stage2 vmid */
+	/* G-stage vmid */
 	struct kvm_vmid vmid;
 
-	/* stage2 page table */
+	/* G-stage page table */
 	pgd_t *pgd;
 	phys_addr_t pgd_phys;
 
@@ -141,6 +166,9 @@ struct kvm_vcpu_arch {
 	/* VCPU ran at least once */
 	bool ran_atleast_once;
 
+	/* Last Host CPU on which Guest VCPU exited */
+	int last_exit_cpu;
+
 	/* ISA feature bits (similar to MISA) */
 	unsigned long isa;
 
@@ -179,6 +207,12 @@ struct kvm_vcpu_arch {
 	/* VCPU Timer */
 	struct kvm_vcpu_timer timer;
 
+	/* HFENCE request queue */
+	spinlock_t hfence_lock;
+	unsigned long hfence_head;
+	unsigned long hfence_tail;
+	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
+
 	/* MMIO instruction details */
 	struct kvm_mmio_decode mmio_decode;
 
@@ -201,27 +235,71 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
-void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa_divby_4,
-				      unsigned long vmid);
-void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
-void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
-void __kvm_riscv_hfence_gvma_all(void);
-
-int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
+#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+					  gpa_t gpa, gpa_t gpsz,
+					  unsigned long order);
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+				     unsigned long order);
+void kvm_riscv_local_hfence_gvma_all(void);
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+					  unsigned long asid,
+					  unsigned long gva,
+					  unsigned long gvsz,
+					  unsigned long order);
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+					  unsigned long asid);
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+				     unsigned long gva, unsigned long gvsz,
+				     unsigned long order);
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
+
+void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i(struct kvm *kvm,
+		       unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    gpa_t gpa, gpa_t gpsz,
+				    unsigned long order);
+void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long gva, unsigned long gvsz,
+				    unsigned long order, unsigned long asid);
+void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long asid);
+void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask,
+			       unsigned long gva, unsigned long gvsz,
+			       unsigned long order);
+void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask);
+
+int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
 			 gpa_t gpa, unsigned long hva, bool is_write);
-int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
-void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
-void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
-void kvm_riscv_stage2_mode_detect(void);
-unsigned long kvm_riscv_stage2_mode(void);
-int kvm_riscv_stage2_gpa_bits(void);
-
-void kvm_riscv_stage2_vmid_detect(void);
-unsigned long kvm_riscv_stage2_vmid_bits(void);
-int kvm_riscv_stage2_vmid_init(struct kvm *kvm);
-bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid);
-void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu);
+int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
+void kvm_riscv_gstage_mode_detect(void);
+unsigned long kvm_riscv_gstage_mode(void);
+int kvm_riscv_gstage_gpa_bits(void);
+
+void kvm_riscv_gstage_vmid_detect(void);
+unsigned long kvm_riscv_gstage_vmid_bits(void);
+int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
+bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
+void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
 
 void __kvm_riscv_unpriv_trap(void);
 
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index f808ad1ce500..6119368ba6d5 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -82,6 +82,23 @@ struct kvm_riscv_timer {
 	__u64 state;
 };
 
+/*
+ * ISA extension IDs specific to KVM. This is not the same as the host ISA
+ * extension IDs as that is internal to the host and should not be exposed
+ * to the guest. This should always be contiguous to keep the mapping simple
+ * in KVM implementation.
+ */
+enum KVM_RISCV_ISA_EXT_ID {
+	KVM_RISCV_ISA_EXT_A = 0,
+	KVM_RISCV_ISA_EXT_C,
+	KVM_RISCV_ISA_EXT_D,
+	KVM_RISCV_ISA_EXT_F,
+	KVM_RISCV_ISA_EXT_H,
+	KVM_RISCV_ISA_EXT_I,
+	KVM_RISCV_ISA_EXT_M,
+	KVM_RISCV_ISA_EXT_MAX,
+};
+
 /* Possible states for kvm_riscv_timer */
 #define KVM_RISCV_TIMER_STATE_OFF	0
 #define KVM_RISCV_TIMER_STATE_ON	1
@@ -123,6 +140,9 @@ struct kvm_riscv_timer {
 #define KVM_REG_RISCV_FP_D_REG(name)	\
 	(offsetof(struct __riscv_d_ext_state, name) / sizeof(__u64))
 
+/* ISA Extension registers are mapped as type 7 */
+#define KVM_REG_RISCV_ISA_EXT	(0x07 << KVM_REG_RISCV_TYPE_SHIFT)
+
 #endif
 
 #endif /* __LINUX_KVM_RISCV_H */