Diffstat (limited to 'arch/riscv/kvm/vcpu.c')
-rw-r--r-- | arch/riscv/kvm/vcpu.c | 144
1 file changed, 137 insertions, 7 deletions
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 7461f964d20a..7f4ad5e4373a 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -67,6 +67,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	if (loaded)
 		kvm_arch_vcpu_put(vcpu);
 
+	vcpu->arch.last_exit_cpu = -1;
+
 	memcpy(csr, reset_csr, sizeof(*csr));
 
 	memcpy(cntx, reset_cntx, sizeof(*cntx));
@@ -78,6 +80,10 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
 	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
 
+	vcpu->arch.hfence_head = 0;
+	vcpu->arch.hfence_tail = 0;
+	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
+
 	/* Reset the guest CSRs for hotplug usecase */
 	if (loaded)
 		kvm_arch_vcpu_load(vcpu, smp_processor_id());
@@ -101,6 +107,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Setup ISA features available to VCPU */
 	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
 
+	/* Setup VCPU hfence queue */
+	spin_lock_init(&vcpu->arch.hfence_lock);
+
 	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
 	cntx = &vcpu->arch.guest_reset_context;
 	cntx->sstatus = SR_SPP | SR_SPIE;
@@ -137,7 +146,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	/* Cleanup VCPU timer */
 	kvm_riscv_vcpu_timer_deinit(vcpu);
 
-	/* Free unused pages pre-allocated for Stage2 page table mappings */
+	/* Free unused pages pre-allocated for G-stage page table mappings */
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
@@ -365,6 +374,101 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
+static unsigned long kvm_isa_ext_arr[] = {
+	RISCV_ISA_EXT_a,
+	RISCV_ISA_EXT_c,
+	RISCV_ISA_EXT_d,
+	RISCV_ISA_EXT_f,
+	RISCV_ISA_EXT_h,
+	RISCV_ISA_EXT_i,
+	RISCV_ISA_EXT_m,
+};
+
+static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
+					  const struct kvm_one_reg *reg)
+{
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    KVM_REG_RISCV_ISA_EXT);
+	unsigned long reg_val = 0;
+	unsigned long host_isa_ext;
+
+	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+		return -EINVAL;
+
+	if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+		return -EINVAL;
+
+	host_isa_ext = kvm_isa_ext_arr[reg_num];
+	if (__riscv_isa_extension_available(&vcpu->arch.isa, host_isa_ext))
+		reg_val = 1; /* Mark the given extension as available */
+
+	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
+					  const struct kvm_one_reg *reg)
+{
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    KVM_REG_RISCV_ISA_EXT);
+	unsigned long reg_val;
+	unsigned long host_isa_ext;
+	unsigned long host_isa_ext_mask;
+
+	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+		return -EINVAL;
+
+	if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+		return -EINVAL;
+
+	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	host_isa_ext = kvm_isa_ext_arr[reg_num];
+	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+		return -EOPNOTSUPP;
+
+	if (host_isa_ext >= RISCV_ISA_EXT_BASE &&
+	    host_isa_ext < RISCV_ISA_EXT_MAX) {
+		/*
+		 * Multi-letter ISA extension. Currently there is no provision
+		 * to enable/disable the multi-letter ISA extensions for guests.
+		 * Return success if the request is to enable any ISA extension
+		 * that is available in the hardware.
+		 * Return -EOPNOTSUPP otherwise.
+		 */
+		if (!reg_val)
+			return -EOPNOTSUPP;
+		else
+			return 0;
+	}
+
+	/* Single letter base ISA extension */
+	if (!vcpu->arch.ran_atleast_once) {
+		host_isa_ext_mask = BIT_MASK(host_isa_ext);
+		if (!reg_val && (host_isa_ext_mask & KVM_RISCV_ISA_DISABLE_ALLOWED))
+			vcpu->arch.isa &= ~host_isa_ext_mask;
+		else
+			vcpu->arch.isa |= host_isa_ext_mask;
+		vcpu->arch.isa &= riscv_isa_extension_base(NULL);
+		vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+		kvm_riscv_vcpu_fp_reset(vcpu);
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 				  const struct kvm_one_reg *reg)
 {
@@ -382,6 +486,8 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
 						 KVM_REG_RISCV_FP_D);
+	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
 
 	return -EINVAL;
 }
@@ -403,6 +509,8 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
 	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
 						 KVM_REG_RISCV_FP_D);
+	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
 
 	return -EINVAL;
 }
@@ -635,7 +743,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	csr_write(CSR_HVIP, csr->hvip);
 	csr_write(CSR_VSATP, csr->vsatp);
 
-	kvm_riscv_stage2_update_hgatp(vcpu);
+	kvm_riscv_gstage_update_hgatp(vcpu);
 
 	kvm_riscv_vcpu_timer_restore(vcpu);
 
@@ -690,10 +798,23 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 			kvm_riscv_reset_vcpu(vcpu);
 
 		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
-			kvm_riscv_stage2_update_hgatp(vcpu);
+			kvm_riscv_gstage_update_hgatp(vcpu);
 
-		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-			__kvm_riscv_hfence_gvma_all();
+		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
+			kvm_riscv_fence_i_process(vcpu);
+
+		/*
+		 * The generic KVM_REQ_TLB_FLUSH is same as
+		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
+		 */
+		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
+			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);
+
+		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
+			kvm_riscv_hfence_vvma_all_process(vcpu);
+
+		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
+			kvm_riscv_hfence_process(vcpu);
 	}
 }
 
@@ -715,6 +836,7 @@ static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	guest_state_enter_irqoff();
 	__kvm_riscv_switch_to(&vcpu->arch);
+	vcpu->arch.last_exit_cpu = vcpu->cpu;
 	guest_state_exit_irqoff();
 }
 
@@ -762,7 +884,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		/* Check conditions before entering the guest */
 		cond_resched();
 
-		kvm_riscv_stage2_vmid_update(vcpu);
+		kvm_riscv_gstage_vmid_update(vcpu);
 
 		kvm_riscv_check_vcpu_requests(vcpu);
 
@@ -800,7 +922,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		kvm_riscv_update_hvip(vcpu);
 
 		if (ret <= 0 ||
-		    kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
+		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
 		    kvm_request_pending(vcpu)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			local_irq_enable();
@@ -809,6 +931,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			continue;
 		}
 
+		/*
+		 * Cleanup stale TLB entries
+		 *
+		 * Note: This should be done after G-stage VMID has been
+		 * updated using kvm_riscv_gstage_vmid_ver_changed()
+		 */
+		kvm_riscv_local_tlb_sanitize(vcpu);
+
 		guest_timing_enter_irqoff();
 
 		kvm_riscv_vcpu_enter_exit(vcpu);
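
The KVM_REG_RISCV_ISA_EXT register type wired up above is reachable from userspace through the ONE_REG ioctls. As a rough illustration (not part of this patch), a VMM could probe and toggle per-VCPU ISA extensions along the following lines; the sketch assumes a 64-bit host, an already-created VCPU file descriptor (vcpu_fd), and the uapi definitions from <linux/kvm.h> and <asm/kvm.h> (KVM_REG_RISCV, KVM_REG_SIZE_U64, KVM_REG_RISCV_ISA_EXT, KVM_RISCV_ISA_EXT_*):

/*
 * Illustrative userspace sketch, not from this patch: querying and
 * disabling guest ISA extensions via the ONE_REG interface added above.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

static __u64 riscv_isa_ext_reg_id(__u64 ext_idx)
{
	/* id = arch | register size | register type | extension index */
	return KVM_REG_RISCV | KVM_REG_SIZE_U64 |
	       KVM_REG_RISCV_ISA_EXT | ext_idx;
}

/* 1 if the extension is enabled for the guest, 0 if not, -1 on ioctl error */
static int guest_isa_ext_enabled(int vcpu_fd, __u64 ext_idx)
{
	unsigned long val = 0;
	struct kvm_one_reg reg = {
		.id   = riscv_isa_ext_reg_id(ext_idx),
		.addr = (unsigned long)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	return val ? 1 : 0;
}

/* Ask KVM to disable an extension; only possible before the VCPU first runs */
static int guest_isa_ext_disable(int vcpu_fd, __u64 ext_idx)
{
	unsigned long val = 0;	/* 0 = disable, non-zero = enable */
	struct kvm_one_reg reg = {
		.id   = riscv_isa_ext_reg_id(ext_idx),
		.addr = (unsigned long)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Example: report whether the guest sees 'H', then try to drop 'D' */
static void shrink_guest_isa(int vcpu_fd)
{
	if (guest_isa_ext_enabled(vcpu_fd, KVM_RISCV_ISA_EXT_H) == 1)
		printf("guest has the H extension\n");

	if (guest_isa_ext_disable(vcpu_fd, KVM_RISCV_ISA_EXT_D) < 0)
		printf("could not disable D (VCPU already ran?)\n");
}

Per the handlers in the diff, a SET request can only clear a single-letter extension that is in KVM_RISCV_ISA_DISABLE_ALLOWED and only before the VCPU has run at least once; multi-letter extensions cannot be toggled, and attempts to disable them return -EOPNOTSUPP.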