author     Linus Torvalds <torvalds@linux-foundation.org>   2018-06-12 11:34:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-06-12 11:34:04 -0700
commit     b357bf6023a948cf6a9472f07a1b0caac0e4f8e8
tree       1471a2691cd56e8640cf6ad51e255b54903a164b
parent     0725d4e1b8b08a60838db3a6e65c23ea8824a048
parent     766d3571d8e50d3a73b77043dc632226f9e6b389
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Paolo Bonzini:
 "Small update for KVM:

  ARM:
   - lazy context-switching of FPSIMD registers on arm64
   - "split" regions for vGIC redistributor

  s390:
   - cleanups for nested
   - clock handling
   - crypto
   - storage keys
   - control register bits

  x86:
   - many bugfixes
   - implement more Hyper-V super powers
   - implement lapic_timer_advance_ns even when the LAPIC timer is
     emulated using the processor's VMX preemption timer.
   - two security-related bugfixes at the top of the branch"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (79 commits)
  kvm: fix typo in flag name
  kvm: x86: use correct privilege level for sgdt/sidt/fxsave/fxrstor access
  KVM: x86: pass kvm_vcpu to kvm_read_guest_virt and kvm_write_guest_virt_system
  KVM: x86: introduce linear_{read,write}_system
  kvm: nVMX: Enforce cpl=0 for VMX instructions
  kvm: nVMX: Add support for "VMWRITE to any supported field"
  kvm: nVMX: Restrict VMX capability MSR changes
  KVM: VMX: Optimize tscdeadline timer latency
  KVM: docs: nVMX: Remove known limitations as they do not exist now
  KVM: docs: mmu: KVM support exposing SLAT to guests
  kvm: no need to check return value of debugfs_create functions
  kvm: Make VM ioctl do valloc for some archs
  kvm: Change return type to vm_fault_t
  KVM: docs: mmu: Fix link to NPT presentation from KVM Forum 2008
  kvm: x86: Amend the KVM_GET_SUPPORTED_CPUID API documentation
  KVM: x86: hyperv: declare KVM_CAP_HYPERV_TLBFLUSH capability
  KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}_EX implementation
  KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE} implementation
  KVM: introduce kvm_make_vcpus_request_mask() API
  KVM: x86: hyperv: do rep check for each hypercall separately
  ...
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--   arch/arm64/kvm/Kconfig          |   1
-rw-r--r--   arch/arm64/kvm/Makefile         |   2
-rw-r--r--   arch/arm64/kvm/debug.c          |   8
-rw-r--r--   arch/arm64/kvm/fpsimd.c         | 110
-rw-r--r--   arch/arm64/kvm/hyp/debug-sr.c   |   6
-rw-r--r--   arch/arm64/kvm/hyp/entry.S      |  43
-rw-r--r--   arch/arm64/kvm/hyp/hyp-entry.S  |  19
-rw-r--r--   arch/arm64/kvm/hyp/switch.c     | 124
-rw-r--r--   arch/arm64/kvm/hyp/sysreg-sr.c  |   4
-rw-r--r--   arch/arm64/kvm/sys_regs.c       |   9
10 files changed, 211 insertions(+), 115 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a2e3a5af1113..47b23bf617c7 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -39,6 +39,7 @@ config KVM
select HAVE_KVM_IRQ_ROUTING
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
+ select HAVE_KVM_VCPU_RUN_PID_CHANGE
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
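
The new HAVE_KVM_VCPU_RUN_PID_CHANGE symbol lets generic KVM notify the arch when a vcpu is run from a different task than before. A minimal sketch of how the generic KVM_RUN path is expected to consume it (paraphrased from this series, not a verbatim quote of virt/kvm/kvm_main.c; vcpu_run_prepare is a hypothetical name):

        static int vcpu_run_prepare(struct kvm_vcpu *vcpu)
        {
                /* Detect that KVM_RUN is now issued by a different task */
                struct pid *oldpid = rcu_access_pointer(vcpu->pid);

                if (unlikely(oldpid != task_pid(current))) {
                        /*
                         * arm64 implements this hook as kvm_arch_vcpu_run_map_fp()
                         * to map current's thread_info and FPSIMD state at EL2.
                         */
                        int r = kvm_arch_vcpu_run_pid_change(vcpu);
                        if (r)
                                return r;
                }
                return 0;
        }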
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 93afff91cb7c..0f2a135ba15b 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -19,7 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index a1f4ebdfe6d3..00d422336a45 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -103,7 +103,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
- * flag on vcpu->arch.debug_flags). Since the guest must not interfere
+ * flag on vcpu->arch.flags). Since the guest must not interfere
* with the hardware state when debugging the guest, we must ensure that
* trapping is enabled whenever we are debugging the guest using the
* debug registers.
@@ -111,7 +111,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
- bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+ bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
unsigned long mdscr;
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
@@ -184,7 +184,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
trap_debug = true;
trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
@@ -206,7 +206,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
/* If KDE or MDE are set, perform a full save/restore cycle. */
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
new file mode 100644
index 000000000000..dc6ecfa5a2d2
--- /dev/null
+++ b/arch/arm64/kvm/fpsimd.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
+ *
+ * Copyright 2018 Arm Limited
+ * Author: Dave Martin <Dave.Martin@arm.com>
+ */
+#include <linux/bottom_half.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_mmu.h>
+
+/*
+ * Called on entry to KVM_RUN unless this vcpu previously ran at least
+ * once and the most recent prior KVM_RUN for this vcpu was called from
+ * the same task as current (highly likely).
+ *
+ * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
+ * such that on entering hyp the relevant parts of current are already
+ * mapped.
+ */
+int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ struct thread_info *ti = &current->thread_info;
+ struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
+
+ /*
+ * Make sure the host task thread flags and fpsimd state are
+ * visible to hyp:
+ */
+ ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
+ if (ret)
+ goto error;
+
+ ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
+ if (ret)
+ goto error;
+
+ vcpu->arch.host_thread_info = kern_hyp_va(ti);
+ vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+error:
+ return ret;
+}
+
+/*
+ * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
+ * The actual loading is done by the FPSIMD access trap taken to hyp.
+ *
+ * Here, we just set the correct metadata to indicate that the FPSIMD
+ * state in the cpu regs (if any) belongs to current on the host.
+ *
+ * TIF_SVE is backed up here, since it may get clobbered with guest state.
+ * This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
+ */
+void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
+{
+ BUG_ON(!current->mm);
+
+ vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+ vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+ if (test_thread_flag(TIF_SVE))
+ vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+}
+
+/*
+ * If the guest FPSIMD state was loaded, update the host's context
+ * tracking data to mark the CPU FPSIMD regs as dirty and belonging to vcpu
+ * so that they will be written back if the kernel clobbers them due to
+ * kernel-mode NEON before re-entry into the guest.
+ */
+void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
+{
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+ clear_thread_flag(TIF_SVE);
+ }
+}
+
+/*
+ * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
+ * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
+ * disappears and another task or vcpu appears that recycles the same
+ * struct fpsimd_state.
+ */
+void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
+{
+ local_bh_disable();
+
+ update_thread_flag(TIF_SVE,
+ vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+ if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ /* Clean guest FP state to memory and invalidate cpu view */
+ fpsimd_save();
+ fpsimd_flush_cpu_state();
+ } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ /* Ensure user trap controls are correctly restored */
+ fpsimd_bind_task_to_cpu();
+ }
+
+ local_bh_enable();
+}
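
Taken together, the four helpers above bracket the vcpu run loop. A hedged sketch of the ordering (the real call sites are in virt/kvm/arm/arm.c; run_guest_once is a placeholder, and this pseudo-driver only shows sequencing):

        static int vcpu_fp_lifecycle(struct kvm_vcpu *vcpu)
        {
                /* Once per task change, before first entry: map state at EL2 */
                int ret = kvm_arch_vcpu_run_map_fp(vcpu);
                if (ret)
                        return ret;

                kvm_arch_vcpu_load_fp(vcpu);    /* vcpu_load: host owns the regs */

                while (run_guest_once(vcpu)) {  /* placeholder for the real loop */
                        /*
                         * Still with IRQs disabled after the exit: if the guest
                         * took the FP trap, bind its regs to this cpu so that
                         * kernel-mode NEON will write them back if it clobbers them.
                         */
                        kvm_arch_vcpu_ctxsync_fp(vcpu);
                }

                kvm_arch_vcpu_put_fp(vcpu);     /* vcpu_put: save guest, restore TIF_SVE */
                return 0;
        }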
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 3e717f66f011..50009766e5e5 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -163,7 +163,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!has_vhe())
__debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
- if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+ if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!has_vhe())
__debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
- if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+ if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
@@ -196,7 +196,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
__debug_save_state(vcpu, guest_dbg, guest_ctxt);
__debug_restore_state(vcpu, host_dbg, host_ctxt);
- vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
}
u32 __hyp_text __kvm_get_mdcr_el2(void)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index e41a161d313a..fad1e164fe48 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -166,46 +166,3 @@ abort_guest_exit_end:
orr x0, x0, x5
1: ret
ENDPROC(__guest_exit)
-
-ENTRY(__fpsimd_guest_restore)
- // x0: esr
- // x1: vcpu
- // x2-x29,lr: vcpu regs
- // vcpu x0-x1 on the stack
- stp x2, x3, [sp, #-16]!
- stp x4, lr, [sp, #-16]!
-
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x2, cptr_el2
- bic x2, x2, #CPTR_EL2_TFP
- msr cptr_el2, x2
-alternative_else
- mrs x2, cpacr_el1
- orr x2, x2, #CPACR_EL1_FPEN
- msr cpacr_el1, x2
-alternative_endif
- isb
-
- mov x3, x1
-
- ldr x0, [x3, #VCPU_HOST_CONTEXT]
- kern_hyp_va x0
- add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
- bl __fpsimd_save_state
-
- add x2, x3, #VCPU_CONTEXT
- add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
- bl __fpsimd_restore_state
-
- // Skip restoring fpexc32 for AArch64 guests
- mrs x1, hcr_el2
- tbnz x1, #HCR_RW_SHIFT, 1f
- ldr x4, [x3, #VCPU_FPEXC32_EL2]
- msr fpexc32_el2, x4
-1:
- ldp x4, lr, [sp], #16
- ldp x2, x3, [sp], #16
- ldp x0, x1, [sp], #16
-
- eret
-ENDPROC(__fpsimd_guest_restore)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 05d836979032..24b4fbafe3e4 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -149,25 +149,6 @@ wa_epilogue:
el1_trap:
get_vcpu_ptr x1, x0
-
- mrs x0, esr_el2
- lsr x0, x0, #ESR_ELx_EC_SHIFT
- /*
- * x0: ESR_EC
- * x1: vcpu pointer
- */
-
- /*
- * We trap the first access to the FP/SIMD to save the host context
- * and restore the guest context lazily.
- * If FP/SIMD is not implemented, handle the trap and inject an
- * undefined instruction exception to the guest.
- */
-alternative_if_not ARM64_HAS_NO_FPSIMD
- cmp x0, #ESR_ELx_EC_FP_ASIMD
- b.eq __fpsimd_guest_restore
-alternative_else_nop_endif
-
mov x0, #ARM_EXCEPTION_TRAP
b __guest_exit
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index c50cedc447f1..d496ef579859 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -22,21 +22,25 @@
#include <kvm/arm_psci.h>
+#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
-static bool __hyp_text __fpsimd_enabled_nvhe(void)
+/* Check whether the FP regs were dirtied while in the host-side run loop: */
+static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
- return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
-}
+ if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+ vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+ KVM_ARM64_FP_HOST);
-static bool fpsimd_enabled_vhe(void)
-{
- return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
+ return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}
/* Save the 32-bit only FPSIMD system register state */
@@ -93,7 +97,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
- val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
+ val &= ~CPACR_EL1_ZEN;
+ if (!update_fp_enabled(vcpu))
+ val &= ~CPACR_EL1_FPEN;
+
write_sysreg(val, cpacr_el1);
write_sysreg(kvm_get_hyp_vector(), vbar_el1);
@@ -106,7 +113,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
__activate_traps_common(vcpu);
val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
+ if (!update_fp_enabled(vcpu))
+ val |= CPTR_EL2_TFP;
+
write_sysreg(val, cptr_el2);
}
@@ -319,6 +329,50 @@ static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
}
}
+static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
+{
+ struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
+
+ if (has_vhe())
+ write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
+ cpacr_el1);
+ else
+ write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
+ cptr_el2);
+
+ isb();
+
+ if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
+ /*
+ * In the SVE case, VHE is assumed: it is enforced by
+ * Kconfig and kvm_arch_init().
+ */
+ if (system_supports_sve() &&
+ (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
+ struct thread_struct *thread = container_of(
+ host_fpsimd,
+ struct thread_struct, uw.fpsimd_state);
+
+ sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
+ } else {
+ __fpsimd_save_state(host_fpsimd);
+ }
+
+ vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
+ }
+
+ __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+
+ /* Skip restoring fpexc32 for AArch64 guests */
+ if (!(read_sysreg(hcr_el2) & HCR_RW))
+ write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
+ fpexc32_el2);
+
+ vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
+
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -335,11 +389,23 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
* same PC once the SError has been injected, and replay the
* trapping instruction.
*/
- if (*exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+ if (*exit_code != ARM_EXCEPTION_TRAP)
+ goto exit;
+
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+ * and restore the guest context lazily.
+ * If FP/SIMD is not implemented, handle the trap and inject an
+ * undefined instruction exception to the guest.
+ */
+ if (system_supports_fpsimd() &&
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD)
+ return __hyp_switch_fpsimd(vcpu);
+
+ if (!__populate_fault_info(vcpu))
return true;
- if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
- *exit_code == ARM_EXCEPTION_TRAP) {
+ if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
bool valid;
valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
@@ -351,12 +417,8 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
- if (ret == 1) {
- if (__skip_instr(vcpu))
- return true;
- else
- *exit_code = ARM_EXCEPTION_TRAP;
- }
+ if (ret == 1 && __skip_instr(vcpu))
+ return true;
if (ret == -1) {
/* Promote an illegal access to an
@@ -369,23 +431,21 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
*exit_code = ARM_EXCEPTION_EL1_SERROR;
}
+
+ goto exit;
}
}
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
- *exit_code == ARM_EXCEPTION_TRAP &&
(kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
int ret = __vgic_v3_perform_cpuif_access(vcpu);
- if (ret == 1) {
- if (__skip_instr(vcpu))
- return true;
- else
- *exit_code = ARM_EXCEPTION_TRAP;
- }
+ if (ret == 1 && __skip_instr(vcpu))
+ return true;
}
+exit:
/* Return to the host kernel and handle the exit */
return false;
}
@@ -428,7 +488,6 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
- bool fp_enabled;
u64 exit_code;
host_ctxt = vcpu->arch.host_cpu_context;
@@ -454,19 +513,14 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__set_host_arch_workaround_state(vcpu);
- fp_enabled = fpsimd_enabled_vhe();
-
sysreg_save_guest_state_vhe(guest_ctxt);
__deactivate_traps(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
- if (fp_enabled) {
- __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
- __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
+ if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);
- }
__debug_switch_to_host(vcpu);
@@ -478,7 +532,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
- bool fp_enabled;
u64 exit_code;
vcpu = kern_hyp_va(vcpu);
@@ -514,8 +567,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__set_host_arch_workaround_state(vcpu);
- fp_enabled = __fpsimd_enabled_nvhe();
-
__sysreg_save_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_disable_traps(vcpu);
@@ -526,11 +577,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(host_ctxt);
- if (fp_enabled) {
- __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
- __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
+ if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);
- }
/*
* This must come after restoring the host sysregs, since a non-VHE
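
The net effect of the switch.c changes is that FP/SIMD traps stay enabled on guest entry unless the guest's registers are already live, and the first trapped access performs the actual switch in __hyp_switch_fpsimd(). A minimal model of the trap decision (illustrative only; it restates update_fp_enabled() from above outside of hyp):

        /* Should guest FP/SIMD accesses trap on this entry? (model only) */
        static bool guest_fp_trap_needed(unsigned long vcpu_flags,
                                         unsigned long host_ti_flags)
        {
                /* Host clobbered the regs since vcpu_load: cached state is stale */
                if (host_ti_flags & _TIF_FOREIGN_FPSTATE)
                        return true;

                /* Otherwise trap until the guest's regs have been loaded once */
                return !(vcpu_flags & KVM_ARM64_FP_ENABLED);
        }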
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index b3894df6bf1a..35bc16832efe 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -196,7 +196,7 @@ void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
- if (has_vhe() || vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}
@@ -218,7 +218,7 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
- if (has_vhe() || vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 6e3b969391fd..a4363735d3f8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -31,7 +31,6 @@
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
@@ -338,7 +337,7 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
{
if (p->is_write) {
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
}
@@ -369,7 +368,7 @@ static void reg_to_dbg(struct kvm_vcpu *vcpu,
}
*dbg_reg = val;
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}
static void dbg_to_reg(struct kvm_vcpu *vcpu,
@@ -1441,7 +1440,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
{
if (p->is_write) {
vcpu_cp14(vcpu, r->reg) = p->regval;
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
p->regval = vcpu_cp14(vcpu, r->reg);
}
@@ -1473,7 +1472,7 @@ static bool trap_xvr(struct kvm_vcpu *vcpu,
val |= p->regval << 32;
*dbg_reg = val;
- vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
p->regval = *dbg_reg >> 32;
}