author     Marc Zyngier <maz@kernel.org>  2022-06-29 10:30:10 +0100
committer  Marc Zyngier <maz@kernel.org>  2022-06-29 10:30:10 +0100
commit     dc94f89ae68fba71f0423dde3c9956f4d9c47e5d (patch)
tree       35fb07c3f37e9c1356274ba97bf8a317c9790e46 /arch/arm64/kvm
parent     a111daf0c53ae91e71fd2bfe7497862d14132e3e (diff)
parent     b4da91879e98bdd5998ee84f47f02426ac50a729 (diff)
Merge branch kvm-arm64/burn-the-flags into kvmarm-master/next
* kvm-arm64/burn-the-flags:
  : .
  : Rework the per-vcpu flags to make them more manageable,
  : splitting them in different sets that have specific
  : uses:
  :
  : - configuration flags
  : - input to the world-switch
  : - state bookkeeping for the kernel itself
  :
  : The FP tracking is also simplified and tracked outside
  : of the flags as a separate state.
  : .
  KVM: arm64: Move the handling of !FP outside of the fast path
  KVM: arm64: Document why pause cannot be turned into a flag
  KVM: arm64: Reduce the size of the vcpu flag members
  KVM: arm64: Add build-time sanity checks for flags
  KVM: arm64: Warn when PENDING_EXCEPTION and INCREMENT_PC are set together
  KVM: arm64: Convert vcpu sysregs_loaded_on_cpu to a state flag
  KVM: arm64: Kill unused vcpu flags field
  KVM: arm64: Move vcpu WFIT flag to the state flag set
  KVM: arm64: Move vcpu ON_UNSUPPORTED_CPU flag to the state flag set
  KVM: arm64: Move vcpu SVE/SME flags to the state flag set
  KVM: arm64: Move vcpu debug/SPE/TRBE flags to the input flag set
  KVM: arm64: Move vcpu PC/Exception flags to the input flag set
  KVM: arm64: Move vcpu configuration flags into their own set
  KVM: arm64: Add three sets of flags to the vcpu state
  KVM: arm64: Add helpers to manipulate vcpu flags among a set
  KVM: arm64: Move FP state ownership from flag to a tristate
  KVM: arm64: Drop FP_FOREIGN_STATE from the hypervisor code

Signed-off-by: Marc Zyngier <maz@kernel.org>
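The series splits the old single vcpu->arch.flags word into three dedicated sets
(configuration, world-switch input, and kernel-internal state) and routes all
accesses through vcpu_get_flag()/vcpu_set_flag()/vcpu_clear_flag(). As a rough
model of that pattern (a minimal sketch only; the real helpers are macros in
arch/arm64/include/asm/kvm_host.h that bind each flag name to its set and mask
and add build-time sanity checks):

#include <linux/types.h>

/*
 * Illustrative model of per-set vcpu flags. A flag here is an explicit
 * (set, mask) pair; in the kernel the pairing is encoded in the flag
 * name itself, so callers simply write vcpu_set_flag(vcpu, SOME_FLAG).
 */
struct vcpu_flags_model {
	u8 cflags;	/* configuration: GUEST_HAS_SVE, GUEST_HAS_PTRAUTH, ... */
	u8 iflags;	/* world-switch input: DEBUG_DIRTY, PENDING_EXCEPTION, ... */
	u8 sflags;	/* kernel bookkeeping: IN_WFIT, SYSREGS_ON_CPU, ... */
};

static inline u8 flag_get(const u8 *set, u8 mask)	/* vcpu_get_flag() */
{
	return *set & mask;
}

static inline void flag_set(u8 *set, u8 mask)		/* vcpu_set_flag() */
{
	*set |= mask;
}

static inline void flag_clear(u8 *set, u8 mask)		/* vcpu_clear_flag() */
{
	*set &= ~mask;
}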
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/arch_timer.c                    2
-rw-r--r--  arch/arm64/kvm/arm.c                          12
-rw-r--r--  arch/arm64/kvm/debug.c                        25
-rw-r--r--  arch/arm64/kvm/fpsimd.c                       39
-rw-r--r--  arch/arm64/kvm/handle_exit.c                   2
-rw-r--r--  arch/arm64/kvm/hyp/exception.c                23
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/debug-sr.h      6
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h       24
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h     4
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/debug-sr.c             8
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c               6
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/sys_regs.c             4
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c                4
-rw-r--r--  arch/arm64/kvm/hyp/vhe/sysreg-sr.c             4
-rw-r--r--  arch/arm64/kvm/inject_fault.c                 17
-rw-r--r--  arch/arm64/kvm/reset.c                         6
-rw-r--r--  arch/arm64/kvm/sys_regs.c                     12
17 files changed, 90 insertions, 108 deletions
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 3b8d062e30ea..bb24a76b4224 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -242,7 +242,7 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
- (vcpu->arch.flags & KVM_ARM64_WFIT));
+ vcpu_get_flag(vcpu, IN_WFIT));
}
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a0188144a122..8522eb7f5bc8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -330,6 +330,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+ /*
+ * Default value for the FP state, will be overloaded at load
+ * time if we support FP (pretty likely)
+ */
+ vcpu->arch.fp_state = FP_STATE_FREE;
+
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
@@ -659,7 +665,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
preempt_enable();
kvm_vcpu_halt(vcpu);
- vcpu->arch.flags &= ~KVM_ARM64_WFIT;
+ vcpu_clear_flag(vcpu, IN_WFIT);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
preempt_disable();
@@ -1015,8 +1021,8 @@ out:
* the vcpu state. Note that this relies on __kvm_adjust_pc()
* being preempt-safe on VHE.
*/
- if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
- KVM_ARM64_INCREMENT_PC)))
+ if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
+ vcpu_get_flag(vcpu, INCREMENT_PC)))
kvm_call_hyp(__kvm_adjust_pc, vcpu);
vcpu_put(vcpu);
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 4fd5c216c4bb..0b28d7db7c76 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -104,11 +104,11 @@ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
* Trap debug register access when one of the following is true:
* - Userspace is using the hardware to debug the guest
* (KVM_GUESTDBG_USE_HW is set).
- * - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
+ * - The guest is not using debug (DEBUG_DIRTY clear).
* - The guest has enabled the OS Lock (debug exceptions are blocked).
*/
if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
- !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY) ||
+ !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
kvm_vcpu_os_lock_enabled(vcpu))
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
@@ -147,8 +147,8 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
* debug related registers.
*
* Additionally, KVM only traps guest accesses to the debug registers if
- * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
- * flag on vcpu->arch.flags). Since the guest must not interfere
+ * the guest is not actively using them (see the DEBUG_DIRTY
+ * flag on vcpu->arch.iflags). Since the guest must not interfere
* with the hardware state when debugging the guest, we must ensure that
* trapping is enabled whenever we are debugging the guest using the
* debug registers.
@@ -205,9 +205,8 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
*
* We simply switch the debug_ptr to point to our new
* external_debug_state which has been populated by the
- * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
- * mechanism ensures the registers are updated on the
- * world switch.
+ * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
+ * the registers are updated on the world switch.
*/
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
/* Enable breakpoints/watchpoints */
@@ -216,7 +215,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
- vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu_set_flag(vcpu, DEBUG_DIRTY);
trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
&vcpu->arch.debug_ptr->dbg_bcr[0],
@@ -246,7 +245,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
/* If KDE or MDE are set, perform a full save/restore cycle. */
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
- vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu_set_flag(vcpu, DEBUG_DIRTY);
/* Write mdcr_el2 changes since vcpu_load on VHE systems */
if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
@@ -298,16 +297,16 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
*/
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
!(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
- vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;
+ vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
/* Check if we have TRBE implemented and available at the host */
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
!(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
- vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
+ vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
- vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
- KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
+ vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
+ vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}
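The debug.c hunks above keep the lazy debug save/restore scheme: DEBUG_DIRTY is
raised whenever the guest or a debug ioctl dirties a debug register, and the
world switch pays for a full debug-register save/restore only while the flag is
set. In outline (a sketch of the flow; the helper names below are hypothetical
stand-ins for __debug_save_state()/__debug_restore_state() in
hyp/include/hyp/debug-sr.h):

static void debug_switch_to_guest_sketch(struct kvm_vcpu *vcpu)
{
	if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
		return;				/* guest not using debug: skip the cost */

	save_host_debug_regs(vcpu);		/* hypothetical stand-in helpers */
	restore_guest_debug_regs(vcpu);
}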
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 6012b08ecb14..ec8e4494873d 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -77,12 +77,14 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
BUG_ON(!current->mm);
BUG_ON(test_thread_flag(TIF_SVE));
- vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
- vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+ if (!system_supports_fpsimd())
+ return;
+
+ vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
- vcpu->arch.flags &= ~KVM_ARM64_HOST_SVE_ENABLED;
+ vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
- vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
+ vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
/*
* We don't currently support SME guests but if we leave
@@ -94,29 +96,28 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
* operations. Do this for ZA as well for now for simplicity.
*/
if (system_supports_sme()) {
- vcpu->arch.flags &= ~KVM_ARM64_HOST_SME_ENABLED;
+ vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
- vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
+ vcpu_set_flag(vcpu, HOST_SME_ENABLED);
- if (read_sysreg_s(SYS_SVCR) &
- (SVCR_SM_MASK | SVCR_ZA_MASK)) {
- vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
+ if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
+ vcpu->arch.fp_state = FP_STATE_FREE;
fpsimd_save_and_flush_cpu_state();
}
}
}
/*
- * Called just before entering the guest once we are no longer
- * preemptable. Syncs the host's TIF_FOREIGN_FPSTATE with the KVM
- * mirror of the flag used by the hypervisor.
+ * Called just before entering the guest once we are no longer preemptable
+ * and interrupts are disabled. If we have managed to run anything using
+ * FP while we were preemptible (such as off the back of an interrupt),
+ * then neither the host nor the guest own the FP hardware (and it was the
+ * responsibility of the code that used FP to save the existing state).
*/
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
- vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
- else
- vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
+ vcpu->arch.fp_state = FP_STATE_FREE;
}
/*
@@ -130,7 +131,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
WARN_ON_ONCE(!irqs_disabled());
- if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
/*
* Currently we do not support SME guests so SVCR is
* always 0 and we just need a variable to point to.
@@ -163,7 +164,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
*/
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
- if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
+ if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
sysreg_clear_set(CPACR_EL1, 0,
CPACR_EL1_SMEN_EL0EN |
CPACR_EL1_SMEN_EL1EN);
@@ -173,7 +174,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
CPACR_EL1_SMEN_EL1EN);
}
- if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
@@ -192,7 +193,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
* for EL0. To avoid spurious traps, restore the trap state
* seen by kvm_arch_vcpu_load_fp():
*/
- if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+ if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
else
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
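The fpsimd.c hunks above (and the hyp/switch.h hunks further down) replace the
FP_ENABLED/FP_HOST flag pair with a single vcpu->arch.fp_state tristate.
Reading the transitions off the diff: kvm_arch_vcpu_load_fp() starts a run as
HOST_OWNED, anything that consumed the FP unit while preemptible
(TIF_FOREIGN_FPSTATE, SME streaming mode) demotes it to FREE, and a handled FP
trap promotes it to GUEST_OWNED. A compact restatement (the enum values are the
ones used in the diff; the comments are an interpretation, not quoted source):

enum fp_state_sketch {
	FP_STATE_FREE,		/* nobody owns the FP regs; guest accesses trap */
	FP_STATE_HOST_OWNED,	/* host state live; save it before giving the guest FP */
	FP_STATE_GUEST_OWNED,	/* guest state live; save/flush it on vcpu_put */
};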
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index f66c0142b335..d045f5b973b9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -120,7 +120,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
} else {
if (esr & ESR_ELx_WFx_ISS_WFxT)
- vcpu->arch.flags |= KVM_ARM64_WFIT;
+ vcpu_set_flag(vcpu, IN_WFIT);
kvm_vcpu_wfi(vcpu);
}
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index c5d009715402..b7557b25ed56 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -303,14 +303,14 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
if (vcpu_el1_is_32bit(vcpu)) {
- switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
- case KVM_ARM64_EXCEPT_AA32_UND:
+ switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+ case unpack_vcpu_flag(EXCEPT_AA32_UND):
enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
break;
- case KVM_ARM64_EXCEPT_AA32_IABT:
+ case unpack_vcpu_flag(EXCEPT_AA32_IABT):
enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
break;
- case KVM_ARM64_EXCEPT_AA32_DABT:
+ case unpack_vcpu_flag(EXCEPT_AA32_DABT):
enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
break;
default:
@@ -318,9 +318,8 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
break;
}
} else {
- switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
- case (KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
- KVM_ARM64_EXCEPT_AA64_EL1):
+ switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+ case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
break;
default:
@@ -340,12 +339,12 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
*/
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+ if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
kvm_inject_exception(vcpu);
- vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
- KVM_ARM64_EXCEPT_MASK);
- } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+ vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
+ vcpu_clear_flag(vcpu, EXCEPT_MASK);
+ } else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
kvm_skip_instr(vcpu);
- vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+ vcpu_clear_flag(vcpu, INCREMENT_PC);
}
}
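Later hunks (hyp/nvhe/sys_regs.c, inject_fault.c) fold the old three-flag dance
into a single kvm_pend_exception() call, which records the target exception in
the EXCEPT_MASK field and raises PENDING_EXCEPTION for __kvm_adjust_pc() above
to consume. PENDING_EXCEPTION and INCREMENT_PC are mutually exclusive, and one
of the merged patches warns when both are seen. A plausible shape for the
helper (an assumption about the macro, not a quote of it):

/* Sketch of kvm_pend_exception(): assumed shape, not the verbatim macro. */
#define kvm_pend_exception(v, e)					\
	do {								\
		/* pending an exception and incrementing the PC can't mix */ \
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)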
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 4ebe9f558f3a..961bbef104a6 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -132,7 +132,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg;
- if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
+ if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
return;
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
@@ -151,7 +151,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg;
- if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
+ if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
return;
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
@@ -162,7 +162,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
__debug_save_state(guest_dbg, guest_ctxt);
__debug_restore_state(host_dbg, host_ctxt);
- vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
+ vcpu_clear_flag(vcpu, DEBUG_DIRTY);
}
#endif /* __ARM64_KVM_HYP_DEBUG_SR_H__ */
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 37d9f211c200..6cbbb6c02f66 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -37,22 +37,10 @@ struct kvm_exception_table_entry {
extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;
-/* Check whether the FP regs were dirtied while in the host-side run loop: */
-static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
+/* Check whether the FP regs are owned by the guest */
+static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
- /*
- * When the system doesn't support FP/SIMD, we cannot rely on
- * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
- * abort on the very first access to FP and thus we should never
- * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
- * trap the accesses.
- */
- if (!system_supports_fpsimd() ||
- vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
- vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
- KVM_ARM64_FP_HOST);
-
- return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
+ return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
}
/* Save the 32-bit only FPSIMD system register state */
@@ -191,10 +179,8 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
/* Write out the host state if it's in the registers */
- if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
+ if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
- vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
- }
/* Restore the guest state */
if (sve_guest)
@@ -206,7 +192,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
- vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
+ vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;
return true;
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 7ecca8b07851..baa5b9b3dde5 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -195,7 +195,7 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
- if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
+ if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}
@@ -212,7 +212,7 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);
- if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
+ if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index df361d839902..e17455773b98 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -84,10 +84,10 @@ static void __debug_restore_trace(u64 trfcr_el1)
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
/* Disable and flush Self-Hosted Trace generation */
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
__debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
}
@@ -98,9 +98,9 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
__debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6db801db8f27..764bdc423cb8 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -43,7 +43,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = vcpu->arch.cptr_el2;
val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
- if (!update_fp_enabled(vcpu)) {
+ if (!guest_owns_fp_regs(vcpu)) {
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu);
}
@@ -123,7 +123,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
}
cptr = CPTR_EL2_DEFAULT;
- if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
+ if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
cptr |= CPTR_EL2_TZ;
if (cpus_have_final_cap(ARM64_SME))
cptr &= ~CPTR_EL2_TSM;
@@ -335,7 +335,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(host_ctxt);
- if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 35a4331ba5f3..88c4d488695c 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -38,9 +38,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
- vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
- KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
- KVM_ARM64_PENDING_EXCEPTION);
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
__kvm_adjust_pc(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 969f20daf97a..bce7fc51f9a1 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -55,7 +55,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= CPTR_EL2_TAM;
- if (update_fp_enabled(vcpu)) {
+ if (guest_owns_fp_regs(vcpu)) {
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
} else {
@@ -175,7 +175,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_host_state_vhe(host_ctxt);
- if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 007a12dd4351..7b44f6b3b547 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -79,7 +79,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
__sysreg_restore_user_state(guest_ctxt);
__sysreg_restore_el1_state(guest_ctxt);
- vcpu->arch.sysregs_loaded_on_cpu = true;
+ vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
activate_traps_vhe_load(vcpu);
}
@@ -110,5 +110,5 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
/* Restore host user state */
__sysreg_restore_user_state(host_ctxt);
- vcpu->arch.sysregs_loaded_on_cpu = false;
+ vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
}
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 55a5dbe957e0..f32f4a2a347f 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -20,9 +20,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
u64 esr = 0;
- vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
- KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
- KVM_ARM64_PENDING_EXCEPTION);
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
@@ -52,9 +50,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
{
u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
- vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
- KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
- KVM_ARM64_PENDING_EXCEPTION);
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
/*
* Build an unknown exception, depending on the instruction
@@ -73,8 +69,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
static void inject_undef32(struct kvm_vcpu *vcpu)
{
- vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_UND |
- KVM_ARM64_PENDING_EXCEPTION);
+ kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}
/*
@@ -97,14 +92,12 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
far = vcpu_read_sys_reg(vcpu, FAR_EL1);
if (is_pabt) {
- vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
- KVM_ARM64_PENDING_EXCEPTION);
+ kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
far &= GENMASK(31, 0);
far |= (u64)addr << 32;
vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
} else { /* !iabt */
- vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
- KVM_ARM64_PENDING_EXCEPTION);
+ kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
far &= GENMASK(63, 32);
far |= addr;
vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 6c70c6f61c70..0e08fbe68715 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -81,7 +81,7 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
* KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
* kvm_arm_vcpu_finalize(), which freezes the configuration.
*/
- vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
+ vcpu_set_flag(vcpu, GUEST_HAS_SVE);
return 0;
}
@@ -120,7 +120,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
}
vcpu->arch.sve_state = buf;
- vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
+ vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
return 0;
}
@@ -177,7 +177,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
!system_has_full_ptr_auth())
return -EINVAL;
- vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
+ vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
return 0;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c06c0477fab5..1c562bcfeccf 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -72,7 +72,7 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
u64 val = 0x8badf00d8badf00d;
- if (vcpu->arch.sysregs_loaded_on_cpu &&
+ if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
__vcpu_read_sys_reg_from_cpu(reg, &val))
return val;
@@ -81,7 +81,7 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
- if (vcpu->arch.sysregs_loaded_on_cpu &&
+ if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
__vcpu_write_sys_reg_to_cpu(val, reg))
return;
@@ -387,7 +387,7 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
{
if (p->is_write) {
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
- vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu_set_flag(vcpu, DEBUG_DIRTY);
} else {
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
}
@@ -403,8 +403,8 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
* A 32 bit write to a debug register leave top bits alone
* A 32 bit read from a debug register only returns the bottom bits
*
- * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
- * hyp.S code switches between host and guest values in future.
+ * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
+ * switches between host and guest values in future.
*/
static void reg_to_dbg(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
@@ -420,7 +420,7 @@ static void reg_to_dbg(struct kvm_vcpu *vcpu,
val |= (p->regval & (mask >> shift)) << shift;
*dbg_reg = val;
- vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+ vcpu_set_flag(vcpu, DEBUG_DIRTY);
}
static void dbg_to_reg(struct kvm_vcpu *vcpu,