author    Mark Rutland <mark.rutland@arm.com>    2022-09-12 17:22:04 +0100
committer Catalin Marinas <catalin.marinas@arm.com>    2022-09-16 17:15:02 +0100
commit    34bbfdfb146be3ad485cbe526b2a1e9ce220649c (patch)
tree      7333a23b43ef3f0cfee878438b4c3d5760a46175 /arch/arm64/kvm/va_layout.c
parent    92b4b5619f12770d455ae639ed4e773bdf01aff6 (diff)
arm64: alternatives: kvm: prepare for cap changes
The KVM patching callbacks use cpus_have_final_cap() internally within has_vhe(), and subsequent patches will make it invalid to call cpus_have_final_cap() before alternatives patching has completed, and will mean that cpus_have_const_cap() will always fall back to dynamic checks prior to alternatives patching.

In preparation for said change, this patch modifies the KVM patching callbacks to use cpus_have_cap() directly. This is not subject to patching, and will dynamically check the cpu_hwcaps array, which is functionally equivalent to the existing behaviour.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220912162210.3626215-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
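For context, the distinction the commit message relies on is that cpus_have_const_cap()/cpus_have_final_cap() are backed by code patching, so their answers are only trustworthy once alternatives patching has run, whereas cpus_have_cap() reads the cpu_hwcaps bitmap at call time. The fragment below is a minimal userspace-style sketch of that dynamic check, not the kernel's implementation; ARM64_NCAPS's value, BITS_PER_LONG as a local macro, and the helper names dyn_set_cap()/dyn_have_cap() are illustrative assumptions.

	/*
	 * Illustrative sketch only (not kernel source): a capability bitmap
	 * that is consulted dynamically on every call, which is what makes
	 * such a check safe to use before any code patching has happened.
	 */
	#include <stdbool.h>
	#include <limits.h>

	#define ARM64_NCAPS   64                          /* assumed size for the sketch */
	#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

	/* Bitmap of detected capabilities, filled in during feature detection. */
	static unsigned long cpu_hwcaps[(ARM64_NCAPS + BITS_PER_LONG - 1) / BITS_PER_LONG];

	/* Record that capability 'num' was detected. */
	static void dyn_set_cap(unsigned int num)
	{
		if (num < ARM64_NCAPS)
			cpu_hwcaps[num / BITS_PER_LONG] |= 1UL << (num % BITS_PER_LONG);
	}

	/*
	 * Dynamic check: always reads the bitmap, never a patched branch, so
	 * the result tracks whatever has been detected so far. A patched
	 * check, by contrast, bakes its answer in when alternatives are
	 * applied and is meaningless before that point.
	 */
	static bool dyn_have_cap(unsigned int num)
	{
		if (num >= ARM64_NCAPS)
			return false;
		return cpu_hwcaps[num / BITS_PER_LONG] & (1UL << (num % BITS_PER_LONG));
	}

In the patch below, the callbacks switch from the patched-style checks (has_vhe(), cpus_have_const_cap()) to cpus_have_cap(), i.e. the dynamic form sketched above, precisely because they run during patching itself.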
Diffstat (limited to 'arch/arm64/kvm/va_layout.c')
-rw-r--r--  arch/arm64/kvm/va_layout.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index acdb7b3cc97d..91b22a014610 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -169,7 +169,7 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 		 * dictates it and we don't have any spare bits in the
 		 * address), NOP everything after masking the kernel VA.
 		 */
-		if (has_vhe() || (!tag_val && i > 0)) {
+		if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN) || (!tag_val && i > 0)) {
 			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
 			continue;
 		}
@@ -193,7 +193,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 
 	BUG_ON(nr_inst != 4);
 
-	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
+	if (!cpus_have_cap(ARM64_SPECTRE_V3A) ||
+	    WARN_ON_ONCE(cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN)))
 		return;
 
 	/*