From 38abf22e122e00d20e99408fce4471b5cb65133b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:06 +0100 Subject: KVM: arm64: Delete orphaned declaration for __fpsimd_enabled() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit __fpsimd_enabled() no longer exists, but a dangling declaration has survived in kvm_hyp.h. This patch gets rid of it. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_hyp.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 4da765f2cca5..ef8b8394d3d1 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -149,7 +149,6 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -bool __fpsimd_enabled(void); void activate_traps_vhe_load(struct kvm_vcpu *vcpu); void deactivate_traps_vhe_put(void); -- cgit v1.2.3 From 3f61f40947e88b069ac1103727c81582d6e91dea Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:08 +0100 Subject: KVM: arm64: Add missing #includes to kvm_host.h kvm_host.h uses some declarations from other headers that are currently included by accident, without an explicit #include. This patch adds a few #includes that are clearly missing. Although the header builds without them today, this should help to avoid future surprises. Signed-off-by: Dave Martin Acked-by: Mark Rutland Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a01fe087e022..6d10100ff870 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -22,9 +22,13 @@ #ifndef __ARM64_KVM_HOST_H__ #define __ARM64_KVM_HOST_H__ +#include #include +#include #include +#include #include +#include #include #include #include -- cgit v1.2.3 From d06b76be8dad2e2fa62f68264887cc295d67a3ef Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:10 +0100 Subject: arm64/sve: Check SVE virtualisability Due to the way the effective SVE vector length is controlled and trapped at different exception levels, certain mismatches in the sets of vector lengths supported by different physical CPUs in the system may prevent straightforward virtualisation of SVE at parity with the host. This patch analyses the extent to which SVE can be virtualised safely without interfering with migration of vcpus between physical CPUs, and rejects late secondary CPUs that would erode the situation further. It is left up to KVM to decide what to do with this information. 
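As an aside, the bookkeeping added here can be modelled outside the kernel. The following stand-alone C sketch is an illustration only, not code from this patch: SVE_VQ_MAX is shrunk to a toy value and the two example CPUs are invented, but the helpers mirror the kernel's vq_to_bit()/bit_to_vq() trick, where reversed bit numbering lets a find_last_bit()-style search return the smallest mismatched vector quantum.

/*
 * Stand-alone illustration (not code from this patch).  SVE_VQ_MAX is a
 * toy value and the example CPUs are invented; bits are numbered in
 * reverse (bit = SVE_VQ_MAX - vq) so that a find_last_bit()-style search
 * over a set of VQs returns the *smallest* VQ in that set.
 */
#include <stdio.h>

#define SVE_VQ_MAX	8	/* toy width; the architecture allows up to 512 */

static unsigned int vq_to_bit(unsigned int vq)  { return SVE_VQ_MAX - vq; }
static unsigned int bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

/* Highest set bit, or SVE_VQ_MAX if the mask is empty (like find_last_bit()) */
static unsigned int find_last_bit8(unsigned int mask)
{
	int b;

	for (b = SVE_VQ_MAX - 1; b >= 0; b--)
		if (mask & (1u << b))
			return (unsigned int)b;
	return SVE_VQ_MAX;
}

int main(void)
{
	/* CPU A supports VQ 1-4, CPU B supports VQ 1-2 only */
	unsigned int cpu_a = (1u << vq_to_bit(1)) | (1u << vq_to_bit(2)) |
			     (1u << vq_to_bit(3)) | (1u << vq_to_bit(4));
	unsigned int cpu_b = (1u << vq_to_bit(1)) | (1u << vq_to_bit(2));

	unsigned int vq_map = cpu_a;		/* intersection: supported everywhere */
	unsigned int vq_partial_map = cpu_a;	/* union: present on at least one CPU */
	unsigned int mismatch, b;

	/* What sve_update_vq_map() does when CPU B comes up */
	vq_map &= cpu_b;
	vq_partial_map |= cpu_b;

	/* VQs present somewhere but not everywhere */
	mismatch = vq_partial_map & ~vq_map;

	b = find_last_bit8(mismatch);		/* smallest mismatched VQ */
	if (b >= SVE_VQ_MAX)
		printf("no mismatches: every supported VL is virtualisable\n");
	else
		printf("largest virtualisable VQ = %u\n", bit_to_vq(b + 1));

	return 0;
}

With CPU A supporting VQ 1-4 and CPU B only VQ 1-2, the sketch prints a largest virtualisable VQ of 2: the smallest VQ that is not supported everywhere is 3, so only lengths below it can safely be offered to guests regardless of which physical CPU a vcpu lands on.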
Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/fpsimd.h | 1 + arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/fpsimd.c | 86 ++++++++++++++++++++++++++++++++++------- 3 files changed, 73 insertions(+), 16 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index dd1ad3950ef5..964adc9f312d 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -87,6 +87,7 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern u64 read_zcr_features(void); extern int __ro_after_init sve_max_vl; +extern int __ro_after_init sve_max_virtualisable_vl; #ifdef CONFIG_ARM64_SVE diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4061de10cea6..7f8cc51f0740 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1863,7 +1863,7 @@ static void verify_sve_features(void) unsigned int len = zcr & ZCR_ELx_LEN_MASK; if (len < safe_len || sve_verify_vq_map()) { - pr_crit("CPU%d: SVE: required vector length(s) missing\n", + pr_crit("CPU%d: SVE: vector length support mismatch\n", smp_processor_id()); cpu_die_early(); } diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index f59ea677cd42..b219796a4081 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -48,6 +49,7 @@ #include #include #include +#include #define FPEXC_IOF (1 << 0) #define FPEXC_DZF (1 << 1) @@ -130,14 +132,18 @@ static int sve_default_vl = -1; /* Maximum supported vector length across all CPUs (initially poisoned) */ int __ro_after_init sve_max_vl = SVE_VL_MIN; +int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN; /* Set of available vector lengths, as vq_to_bit(vq): */ static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +/* Set of vector lengths present on at least one cpu: */ +static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); static void __percpu *efi_sve_state; #else /* ! CONFIG_ARM64_SVE */ /* Dummy declaration for code that will be optimised out: */ extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); extern void __percpu *efi_sve_state; #endif /* ! CONFIG_ARM64_SVE */ @@ -623,12 +629,6 @@ int sve_get_current_vl(void) return sve_prctl_status(0); } -/* - * Bitmap for temporary storage of the per-CPU set of supported vector lengths - * during secondary boot. 
- */ -static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX); - static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) { unsigned int vq, vl; @@ -654,6 +654,7 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) void __init sve_init_vq_map(void) { sve_probe_vqs(sve_vq_map); + bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX); } /* @@ -663,8 +664,11 @@ void __init sve_init_vq_map(void) */ void sve_update_vq_map(void) { - sve_probe_vqs(sve_secondary_vq_map); - bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX); + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + + sve_probe_vqs(tmp_map); + bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX); + bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX); } /* @@ -673,18 +677,48 @@ void sve_update_vq_map(void) */ int sve_verify_vq_map(void) { - int ret = 0; + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + unsigned long b; - sve_probe_vqs(sve_secondary_vq_map); - bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map, - SVE_VQ_MAX); - if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) { + sve_probe_vqs(tmp_map); + + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); + if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) { pr_warn("SVE: cpu%d: Required vector length(s) missing\n", smp_processor_id()); - ret = -EINVAL; + return -EINVAL; } - return ret; + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) + return 0; + + /* + * For KVM, it is necessary to ensure that this CPU doesn't + * support any vector length that guests may have probed as + * unsupported. + */ + + /* Recover the set of supported VQs: */ + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); + /* Find VQs supported that are not globally supported: */ + bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX); + + /* Find the lowest such VQ, if any: */ + b = find_last_bit(tmp_map, SVE_VQ_MAX); + if (b >= SVE_VQ_MAX) + return 0; /* no mismatches */ + + /* + * Mismatches above sve_max_virtualisable_vl are fine, since + * no guest is allowed to configure ZCR_EL2.LEN to exceed this: + */ + if (sve_vl_from_vq(bit_to_vq(b)) <= sve_max_virtualisable_vl) { + pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n", + smp_processor_id()); + return -EINVAL; + } + + return 0; } static void __init sve_efi_setup(void) @@ -751,6 +785,8 @@ u64 read_zcr_features(void) void __init sve_setup(void) { u64 zcr; + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); + unsigned long b; if (!system_supports_sve()) return; @@ -779,11 +815,31 @@ void __init sve_setup(void) */ sve_default_vl = find_supported_vector_length(64); + bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map, + SVE_VQ_MAX); + + b = find_last_bit(tmp_map, SVE_VQ_MAX); + if (b >= SVE_VQ_MAX) + /* No non-virtualisable VLs found */ + sve_max_virtualisable_vl = SVE_VQ_MAX; + else if (WARN_ON(b == SVE_VQ_MAX - 1)) + /* No virtualisable VLs? This is architecturally forbidden. */ + sve_max_virtualisable_vl = SVE_VQ_MIN; + else /* b + 1 < SVE_VQ_MAX */ + sve_max_virtualisable_vl = sve_vl_from_vq(bit_to_vq(b + 1)); + + if (sve_max_virtualisable_vl > sve_max_vl) + sve_max_virtualisable_vl = sve_max_vl; + pr_info("SVE: maximum available vector length %u bytes per vector\n", sve_max_vl); pr_info("SVE: default vector length %u bytes per vector\n", sve_default_vl); + /* KVM decides whether to support mismatched systems. 
Just warn here: */ + if (sve_max_virtualisable_vl < sve_max_vl) + pr_warn("SVE: unvirtualisable vector lengths present\n"); + sve_efi_setup(); } -- cgit v1.2.3 From 0495067420f352a0b8ed37ee412d7dd8e7b95c61 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:11 +0100 Subject: arm64/sve: Enable SVE state tracking for non-task contexts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current FPSIMD/SVE context handling support for non-task (i.e., KVM vcpu) contexts does not take SVE into account. This means that only task contexts can safely use SVE at present. In preparation for enabling KVM guests to use SVE, it is necessary to keep track of SVE state for non-task contexts too. This patch adds the necessary support, removing assumptions from the context switch code about the location of the SVE context storage. When binding a vcpu context, its vector length is arbitrarily specified as SVE_VL_MIN for now. In any case, because TIF_SVE is presently cleared at vcpu context bind time, the specified vector length will not be used for anything yet. In later patches TIF_SVE will be set here as appropriate, and the appropriate maximum vector length for the vcpu will be passed when binding. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Reviewed-by: Julien Grall Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/fpsimd.h | 3 ++- arch/arm64/kernel/fpsimd.c | 20 +++++++++++++++----- arch/arm64/kvm/fpsimd.c | 5 ++++- 3 files changed, 21 insertions(+), 7 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 964adc9f312d..df7a14305222 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -56,7 +56,8 @@ extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); extern void fpsimd_bind_task_to_cpu(void); -extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state); +extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, + void *sve_state, unsigned int sve_vl); extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_flush_cpu_state(void); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index b219796a4081..8a93afa78600 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -121,6 +121,8 @@ */ struct fpsimd_last_state_struct { struct user_fpsimd_state *st; + void *sve_state; + unsigned int sve_vl; }; static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); @@ -241,14 +243,15 @@ static void task_fpsimd_load(void) */ void fpsimd_save(void) { - struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st); + struct fpsimd_last_state_struct const *last = + this_cpu_ptr(&fpsimd_last_state); /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ WARN_ON(!in_softirq() && !irqs_disabled()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { if (system_supports_sve() && test_thread_flag(TIF_SVE)) { - if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) { + if (WARN_ON(sve_get_vl() != last->sve_vl)) { /* * Can't save the user regs, so current would * re-enter user with corrupt state. 
@@ -258,9 +261,11 @@ void fpsimd_save(void) return; } - sve_save_state(sve_pffr(¤t->thread), &st->fpsr); + sve_save_state((char *)last->sve_state + + sve_ffr_offset(last->sve_vl), + &last->st->fpsr); } else - fpsimd_save_state(st); + fpsimd_save_state(last->st); } } @@ -1034,6 +1039,8 @@ void fpsimd_bind_task_to_cpu(void) this_cpu_ptr(&fpsimd_last_state); last->st = ¤t->thread.uw.fpsimd_state; + last->sve_state = current->thread.sve_state; + last->sve_vl = current->thread.sve_vl; current->thread.fpsimd_cpu = smp_processor_id(); if (system_supports_sve()) { @@ -1047,7 +1054,8 @@ void fpsimd_bind_task_to_cpu(void) } } -void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) +void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, + unsigned int sve_vl) { struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); @@ -1055,6 +1063,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) WARN_ON(!in_softirq() && !irqs_disabled()); last->st = st; + last->sve_state = sve_state; + last->sve_vl = sve_vl; } /* diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index aac7808ce216..1cf4f0269471 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -85,7 +86,9 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) WARN_ON_ONCE(!irqs_disabled()); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { - fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs); + fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs, + NULL, SVE_VL_MIN); + clear_thread_flag(TIF_FOREIGN_FPSTATE); clear_thread_flag(TIF_SVE); } -- cgit v1.2.3 From 1765edbab16e3dc73367bda04e45337cea3e51a0 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:12 +0100 Subject: KVM: arm64: Add a vcpu flag to control SVE visibility for the guest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since SVE will be enabled or disabled on a per-vcpu basis, a flag is needed in order to track which vcpus have it enabled. This patch adds a suitable flag and a helper for checking it. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6d10100ff870..ad4f7f004498 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -328,6 +328,10 @@ struct kvm_vcpu_arch { #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ +#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ + +#define vcpu_has_sve(vcpu) (system_supports_sve() && \ + ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) -- cgit v1.2.3 From 73433762fcaeb9d59e84d299021c6b15466c96dd Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:16 +0100 Subject: KVM: arm64/sve: System register context switch and access support This patch adds the necessary support for context switching ZCR_EL1 for each vcpu. 
ZCR_EL1 is trapped alongside the FPSIMD/SVE registers, so it makes sense for it to be handled as part of the guest FPSIMD/SVE context for context switch purposes instead of handling it as a general system register. This means that it can be switched in lazily at the appropriate time. No effort is made to track host context for this register, since SVE requires VHE: thus the hosts's value for this register lives permanently in ZCR_EL2 and does not alias the guest's value at any time. The Hyp switch and fpsimd context handling code is extended appropriately. Accessors are added in sys_regs.c to expose the SVE system registers and ID register fields. Because these need to be conditionally visible based on the guest configuration, they are implemented separately for now rather than by use of the generic system register helpers. This may be abstracted better later on when/if there are more features requiring this model. ID_AA64ZFR0_EL1 is RO-RAZ for MRS/MSR when SVE is disabled for the guest, but for compatibility with non-SVE aware KVM implementations the register should not be enumerated at all for KVM_GET_REG_LIST in this case. For consistency we also reject ioctl access to the register. This ensures that a non-SVE-enabled guest looks the same to userspace, irrespective of whether the kernel KVM implementation supports SVE. Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/sysreg.h | 3 ++ arch/arm64/kvm/fpsimd.c | 9 ++++- arch/arm64/kvm/hyp/switch.c | 3 ++ arch/arm64/kvm/sys_regs.c | 83 ++++++++++++++++++++++++++++++++++++--- 5 files changed, 93 insertions(+), 6 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index ad4f7f004498..22cf484b561f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -121,6 +121,7 @@ enum vcpu_sysreg { SCTLR_EL1, /* System Control Register */ ACTLR_EL1, /* Auxiliary Control Register */ CPACR_EL1, /* Coprocessor Access Control */ + ZCR_EL1, /* SVE Control */ TTBR0_EL1, /* Translation Table Base Register 0 */ TTBR1_EL1, /* Translation Table Base Register 1 */ TCR_EL1, /* Translation Control Register */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 5b267dec6194..4d6262df79bb 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -454,6 +454,9 @@ #define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) +/* VHE encodings for architectural EL0/1 system registers */ +#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) + /* Common SCTLR_ELx flags. 
*/ #define SCTLR_ELx_DSSBS (_BITUL(44)) #define SCTLR_ELx_ENIA (_BITUL(31)) diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 1cf4f0269471..7053bf402131 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -103,14 +103,21 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) { unsigned long flags; + bool host_has_sve = system_supports_sve(); + bool guest_has_sve = vcpu_has_sve(vcpu); local_irq_save(flags); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { + u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1]; + /* Clean guest FP state to memory and invalidate cpu view */ fpsimd_save(); fpsimd_flush_cpu_state(); - } else if (system_supports_sve()) { + + if (guest_has_sve) + *guest_zcr = read_sysreg_s(SYS_ZCR_EL12); + } else if (host_has_sve) { /* * The FPSIMD/SVE state in the CPU has not been touched, and we * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 3563fe655cd5..9d46066276b9 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -351,6 +351,9 @@ static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); + if (vcpu_has_sve(vcpu)) + write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12); + /* Skip restoring fpexc32 for AArch64 guests */ if (!(read_sysreg(hcr_el2) & HCR_RW)) write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2], diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index c86a7b0d3e6b..09e9b0625911 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1051,10 +1051,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); u64 val = raz ? 
0 : read_sanitised_ftr_reg(id); - if (id == SYS_ID_AA64PFR0_EL1) { - if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) - kvm_debug("SVE unsupported for guests, suppressing\n"); - + if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); } else if (id == SYS_ID_AA64ISAR1_EL1) { const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) | @@ -1101,6 +1098,81 @@ static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); static u64 sys_reg_to_index(const struct sys_reg_desc *reg); +/* Visibility overrides for SVE-specific control registers */ +static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (vcpu_has_sve(vcpu)) + return 0; + + return REG_HIDDEN_USER | REG_HIDDEN_GUEST; +} + +/* Visibility overrides for SVE-specific ID registers */ +static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (vcpu_has_sve(vcpu)) + return 0; + + return REG_HIDDEN_USER; +} + +/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */ +static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu) +{ + if (!vcpu_has_sve(vcpu)) + return 0; + + return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1); +} + +static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + if (p->is_write) + return write_to_read_only(vcpu, p, rd); + + p->regval = guest_id_aa64zfr0_el1(vcpu); + return true; +} + +static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + u64 val; + + if (!vcpu_has_sve(vcpu)) + return -ENOENT; + + val = guest_id_aa64zfr0_el1(vcpu); + return reg_to_user(uaddr, &val, reg->id); +} + +static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + const u64 id = sys_reg_to_index(rd); + int err; + u64 val; + + if (!vcpu_has_sve(vcpu)) + return -ENOENT; + + err = reg_from_user(&val, uaddr, id); + if (err) + return err; + + /* This is what we mean by invariant: you can't change it. 
*/ + if (val != guest_id_aa64zfr0_el1(vcpu)) + return -EINVAL; + + return 0; +} + /* * cpufeature ID register user accessors * @@ -1346,7 +1418,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_SANITISED(ID_AA64PFR1_EL1), ID_UNALLOCATED(4,2), ID_UNALLOCATED(4,3), - ID_UNALLOCATED(4,4), + { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility }, ID_UNALLOCATED(4,5), ID_UNALLOCATED(4,6), ID_UNALLOCATED(4,7), @@ -1383,6 +1455,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, + { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, -- cgit v1.2.3 From b43b5dd990eb28047dafe639ce44db347496cb56 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:17 +0100 Subject: KVM: arm64/sve: Context switch the SVE registers In order to give each vcpu its own view of the SVE registers, this patch adds context storage via a new sve_state pointer in struct vcpu_arch. An additional member sve_max_vl is also added for each vcpu, to determine the maximum vector length visible to the guest and thus the value to be configured in ZCR_EL2.LEN while the vcpu is active. This also determines the layout and size of the storage in sve_state, which is read and written by the same backend functions that are used for context-switching the SVE state for host tasks. On SVE-enabled vcpus, SVE access traps are now handled by switching in the vcpu's SVE context and disabling the trap before returning to the guest. On other vcpus, the trap is not handled and an exit back to the host occurs, where the handle_sve() fallback path reflects an undefined instruction exception back to the guest, consistently with the behaviour of non-SVE-capable hardware (as was done unconditionally prior to this patch). No SVE handling is added on non-VHE-only paths, since VHE is an architectural and Kconfig prerequisite of SVE. 
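As a small worked example (illustrative, not part of the patch) of the vector-length arithmetic used in the hunks below: sve_max_vl is held in bytes, the vector quantum VQ counts 128-bit (16-byte) granules, and the value handed to sve_load_state() is vq - 1, which is also how a ZCR LEN field encodes a vector length.

/* Worked example, illustration only (values are not taken from any real system) */
#include <stdio.h>

int main(void)
{
	unsigned int sve_max_vl = 64;		/* vector length in bytes: 512-bit Z registers */
	unsigned int vq = sve_max_vl / 16;	/* sve_vq_from_vl(): number of 128-bit granules */
	unsigned int vq_minus_1 = vq - 1;	/* what sve_load_state() is passed; ZCR-style LEN encoding */

	printf("vq = %u, vq - 1 = %u\n", vq, vq_minus_1);	/* prints vq = 4, vq - 1 = 3 */
	return 0;
}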
Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 6 ++++ arch/arm64/kvm/fpsimd.c | 5 +-- arch/arm64/kvm/hyp/switch.c | 75 +++++++++++++++++++++++++++++---------- 3 files changed, 66 insertions(+), 20 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 22cf484b561f..4fabfd250de8 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -228,6 +228,8 @@ struct vcpu_reset_state { struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; + void *sve_state; + unsigned int sve_max_vl; /* HYP configuration */ u64 hcr_el2; @@ -323,6 +325,10 @@ struct kvm_vcpu_arch { bool sysregs_loaded_on_cpu; }; +/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ +#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \ + sve_ffr_offset((vcpu)->arch.sve_max_vl))) + /* vcpu_arch flags field values: */ #define KVM_ARM64_DEBUG_DIRTY (1 << 0) #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 7053bf402131..6e3c9c8b2df9 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -87,10 +87,11 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs, - NULL, SVE_VL_MIN); + vcpu->arch.sve_state, + vcpu->arch.sve_max_vl); clear_thread_flag(TIF_FOREIGN_FPSTATE); - clear_thread_flag(TIF_SVE); + update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu)); } } diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 9d46066276b9..5444b9c6fb5c 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -100,7 +100,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; val &= ~CPACR_EL1_ZEN; - if (!update_fp_enabled(vcpu)) { + if (update_fp_enabled(vcpu)) { + if (vcpu_has_sve(vcpu)) + val |= CPACR_EL1_ZEN; + } else { val &= ~CPACR_EL1_FPEN; __activate_traps_fpsimd32(vcpu); } @@ -317,16 +320,48 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) return true; } -static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) +/* Check for an FPSIMD/SVE trap and handle as appropriate */ +static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) { - struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state; + bool vhe, sve_guest, sve_host; + u8 hsr_ec; - if (has_vhe()) - write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN, - cpacr_el1); - else + if (!system_supports_fpsimd()) + return false; + + if (system_supports_sve()) { + sve_guest = vcpu_has_sve(vcpu); + sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE; + vhe = true; + } else { + sve_guest = false; + sve_host = false; + vhe = has_vhe(); + } + + hsr_ec = kvm_vcpu_trap_get_class(vcpu); + if (hsr_ec != ESR_ELx_EC_FP_ASIMD && + hsr_ec != ESR_ELx_EC_SVE) + return false; + + /* Don't handle SVE traps for non-SVE vcpus here: */ + if (!sve_guest) + if (hsr_ec != ESR_ELx_EC_FP_ASIMD) + return false; + + /* Valid trap. 
Switch the context: */ + + if (vhe) { + u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN; + + if (sve_guest) + reg |= CPACR_EL1_ZEN; + + write_sysreg(reg, cpacr_el1); + } else { write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, cptr_el2); + } isb(); @@ -335,24 +370,28 @@ static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) * In the SVE case, VHE is assumed: it is enforced by * Kconfig and kvm_arch_init(). */ - if (system_supports_sve() && - (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) { + if (sve_host) { struct thread_struct *thread = container_of( - host_fpsimd, + vcpu->arch.host_fpsimd_state, struct thread_struct, uw.fpsimd_state); - sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr); + sve_save_state(sve_pffr(thread), + &vcpu->arch.host_fpsimd_state->fpsr); } else { - __fpsimd_save_state(host_fpsimd); + __fpsimd_save_state(vcpu->arch.host_fpsimd_state); } vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; } - __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); - - if (vcpu_has_sve(vcpu)) + if (sve_guest) { + sve_load_state(vcpu_sve_pffr(vcpu), + &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr, + sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1); write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12); + } else { + __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); + } /* Skip restoring fpexc32 for AArch64 guests */ if (!(read_sysreg(hcr_el2) & HCR_RW)) @@ -388,10 +427,10 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) * and restore the guest context lazily. * If FP/SIMD is not implemented, handle the trap and inject an * undefined instruction exception to the guest. + * Similarly for trapped SVE accesses. */ - if (system_supports_fpsimd() && - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD) - return __hyp_switch_fpsimd(vcpu); + if (__hyp_handle_fpsimd(vcpu)) + return true; if (!__populate_fault_info(vcpu)) return true; -- cgit v1.2.3 From e1c9c98345b356ad2890ac7e9223593cae8b4dba Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:19 +0100 Subject: KVM: arm64/sve: Add SVE support to register access ioctl interface This patch adds the following registers for access via the KVM_{GET,SET}_ONE_REG interface: * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices) * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices) * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices) In order to adapt gracefully to future architectural extensions, the registers are logically divided up into slices as noted above: the i parameter denotes the slice index. This allows us to reserve space in the ABI for future expansion of these registers. However, as of today the architecture does not permit registers to be larger than a single slice, so no code is needed in the kernel to expose additional slices, for now. The code can be extended later as needed to expose them up to a maximum of 32 slices (as carved out in the architecture itself) if they really exist someday. The registers are only visible for vcpus that have SVE enabled. They are not enumerated by KVM_GET_REG_LIST on vcpus that do not have SVE. Accesses to the FPSIMD registers via KVM_REG_ARM_CORE is not allowed for SVE-enabled vcpus: SVE-aware userspace can use the KVM_REG_ARM64_SVE_ZREG() interface instead to access the same register state. This avoids some complex and pointless emulation in the kernel to convert between the two views of these aliased registers. 
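For illustration, a minimal userspace sketch of the new interface follows (assumptions: vcpu_fd is an already SVE-enabled vcpu, the uapi headers in use provide the KVM_REG_ARM64_SVE_ZREG() encoding added below, and error handling is reduced to the ioctl return value).

/*
 * Illustrative userspace sketch, not part of the patch: read 2048-bit
 * slice 0 of Z3 from an SVE-enabled vcpu.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_z3(int vcpu_fd, uint8_t out[256])
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64_SVE_ZREG(3, 0),	/* register 3, slice 0 */
		.addr = (uint64_t)(uintptr_t)out,
	};

	/* Returns 0 on success; bytes beyond the vcpu's VL read back as zero */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Because each Z register is exposed as a 2048-bit slice, the destination buffer is always 256 bytes; per the region logic below, bytes beyond the vcpu's actual vector length are cleared on read.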
Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 14 ++++ arch/arm64/include/uapi/asm/kvm.h | 17 +++++ arch/arm64/kvm/guest.c | 139 ++++++++++++++++++++++++++++++++++---- 3 files changed, 158 insertions(+), 12 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4fabfd250de8..205438a108f6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -329,6 +329,20 @@ struct kvm_vcpu_arch { #define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \ sve_ffr_offset((vcpu)->arch.sve_max_vl))) +#define vcpu_sve_state_size(vcpu) ({ \ + size_t __size_ret; \ + unsigned int __vcpu_vq; \ + \ + if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \ + __size_ret = 0; \ + } else { \ + __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \ + __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \ + } \ + \ + __size_ret; \ +}) + /* vcpu_arch flags field values: */ #define KVM_ARM64_DEBUG_DIRTY (1 << 0) #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 97c3478ee6e7..ced760cc8478 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -226,6 +226,23 @@ struct kvm_vcpu_events { KVM_REG_ARM_FW | ((r) & 0xffff)) #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) +/* SVE registers */ +#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) + +/* Z- and P-regs occupy blocks at the following offsets within this range: */ +#define KVM_REG_ARM64_SVE_ZREG_BASE 0 +#define KVM_REG_ARM64_SVE_PREG_BASE 0x400 + +#define KVM_REG_ARM64_SVE_ZREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \ + KVM_REG_ARM64_SVE_ZREG_BASE | \ + KVM_REG_SIZE_U2048 | \ + ((n) << 5) | (i)) +#define KVM_REG_ARM64_SVE_PREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \ + KVM_REG_ARM64_SVE_PREG_BASE | \ + KVM_REG_SIZE_U256 | \ + ((n) << 5) | (i)) +#define KVM_REG_ARM64_SVE_FFR(i) KVM_REG_ARM64_SVE_PREG(16, i) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 756d0d614993..736d8cb8dad7 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -19,8 +19,11 @@ * along with this program. If not, see . 
*/ +#include #include #include +#include +#include #include #include #include @@ -30,9 +33,12 @@ #include #include #include +#include #include #include #include +#include +#include #include "trace.h" @@ -200,6 +206,115 @@ out: return err; } +#define SVE_REG_SLICE_SHIFT 0 +#define SVE_REG_SLICE_BITS 5 +#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS) +#define SVE_REG_ID_BITS 5 + +#define SVE_REG_SLICE_MASK \ + GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \ + SVE_REG_SLICE_SHIFT) +#define SVE_REG_ID_MASK \ + GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT) + +#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS) + +#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0)) +#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0)) + +/* Bounds of a single SVE register slice within vcpu->arch.sve_state */ +struct sve_state_reg_region { + unsigned int koffset; /* offset into sve_state in kernel memory */ + unsigned int klen; /* length in kernel memory */ + unsigned int upad; /* extra trailing padding in user memory */ +}; + +/* Get sanitised bounds for user/kernel SVE register copy */ +static int sve_reg_to_region(struct sve_state_reg_region *region, + struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + /* reg ID ranges for Z- registers */ + const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0); + const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1, + SVE_NUM_SLICES - 1); + + /* reg ID ranges for P- registers and FFR (which are contiguous) */ + const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0); + const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1); + + unsigned int vq; + unsigned int reg_num; + + unsigned int reqoffset, reqlen; /* User-requested offset and length */ + unsigned int maxlen; /* Maxmimum permitted length */ + + size_t sve_state_size; + + /* Only the first slice ever exists, for now: */ + if ((reg->id & SVE_REG_SLICE_MASK) != 0) + return -ENOENT; + + vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + + reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT; + + if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) { + reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) - + SVE_SIG_REGS_OFFSET; + reqlen = KVM_SVE_ZREG_SIZE; + maxlen = SVE_SIG_ZREG_SIZE(vq); + } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) { + reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) - + SVE_SIG_REGS_OFFSET; + reqlen = KVM_SVE_PREG_SIZE; + maxlen = SVE_SIG_PREG_SIZE(vq); + } else { + return -ENOENT; + } + + sve_state_size = vcpu_sve_state_size(vcpu); + if (!sve_state_size) + return -EINVAL; + + region->koffset = array_index_nospec(reqoffset, sve_state_size); + region->klen = min(maxlen, reqlen); + region->upad = reqlen - region->klen; + + return 0; +} + +static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + struct sve_state_reg_region region; + char __user *uptr = (char __user *)reg->addr; + + if (!vcpu_has_sve(vcpu) || sve_reg_to_region(®ion, vcpu, reg)) + return -ENOENT; + + if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset, + region.klen) || + clear_user(uptr + region.klen, region.upad)) + return -EFAULT; + + return 0; +} + +static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + struct sve_state_reg_region region; + const char __user *uptr = (const char __user *)reg->addr; + + if (!vcpu_has_sve(vcpu) || sve_reg_to_region(®ion, vcpu, reg)) + return -ENOENT; + + if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr, + region.klen)) + 
return -EFAULT; + + return 0; +} + int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { return -EINVAL; @@ -346,12 +461,12 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) return -EINVAL; - /* Register group 16 means we want a core register. */ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return get_core_reg(vcpu, reg); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) - return kvm_arm_get_fw_reg(vcpu, reg); + switch (reg->id & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg); + case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg); + case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg); + default: break; /* fall through */ + } if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -365,12 +480,12 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) return -EINVAL; - /* Register group 16 means we set a core register. */ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return set_core_reg(vcpu, reg); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) - return kvm_arm_set_fw_reg(vcpu, reg); + switch (reg->id & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg); + case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg); + case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg); + default: break; /* fall through */ + } if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); -- cgit v1.2.3 From ead9e430c0fba1a7aa6d948a626c48ef9c4aa069 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 28 Sep 2018 14:39:21 +0100 Subject: arm64/sve: In-kernel vector length availability query interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KVM will need to interrogate the set of SVE vector lengths available on the system. This patch exposes the relevant bits to the kernel, along with a sve_vq_available() helper to check whether a particular vector length is supported. __vq_to_bit() and __bit_to_vq() are not intended for use outside these functions: now that these are exposed outside fpsimd.c, they are prefixed with __ in order to provide an extra hint that they are not intended for general-purpose use. Signed-off-by: Dave Martin Reviewed-by: Alex Bennée Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/fpsimd.h | 29 +++++++++++++++++++++++++++++ arch/arm64/kernel/fpsimd.c | 35 ++++++++--------------------------- 2 files changed, 37 insertions(+), 27 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index df7a14305222..ad6d2e41eb37 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -24,10 +24,13 @@ #ifndef __ASSEMBLY__ +#include #include +#include #include #include #include +#include #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* Masks for extracting the FPSR and FPCR from the FPSCR */ @@ -89,6 +92,32 @@ extern u64 read_zcr_features(void); extern int __ro_after_init sve_max_vl; extern int __ro_after_init sve_max_virtualisable_vl; +/* Set of available vector lengths, as vq_to_bit(vq): */ +extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); + +/* + * Helpers to translate bit indices in sve_vq_map to VQ values (and + * vice versa). 
This allows find_next_bit() to be used to find the + * _maximum_ VQ not exceeding a certain value. + */ +static inline unsigned int __vq_to_bit(unsigned int vq) +{ + return SVE_VQ_MAX - vq; +} + +static inline unsigned int __bit_to_vq(unsigned int bit) +{ + if (WARN_ON(bit >= SVE_VQ_MAX)) + bit = SVE_VQ_MAX - 1; + + return SVE_VQ_MAX - bit; +} + +/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */ +static inline bool sve_vq_available(unsigned int vq) +{ + return test_bit(__vq_to_bit(vq), sve_vq_map); +} #ifdef CONFIG_ARM64_SVE diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 8a93afa78600..577296bba730 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -136,7 +136,7 @@ static int sve_default_vl = -1; int __ro_after_init sve_max_vl = SVE_VL_MIN; int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN; /* Set of available vector lengths, as vq_to_bit(vq): */ -static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); +__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); /* Set of vector lengths present on at least one cpu: */ static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); static void __percpu *efi_sve_state; @@ -269,25 +269,6 @@ void fpsimd_save(void) } } -/* - * Helpers to translate bit indices in sve_vq_map to VQ values (and - * vice versa). This allows find_next_bit() to be used to find the - * _maximum_ VQ not exceeding a certain value. - */ - -static unsigned int vq_to_bit(unsigned int vq) -{ - return SVE_VQ_MAX - vq; -} - -static unsigned int bit_to_vq(unsigned int bit) -{ - if (WARN_ON(bit >= SVE_VQ_MAX)) - bit = SVE_VQ_MAX - 1; - - return SVE_VQ_MAX - bit; -} - /* * All vector length selection from userspace comes through here. * We're on a slow path, so some sanity-checks are included. @@ -309,8 +290,8 @@ static unsigned int find_supported_vector_length(unsigned int vl) vl = max_vl; bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, - vq_to_bit(sve_vq_from_vl(vl))); - return sve_vl_from_vq(bit_to_vq(bit)); + __vq_to_bit(sve_vq_from_vl(vl))); + return sve_vl_from_vq(__bit_to_vq(bit)); } #ifdef CONFIG_SYSCTL @@ -648,7 +629,7 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */ vl = sve_get_vl(); vq = sve_vq_from_vl(vl); /* skip intervening lengths */ - set_bit(vq_to_bit(vq), map); + set_bit(__vq_to_bit(vq), map); } } @@ -717,7 +698,7 @@ int sve_verify_vq_map(void) * Mismatches above sve_max_virtualisable_vl are fine, since * no guest is allowed to configure ZCR_EL2.LEN to exceed this: */ - if (sve_vl_from_vq(bit_to_vq(b)) <= sve_max_virtualisable_vl) { + if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) { pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n", smp_processor_id()); return -EINVAL; @@ -801,8 +782,8 @@ void __init sve_setup(void) * so sve_vq_map must have at least SVE_VQ_MIN set. * If something went wrong, at least try to patch it up: */ - if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map))) - set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map); + if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map))) + set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map); zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1); @@ -831,7 +812,7 @@ void __init sve_setup(void) /* No virtualisable VLs? This is architecturally forbidden. 
*/ sve_max_virtualisable_vl = SVE_VQ_MIN; else /* b + 1 < SVE_VQ_MAX */ - sve_max_virtualisable_vl = sve_vl_from_vq(bit_to_vq(b + 1)); + sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1)); if (sve_max_virtualisable_vl > sve_max_vl) sve_max_virtualisable_vl = sve_max_vl; -- cgit v1.2.3 From 0f062bfe36b63cd24f16afe2822d0df7c27904d9 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 28 Feb 2019 18:33:00 +0000 Subject: KVM: arm/arm64: Add hook for arch-specific KVM initialisation This patch adds a kvm_arm_init_arch_resources() hook to perform subarch-specific initialisation when starting up KVM. This will be used in a subsequent patch for global SVE-related setup on arm64. No functional change. Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 2 ++ arch/arm64/include/asm/kvm_host.h | 2 ++ virt/kvm/arm/arm.c | 4 ++++ 3 files changed, 8 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 770d73257ad9..a49ee01242ff 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -53,6 +53,8 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); +static inline int kvm_arm_init_arch_resources(void) { return 0; } + u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 205438a108f6..3e8950942591 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -58,6 +58,8 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); +static inline int kvm_arm_init_arch_resources(void) { return 0; } + int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 99c37384ba7b..c69e1370a5dc 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1664,6 +1664,10 @@ int kvm_arch_init(void *opaque) if (err) return err; + err = kvm_arm_init_arch_resources(); + if (err) + return err; + if (!in_hyp_mode) { err = init_hyp_mode(); if (err) -- cgit v1.2.3 From 7dd32a0d0103a5941efbb971f85a3e930cc5665e Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Wed, 19 Dec 2018 14:27:01 +0000 Subject: KVM: arm/arm64: Add KVM_ARM_VCPU_FINALIZE ioctl Some aspects of vcpu configuration may be too complex to be completed inside KVM_ARM_VCPU_INIT. Thus, there may be a requirement for userspace to do some additional configuration before various other ioctls will work in a consistent way. In particular this will be the case for SVE, where userspace will need to negotiate the set of vector lengths to be made available to the guest before the vcpu becomes fully usable. In order to provide an explicit way for userspace to confirm that it has finished setting up a particular vcpu feature, this patch adds a new ioctl KVM_ARM_VCPU_FINALIZE. When userspace has opted into a feature that requires finalization, typically by means of a feature flag passed to KVM_ARM_VCPU_INIT, a matching call to KVM_ARM_VCPU_FINALIZE is now required before KVM_RUN or KVM_GET_REG_LIST is allowed. Individual features may impose additional restrictions where appropriate. 
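A hedged sketch of the eventual userspace flow (illustration only: KVM_ARM_VCPU_SVE and KVM_REG_ARM64_SVE_VLS are introduced by later patches in this series, init is assumed to already carry the relevant feature bit in features[0], and error handling is omitted):

/*
 * Illustrative userspace flow only; not code from this patch.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

void init_and_finalize_sve(int vcpu_fd, struct kvm_vcpu_init *init)
{
	int feature = KVM_ARM_VCPU_SVE;

	/* 1. Opt in to the finalizable feature */
	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init);

	/* 2. Configure it, e.g. set KVM_REG_ARM64_SVE_VLS via KVM_SET_ONE_REG */

	/* 3. Declare the configuration complete */
	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);

	/* Only after step 3 are KVM_RUN and KVM_GET_REG_LIST permitted */
}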
No existing vcpu features are affected by this, so current userspace implementations will continue to work exactly as before, with no need to issue KVM_ARM_VCPU_FINALIZE. As implemented in this patch, KVM_ARM_VCPU_FINALIZE is currently a placeholder: no finalizable features exist yet, so ioctl is not required and will always yield EINVAL. Subsequent patches will add the finalization logic to make use of this ioctl for SVE. No functional change for existing userspace. Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 4 ++++ arch/arm64/include/asm/kvm_host.h | 4 ++++ include/uapi/linux/kvm.h | 3 +++ virt/kvm/arm/arm.c | 18 ++++++++++++++++++ 4 files changed, 29 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index a49ee01242ff..e80cfc18412b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -19,6 +19,7 @@ #ifndef __ARM_KVM_HOST_H__ #define __ARM_KVM_HOST_H__ +#include #include #include #include @@ -411,4 +412,7 @@ static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) return 0; } +#define kvm_arm_vcpu_finalize(vcpu, what) (-EINVAL) +#define kvm_arm_vcpu_is_finalized(vcpu) true + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3e8950942591..98658f7dad68 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -23,6 +23,7 @@ #define __ARM64_KVM_HOST_H__ #include +#include #include #include #include @@ -625,4 +626,7 @@ void kvm_arch_free_vm(struct kvm *kvm); int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); +#define kvm_arm_vcpu_finalize(vcpu, what) (-EINVAL) +#define kvm_arm_vcpu_is_finalized(vcpu) true + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index dc77a5a3648d..c3b8e7a31315 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1441,6 +1441,9 @@ struct kvm_enc_region { /* Available with KVM_CAP_HYPERV_CPUID */ #define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) +/* Available with KVM_CAP_ARM_SVE */ +#define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int) + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index c69e1370a5dc..9edbf0f676e7 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -545,6 +545,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) if (likely(vcpu->arch.has_run_once)) return 0; + if (!kvm_arm_vcpu_is_finalized(vcpu)) + return -EPERM; + vcpu->arch.has_run_once = true; if (likely(irqchip_in_kernel(kvm))) { @@ -1116,6 +1119,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (unlikely(!kvm_vcpu_initialized(vcpu))) break; + r = -EPERM; + if (!kvm_arm_vcpu_is_finalized(vcpu)) + break; + r = -EFAULT; if (copy_from_user(®_list, user_list, sizeof(reg_list))) break; @@ -1169,6 +1176,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return kvm_arm_vcpu_set_events(vcpu, &events); } + case KVM_ARM_VCPU_FINALIZE: { + int what; + + if (!kvm_vcpu_initialized(vcpu)) + return -ENOEXEC; + + if (get_user(what, (const int __user *)argp)) + return -EFAULT; + + return kvm_arm_vcpu_finalize(vcpu, what); + } default: r = -EINVAL; } -- cgit v1.2.3 From 9033bba4b53527b57bec217509a967a25cb19357 Mon Sep 17 00:00:00 2001 From: Dave Martin 
Date: Thu, 28 Feb 2019 18:46:44 +0000 Subject: KVM: arm64/sve: Add pseudo-register for the guest's vector lengths This patch adds a new pseudo-register KVM_REG_ARM64_SVE_VLS to allow userspace to set and query the set of vector lengths visible to the guest. In the future, multiple register slices per SVE register may be visible through the ioctl interface. Once the set of slices has been determined we would not be able to allow the vector length set to be changed any more, in order to avoid userspace seeing inconsistent sets of registers. For this reason, this patch adds support for explicit finalization of the SVE configuration via the KVM_ARM_VCPU_FINALIZE ioctl. Finalization is the proper place to allocate the SVE register state storage in vcpu->arch.sve_state, so this patch adds that as appropriate. The data is freed via kvm_arch_vcpu_uninit(), which was previously a no-op on arm64. To simplify the logic for determining what vector lengths can be supported, some code is added to KVM init to work this out, in the kvm_arm_init_arch_resources() hook. The KVM_REG_ARM64_SVE_VLS pseudo-register is not exposed yet. Subsequent patches will allow SVE to be turned on for guest vcpus, making it visible. Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 15 +++-- arch/arm64/include/uapi/asm/kvm.h | 5 ++ arch/arm64/kvm/guest.c | 114 +++++++++++++++++++++++++++++++++++++- arch/arm64/kvm/reset.c | 89 +++++++++++++++++++++++++++++ 4 files changed, 215 insertions(+), 8 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 98658f7dad68..5475cc4df35c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -23,7 +23,6 @@ #define __ARM64_KVM_HOST_H__ #include -#include #include #include #include @@ -50,6 +49,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS +/* Will be incremented when KVM_ARM_VCPU_SVE is fully implemented: */ #define KVM_VCPU_MAX_FEATURES 4 #define KVM_REQ_SLEEP \ @@ -59,10 +59,12 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); -static inline int kvm_arm_init_arch_resources(void) { return 0; } +extern unsigned int kvm_sve_max_vl; +int kvm_arm_init_arch_resources(void); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); @@ -353,6 +355,7 @@ struct kvm_vcpu_arch { #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ #define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ +#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ #define vcpu_has_sve(vcpu) (system_supports_sve() && \ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) @@ -525,7 +528,6 @@ static inline bool kvm_arch_requires_vhe(void) static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} @@ -626,7 +628,10 @@ void kvm_arch_free_vm(struct kvm *kvm); int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); 
-#define kvm_arm_vcpu_finalize(vcpu, what) (-EINVAL) -#define kvm_arm_vcpu_is_finalized(vcpu) true +int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int what); +bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); + +#define kvm_arm_vcpu_sve_finalized(vcpu) \ + ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index ced760cc8478..6963b7e8062b 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -102,6 +102,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ #define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ +#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ struct kvm_vcpu_init { __u32 target; @@ -243,6 +244,10 @@ struct kvm_vcpu_events { ((n) << 5) | (i)) #define KVM_REG_ARM64_SVE_FFR(i) KVM_REG_ARM64_SVE_PREG(16, i) +/* Vector lengths pseudo-register: */ +#define KVM_REG_ARM64_SVE_VLS (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \ + KVM_REG_SIZE_U512 | 0xffff) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 2aa80a59e2a2..086ab0508d69 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -206,6 +206,73 @@ out: return err; } +#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64) +#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64) + +static bool vq_present( + const u64 (*const vqs)[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)], + unsigned int vq) +{ + return (*vqs)[vq_word(vq)] & vq_mask(vq); +} + +static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + unsigned int max_vq, vq; + u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)]; + + if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl))) + return -EINVAL; + + memset(vqs, 0, sizeof(vqs)); + + max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) + if (sve_vq_available(vq)) + vqs[vq_word(vq)] |= vq_mask(vq); + + if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs))) + return -EFAULT; + + return 0; +} + +static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + unsigned int max_vq, vq; + u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)]; + + if (kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; /* too late! 
*/ + + if (WARN_ON(vcpu->arch.sve_state)) + return -EINVAL; + + if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs))) + return -EFAULT; + + max_vq = 0; + for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq) + if (vq_present(&vqs, vq)) + max_vq = vq; + + if (max_vq > sve_vq_from_vl(kvm_sve_max_vl)) + return -EINVAL; + + for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) + if (vq_present(&vqs, vq) != sve_vq_available(vq)) + return -EINVAL; + + /* Can't run with no vector lengths at all: */ + if (max_vq < SVE_VQ_MIN) + return -EINVAL; + + /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */ + vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq); + + return 0; +} + #define SVE_REG_SLICE_SHIFT 0 #define SVE_REG_SLICE_BITS 5 #define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS) @@ -296,7 +363,19 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) struct sve_state_reg_region region; char __user *uptr = (char __user *)reg->addr; - if (!vcpu_has_sve(vcpu) || sve_reg_to_region(®ion, vcpu, reg)) + if (!vcpu_has_sve(vcpu)) + return -ENOENT; + + /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ + if (reg->id == KVM_REG_ARM64_SVE_VLS) + return get_sve_vls(vcpu, reg); + + /* Otherwise, reg is an architectural SVE register... */ + + if (!kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; + + if (sve_reg_to_region(®ion, vcpu, reg)) return -ENOENT; if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset, @@ -312,7 +391,19 @@ static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) struct sve_state_reg_region region; const char __user *uptr = (const char __user *)reg->addr; - if (!vcpu_has_sve(vcpu) || sve_reg_to_region(®ion, vcpu, reg)) + if (!vcpu_has_sve(vcpu)) + return -ENOENT; + + /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ + if (reg->id == KVM_REG_ARM64_SVE_VLS) + return set_sve_vls(vcpu, reg); + + /* Otherwise, reg is an architectural SVE register... 
*/ + + if (!kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; + + if (sve_reg_to_region(®ion, vcpu, reg)) return -ENOENT; if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr, @@ -426,7 +517,11 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu) if (!vcpu_has_sve(vcpu)) return 0; - return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */); + /* Policed by KVM_GET_REG_LIST: */ + WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); + + return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */) + + 1; /* KVM_REG_ARM64_SVE_VLS */ } static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu, @@ -441,6 +536,19 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu, if (!vcpu_has_sve(vcpu)) return 0; + /* Policed by KVM_GET_REG_LIST: */ + WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); + + /* + * Enumerate this first, so that userspace can save/restore in + * the order reported by KVM_GET_REG_LIST: + */ + reg = KVM_REG_ARM64_SVE_VLS; + if (put_user(reg, uindices++)) + return -EFAULT; + + ++num_regs; + for (i = 0; i < slices; i++) { for (n = 0; n < SVE_NUM_ZREGS; n++) { reg = KVM_REG_ARM64_SVE_ZREG(n, i); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f16a5f8ff2b4..e7f9c06fdbbb 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -23,11 +23,14 @@ #include #include #include +#include +#include #include #include #include +#include #include #include #include @@ -99,6 +102,92 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) return r; } +unsigned int kvm_sve_max_vl; + +int kvm_arm_init_arch_resources(void) +{ + if (system_supports_sve()) { + kvm_sve_max_vl = sve_max_virtualisable_vl; + + /* + * The get_sve_reg()/set_sve_reg() ioctl interface will need + * to be extended with multiple register slice support in + * order to support vector lengths greater than + * SVE_VL_ARCH_MAX: + */ + if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX)) + kvm_sve_max_vl = SVE_VL_ARCH_MAX; + + /* + * Don't even try to make use of vector lengths that + * aren't available on all CPUs, for now: + */ + if (kvm_sve_max_vl < sve_max_vl) + pr_warn("KVM: SVE vector length for guests limited to %u bytes\n", + kvm_sve_max_vl); + } + + return 0; +} + +/* + * Finalize vcpu's maximum SVE vector length, allocating + * vcpu->arch.sve_state as necessary. + */ +static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) +{ + void *buf; + unsigned int vl; + + vl = vcpu->arch.sve_max_vl; + + /* + * Resposibility for these properties is shared between + * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and + * set_sve_vls(). 
Double-check here just to be sure: + */ + if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl || + vl > SVE_VL_ARCH_MAX)) + return -EIO; + + buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + vcpu->arch.sve_state = buf; + vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED; + return 0; +} + +int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int what) +{ + switch (what) { + case KVM_ARM_VCPU_SVE: + if (!vcpu_has_sve(vcpu)) + return -EINVAL; + + if (kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; + + return kvm_vcpu_finalize_sve(vcpu); + } + + return -EINVAL; +} + +bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) +{ + if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu)) + return false; + + return true; +} + +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + kfree(vcpu->arch.sve_state); +} + /** * kvm_reset_vcpu - sets core registers and sys_regs to reset value * @vcpu: The VCPU pointer -- cgit v1.2.3 From 9a3cdf26e3363ec5460ebe20c508114fa63bcf26 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 28 Feb 2019 18:56:50 +0000 Subject: KVM: arm64/sve: Allow userspace to enable SVE for vcpus Now that all the pieces are in place, this patch offers a new flag KVM_ARM_VCPU_SVE that userspace can pass to KVM_ARM_VCPU_INIT to turn on SVE for the guest, on a per-vcpu basis. As part of this, support for initialisation and reset of the SVE vector length set and registers is added in the appropriate places, as well as finally setting the KVM_ARM64_GUEST_HAS_SVE vcpu flag, to turn on the SVE support code. Allocation of the SVE register storage in vcpu->arch.sve_state is deferred until the SVE configuration is finalized, by which time the size of the registers is known. Setting the vector lengths supported by the vcpu is considered configuration of the emulated hardware rather than runtime configuration, so no support is offered for changing the vector lengths available to an existing vcpu across reset. 
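As an illustration only (not part of the patch), the intended userspace sequence would look roughly like the sketch below. Here vcpu_fd and preferred_target are assumed to come from the usual KVM_CREATE_VCPU and KVM_ARM_PREFERRED_TARGET setup, the usual <linux/kvm.h> and <sys/ioctl.h> includes are assumed, and error handling is omitted:

    struct kvm_vcpu_init init = { .target = preferred_target };
    __u64 vqs[8];                       /* KVM_REG_SIZE_U512 == 8 x 64-bit words */
    struct kvm_one_reg vls = {
        .id   = KVM_REG_ARM64_SVE_VLS,
        .addr = (__u64)&vqs,
    };
    int feature = KVM_ARM_VCPU_SVE;

    init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
    ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);

    /* Optionally shrink the default vector length set before finalizing: */
    ioctl(vcpu_fd, KVM_GET_ONE_REG, &vls);
    ioctl(vcpu_fd, KVM_SET_ONE_REG, &vls);

    /* The vector length set is frozen from this point on: */
    ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);

Until KVM_ARM_VCPU_FINALIZE succeeds, accesses to the architectural SVE registers fail with -EPERM; after it, writes to KVM_REG_ARM64_SVE_VLS fail with -EPERM instead.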
Signed-off-by: Dave Martin Reviewed-by: Julien Thierry Tested-by: zhang.lei Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 +-- arch/arm64/kvm/reset.c | 43 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 3 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5475cc4df35c..9d57cf8be879 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -49,8 +49,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS -/* Will be incremented when KVM_ARM_VCPU_SVE is fully implemented: */ -#define KVM_VCPU_MAX_FEATURES 4 +#define KVM_VCPU_MAX_FEATURES 5 #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index e7f9c06fdbbb..32c5ac0a3872 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -20,10 +20,12 @@ */ #include +#include #include #include #include #include +#include #include #include @@ -37,6 +39,7 @@ #include #include #include +#include /* Maximum phys_shift supported for any VM on this host */ static u32 kvm_ipa_limit; @@ -130,6 +133,27 @@ int kvm_arm_init_arch_resources(void) return 0; } +static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) +{ + if (!system_supports_sve()) + return -EINVAL; + + /* Verify that KVM startup enforced this when SVE was detected: */ + if (WARN_ON(!has_vhe())) + return -EINVAL; + + vcpu->arch.sve_max_vl = kvm_sve_max_vl; + + /* + * Userspace can still customize the vector lengths by writing + * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until + * kvm_arm_vcpu_finalize(), which freezes the configuration. + */ + vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE; + + return 0; +} + /* * Finalize vcpu's maximum SVE vector length, allocating * vcpu->arch.sve_state as necessary. @@ -188,13 +212,20 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) kfree(vcpu->arch.sve_state); } +static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) +{ + if (vcpu_has_sve(vcpu)) + memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu)); +} + /** * kvm_reset_vcpu - sets core registers and sys_regs to reset value * @vcpu: The VCPU pointer * * This function finds the right table above and sets the registers on * the virtual CPU struct to their architecturally defined reset - * values. + * values, except for registers whose reset is deferred until + * kvm_arm_vcpu_finalize(). * * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT * ioctl or as part of handling a request issued by another VCPU in the PSCI @@ -217,6 +248,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) if (loaded) kvm_arch_vcpu_put(vcpu); + if (!kvm_arm_vcpu_sve_finalized(vcpu)) { + if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) { + ret = kvm_vcpu_enable_sve(vcpu); + if (ret) + goto out; + } + } else { + kvm_vcpu_reset_sve(vcpu); + } + switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { -- cgit v1.2.3 From 624835abf9e26bf27e627fd54c2a2b36972b14f0 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 11 Apr 2019 16:53:18 +0100 Subject: arm64/sve: Clarify vq map semantics Currently the meanings of sve_vq_map and the ancillary helpers __bit_to_vq() and __vq_to_bit() are not clearly explained. This patch makes the explanatory comment clearer, and removes the duplicate comment from fpsimd.h. The WARN_ON() currently present in __bit_to_vq() confuses the intended use of this helper. 
Since these are low-level helpers not intended for general-purpose use anyway, it is better not to make guesses about how these functions will be used: rather, this patch removes the WARN_ON() and relies on callers to use the helpers sensibly. Suggested-by: Andrew Jones Signed-off-by: Dave Martin Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/fpsimd.h | 4 ---- arch/arm64/kernel/fpsimd.c | 7 ++++++- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index ad6d2e41eb37..df62bbd33a9a 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -92,7 +92,6 @@ extern u64 read_zcr_features(void); extern int __ro_after_init sve_max_vl; extern int __ro_after_init sve_max_virtualisable_vl; -/* Set of available vector lengths, as vq_to_bit(vq): */ extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); /* @@ -107,9 +106,6 @@ static inline unsigned int __vq_to_bit(unsigned int vq) static inline unsigned int __bit_to_vq(unsigned int bit) { - if (WARN_ON(bit >= SVE_VQ_MAX)) - bit = SVE_VQ_MAX - 1; - return SVE_VQ_MAX - bit; } diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 577296bba730..56afa40263d9 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -135,10 +135,15 @@ static int sve_default_vl = -1; /* Maximum supported vector length across all CPUs (initially poisoned) */ int __ro_after_init sve_max_vl = SVE_VL_MIN; int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN; -/* Set of available vector lengths, as vq_to_bit(vq): */ + +/* + * Set of available vector lengths, + * where length vq encoded as bit __vq_to_bit(vq): + */ __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); /* Set of vector lengths present on at least one cpu: */ static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); + static void __percpu *efi_sve_state; #else /* ! CONFIG_ARM64_SVE */ -- cgit v1.2.3 From a3be836df7cb777fa8ecbfd662224bfe0394f771 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 12 Apr 2019 15:30:58 +0100 Subject: KVM: arm/arm64: Demote kvm_arm_init_arch_resources() to just set up SVE The introduction of kvm_arm_init_arch_resources() looks like premature factoring, since nothing else uses this hook yet and it is not clear what will use it in the future. For now, let's not pretend that this is a general thing: This patch simply renames the function to kvm_arm_init_sve(), retaining the arm stub version under the new name. 
Suggested-by: Andrew Jones Signed-off-by: Dave Martin Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 2 +- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kvm/reset.c | 2 +- virt/kvm/arm/arm.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index e80cfc18412b..d95627393353 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -54,7 +54,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); -static inline int kvm_arm_init_arch_resources(void) { return 0; } +static inline int kvm_arm_init_sve(void) { return 0; } u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); int __attribute_const__ kvm_target_cpu(void); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 9d57cf8be879..6adf08ba9277 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -59,7 +59,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); extern unsigned int kvm_sve_max_vl; -int kvm_arm_init_arch_resources(void); +int kvm_arm_init_sve(void); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f13378d0a0ad..8847f389f56d 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -110,7 +110,7 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) unsigned int kvm_sve_max_vl; -int kvm_arm_init_arch_resources(void) +int kvm_arm_init_sve(void) { if (system_supports_sve()) { kvm_sve_max_vl = sve_max_virtualisable_vl; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 9edbf0f676e7..7039c99cc217 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1682,7 +1682,7 @@ int kvm_arch_init(void *opaque) if (err) return err; - err = kvm_arm_init_arch_resources(); + err = kvm_arm_init_sve(); if (err) return err; -- cgit v1.2.3 From 92e68b2b1ba004be55b2094fb8b5c11d0b24d11d Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Wed, 10 Apr 2019 17:17:37 +0100 Subject: KVM: arm/arm64: Clean up vcpu finalization function parameter naming Currently, the internal vcpu finalization functions use a different name ("what") for the feature parameter than the name ("feature") used in the documentation. To avoid future confusion, this patch converts everything to use the name "feature" consistently. No functional change. 
Suggested-by: Andrew Jones Signed-off-by: Dave Martin Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 2 +- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kvm/reset.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 7feddacbc207..fe7754315e9c 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -412,7 +412,7 @@ static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) return 0; } -static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int what) +static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) { return -EINVAL; } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6adf08ba9277..7a096fdb333d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -627,7 +627,7 @@ void kvm_arch_free_vm(struct kvm *kvm); int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); -int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int what); +int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature); bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); #define kvm_arm_vcpu_sve_finalized(vcpu) \ diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 8847f389f56d..3402543fdcd3 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -186,9 +186,9 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) return 0; } -int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int what) +int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) { - switch (what) { + switch (feature) { case KVM_ARM_VCPU_SVE: if (!vcpu_has_sve(vcpu)) return -EINVAL; -- cgit v1.2.3 From b890d75c4cdc963c96e7774b088120966c23ab8e Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Tue, 23 Apr 2019 10:12:34 +0530 Subject: KVM: arm64: Add a vcpu flag to control ptrauth for guest A per vcpu flag is added to check if pointer authentication is enabled for the vcpu or not. This flag may be enabled according to the necessary user policies and host capabilities. This patch also adds a helper to check the flag. 
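The helper is intended purely as a guard around guest ptrauth handling; as wired up by the next patch in this series, the expected pattern is simply:

    if (vcpu_has_ptrauth(vcpu))
        vcpu_ptrauth_enable(vcpu);      /* enable/switch keys (added by the next patch) */
    else
        kvm_inject_undefined(vcpu);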
Reviewed-by: Dave Martin Signed-off-by: Amit Daniel Kachhap Cc: Mark Rutland Cc: Marc Zyngier Cc: Christoffer Dall Cc: kvmarm@lists.cs.columbia.edu Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7a096fdb333d..7ccac42a91a6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -355,10 +355,15 @@ struct kvm_vcpu_arch { #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ #define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ #define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ +#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ #define vcpu_has_sve(vcpu) (system_supports_sve() && \ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) +#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \ + system_supports_generic_auth()) && \ + ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)) + #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) /* -- cgit v1.2.3 From 384b40caa8afae44a54e8f69bd37097c0279fdce Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 23 Apr 2019 10:12:35 +0530 Subject: KVM: arm/arm64: Context-switch ptrauth registers When pointer authentication is supported, a guest may wish to use it. This patch adds the necessary KVM infrastructure for this to work, with a semi-lazy context switch of the pointer auth state. Pointer authentication feature is only enabled when VHE is built in the kernel and present in the CPU implementation so only VHE code paths are modified. When we schedule a vcpu, we disable guest usage of pointer authentication instructions and accesses to the keys. While these are disabled, we avoid context-switching the keys. When we trap the guest trying to use pointer authentication functionality, we change to eagerly context-switching the keys, and enable the feature. The next time the vcpu is scheduled out/in, we start again. However the host key save is optimized and implemented inside ptrauth instruction/register access trap. Pointer authentication consists of address authentication and generic authentication, and CPUs in a system might have varied support for either. Where support for either feature is not uniform, it is hidden from guests via ID register emulation, as a result of the cpufeature framework in the host. Unfortunately, address authentication and generic authentication cannot be trapped separately, as the architecture provides a single EL2 trap covering both. If we wish to expose one without the other, we cannot prevent a (badly-written) guest from intermittently using a feature which is not uniformly supported (when scheduled on a physical CPU which supports the relevant feature). Hence, this patch expects both type of authentication to be present in a cpu. This switch of key is done from guest enter/exit assembly as preparation for the upcoming in-kernel pointer authentication support. Hence, these key switching routines are not implemented in C code as they may cause pointer authentication key signing error in some situations. 
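In outline (condensed from the hunks below), each scheduling period for a vcpu with ptrauth enabled runs through:

    vcpu_ptrauth_setup_lazy(vcpu);      /* vcpu_load(): trap API/APK accesses, keys untouched */
    kvm_arm_vcpu_ptrauth_trap(vcpu);    /* first trapped use: save host keys, clear the traps */
    /* __guest_enter/__guest_exit then switch the key registers from assembly for as long as
     * the traps stay disabled; the next vcpu_load() re-arms the traps and starts over. */

A vcpu without ptrauth enabled instead has an UNDEF injected on the first trapped use.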
Signed-off-by: Mark Rutland [Only VHE, key switch in full assembly, vcpu_has_ptrauth checks , save host key in ptrauth exception trap] Signed-off-by: Amit Daniel Kachhap Reviewed-by: Julien Thierry Cc: Christoffer Dall Cc: kvmarm@lists.cs.columbia.edu [maz: various fixups] Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 2 + arch/arm64/Kconfig | 6 +- arch/arm64/include/asm/kvm_emulate.h | 16 +++++ arch/arm64/include/asm/kvm_host.h | 14 +++++ arch/arm64/include/asm/kvm_ptrauth.h | 111 +++++++++++++++++++++++++++++++++++ arch/arm64/kernel/asm-offsets.c | 6 ++ arch/arm64/kvm/handle_exit.c | 36 +++++++++--- arch/arm64/kvm/hyp/entry.S | 15 +++++ arch/arm64/kvm/sys_regs.c | 50 +++++++++++++--- virt/kvm/arm/arm.c | 2 + 10 files changed, 240 insertions(+), 18 deletions(-) create mode 100644 arch/arm64/include/asm/kvm_ptrauth.h (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 8927cae7c966..efb0e2c0d84c 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -343,4 +343,6 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, } } +static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {} + #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7e34b9eba5de..39470784a50c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1288,6 +1288,7 @@ menu "ARMv8.3 architectural features" config ARM64_PTR_AUTH bool "Enable support for pointer authentication" default y + depends on !KVM || ARM64_VHE help Pointer authentication (part of the ARMv8.3 Extensions) provides instructions for signing and authenticating pointers against secret @@ -1301,8 +1302,9 @@ config ARM64_PTR_AUTH context-switched along with the process. The feature is detected at runtime. If the feature is not present in - hardware it will not be advertised to userspace nor will it be - enabled. + hardware it will not be advertised to userspace/KVM guest nor will it + be enabled. However, KVM guest also require VHE mode and hence + CONFIG_ARM64_VHE=y option to use this feature. endmenu diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index d3842791e1c4..613427fafff9 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -98,6 +98,22 @@ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 |= HCR_TWE; } +static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); +} + +static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); +} + +static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) +{ + if (vcpu_has_ptrauth(vcpu)) + vcpu_ptrauth_disable(vcpu); +} + static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) { return vcpu->arch.vsesr_el2; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7ccac42a91a6..7eebea7059c6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -161,6 +161,18 @@ enum vcpu_sysreg { PMSWINC_EL0, /* Software Increment Register */ PMUSERENR_EL0, /* User Enable Register */ + /* Pointer Authentication Registers in a strict increasing order. 
*/ + APIAKEYLO_EL1, + APIAKEYHI_EL1, + APIBKEYLO_EL1, + APIBKEYHI_EL1, + APDAKEYLO_EL1, + APDAKEYHI_EL1, + APDBKEYLO_EL1, + APDBKEYHI_EL1, + APGAKEYLO_EL1, + APGAKEYHI_EL1, + /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ IFSR32_EL2, /* Instruction Fault Status Register */ @@ -530,6 +542,8 @@ static inline bool kvm_arch_requires_vhe(void) return false; } +void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); + static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h new file mode 100644 index 000000000000..6301813dcace --- /dev/null +++ b/arch/arm64/include/asm/kvm_ptrauth.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore + * Copyright 2019 Arm Limited + * Authors: Mark Rutland + * Amit Daniel Kachhap + */ + +#ifndef __ASM_KVM_PTRAUTH_H +#define __ASM_KVM_PTRAUTH_H + +#ifdef __ASSEMBLY__ + +#include + +#ifdef CONFIG_ARM64_PTR_AUTH + +#define PTRAUTH_REG_OFFSET(x) (x - CPU_APIAKEYLO_EL1) + +/* + * CPU_AP*_EL1 values exceed immediate offset range (512) for stp + * instruction so below macros takes CPU_APIAKEYLO_EL1 as base and + * calculates the offset of the keys from this base to avoid an extra add + * instruction. These macros assumes the keys offsets follow the order of + * the sysreg enum in kvm_host.h. + */ +.macro ptrauth_save_state base, reg1, reg2 + mrs_s \reg1, SYS_APIAKEYLO_EL1 + mrs_s \reg2, SYS_APIAKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)] + mrs_s \reg1, SYS_APIBKEYLO_EL1 + mrs_s \reg2, SYS_APIBKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)] + mrs_s \reg1, SYS_APDAKEYLO_EL1 + mrs_s \reg2, SYS_APDAKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)] + mrs_s \reg1, SYS_APDBKEYLO_EL1 + mrs_s \reg2, SYS_APDBKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)] + mrs_s \reg1, SYS_APGAKEYLO_EL1 + mrs_s \reg2, SYS_APGAKEYHI_EL1 + stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)] +.endm + +.macro ptrauth_restore_state base, reg1, reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)] + msr_s SYS_APIAKEYLO_EL1, \reg1 + msr_s SYS_APIAKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)] + msr_s SYS_APIBKEYLO_EL1, \reg1 + msr_s SYS_APIBKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)] + msr_s SYS_APDAKEYLO_EL1, \reg1 + msr_s SYS_APDAKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)] + msr_s SYS_APDBKEYLO_EL1, \reg1 + msr_s SYS_APDBKEYHI_EL1, \reg2 + ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)] + msr_s SYS_APGAKEYLO_EL1, \reg1 + msr_s SYS_APGAKEYHI_EL1, \reg2 +.endm + +/* + * Both ptrauth_switch_to_guest and ptrauth_switch_to_host macros will + * check for the presence of one of the cpufeature flag + * ARM64_HAS_ADDRESS_AUTH_ARCH or ARM64_HAS_ADDRESS_AUTH_IMP_DEF and + * then proceed ahead with the save/restore of Pointer Authentication + * key registers. 
+ */ +.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3 +alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH + b 1000f +alternative_else_nop_endif +alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF + b 1001f +alternative_else_nop_endif +1000: + ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)] + and \reg1, \reg1, #(HCR_API | HCR_APK) + cbz \reg1, 1001f + add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1 + ptrauth_restore_state \reg1, \reg2, \reg3 +1001: +.endm + +.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 +alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH + b 2000f +alternative_else_nop_endif +alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF + b 2001f +alternative_else_nop_endif +2000: + ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)] + and \reg1, \reg1, #(HCR_API | HCR_APK) + cbz \reg1, 2001f + add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1 + ptrauth_save_state \reg1, \reg2, \reg3 + add \reg1, \h_ctxt, #CPU_APIAKEYLO_EL1 + ptrauth_restore_state \reg1, \reg2, \reg3 + isb +2001: +.endm + +#else /* !CONFIG_ARM64_PTR_AUTH */ +.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3 +.endm +.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 +.endm +#endif /* CONFIG_ARM64_PTR_AUTH */ +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_KVM_PTRAUTH_H */ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 7f40dcbdd51d..8178330a9f7a 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -125,7 +125,13 @@ int main(void) DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); + DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); + DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1])); + DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1])); + DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1])); + DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1])); + DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1])); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); #endif diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 0b7983442071..516aead3c2a9 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -173,20 +173,40 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } +#define __ptrauth_save_key(regs, key) \ +({ \ + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +}) + +/* + * Handle the guest trying to use a ptrauth instruction, or trying to access a + * ptrauth register. 
+ */ +void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *ctxt; + + if (vcpu_has_ptrauth(vcpu)) { + vcpu_ptrauth_enable(vcpu); + ctxt = vcpu->arch.host_cpu_context; + __ptrauth_save_key(ctxt->sys_regs, APIA); + __ptrauth_save_key(ctxt->sys_regs, APIB); + __ptrauth_save_key(ctxt->sys_regs, APDA); + __ptrauth_save_key(ctxt->sys_regs, APDB); + __ptrauth_save_key(ctxt->sys_regs, APGA); + } else { + kvm_inject_undefined(vcpu); + } +} + /* * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into * a NOP). */ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run) { - /* - * We don't currently support ptrauth in a guest, and we mask the ID - * registers to prevent well-behaved guests from trying to make use of - * it. - * - * Inject an UNDEF, as if the feature really isn't present. - */ - kvm_inject_undefined(vcpu); + kvm_arm_vcpu_ptrauth_trap(vcpu); return 1; } diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 675fdc186e3b..93ba3d7ef027 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -24,6 +24,7 @@ #include #include #include +#include #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) @@ -64,6 +65,13 @@ ENTRY(__guest_enter) add x18, x0, #VCPU_CONTEXT + // Macro ptrauth_switch_to_guest format: + // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3) + // The below macro to restore guest keys is not implemented in C code + // as it may cause Pointer Authentication key signing mismatch errors + // when this feature is enabled for kernel code. + ptrauth_switch_to_guest x18, x0, x1, x2 + // Restore guest regs x0-x17 ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] @@ -118,6 +126,13 @@ ENTRY(__guest_exit) get_host_ctxt x2, x3 + // Macro ptrauth_switch_to_guest format: + // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3) + // The below macro to save/restore keys is not implemented in C code + // as it may cause Pointer Authentication key signing mismatch errors + // when this feature is enabled for kernel code. + ptrauth_switch_to_host x1, x2, x3, x4, x5 + // Now restore the host regs restore_callee_saved_regs x2 diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7046c7686321..12bd72e42b91 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1007,6 +1007,37 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } +static bool trap_ptrauth(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) +{ + kvm_arm_vcpu_ptrauth_trap(vcpu); + + /* + * Return false for both cases as we never skip the trapped + * instruction: + * + * - Either we re-execute the same key register access instruction + * after enabling ptrauth. + * - Or an UNDEF is injected as ptrauth is not supported/enabled. + */ + return false; +} + +static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + return vcpu_has_ptrauth(vcpu) ? 
0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST; +} + +#define __PTRAUTH_KEY(k) \ + { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k, \ + .visibility = ptrauth_visibility} + +#define PTRAUTH_KEY(k) \ + __PTRAUTH_KEY(k ## KEYLO_EL1), \ + __PTRAUTH_KEY(k ## KEYHI_EL1) + static bool access_arch_timer(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -1053,14 +1084,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); - } else if (id == SYS_ID_AA64ISAR1_EL1) { - const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) | - (0xfUL << ID_AA64ISAR1_API_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | - (0xfUL << ID_AA64ISAR1_GPI_SHIFT); - if (val & ptrauth_mask) - kvm_debug("ptrauth unsupported for guests, suppressing\n"); - val &= ~ptrauth_mask; + } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { + val &= ~(0xfUL << ID_AA64ISAR1_APA_SHIFT) | + (0xfUL << ID_AA64ISAR1_API_SHIFT) | + (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | + (0xfUL << ID_AA64ISAR1_GPI_SHIFT); } return val; @@ -1460,6 +1488,12 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, + PTRAUTH_KEY(APIA), + PTRAUTH_KEY(APIB), + PTRAUTH_KEY(APDA), + PTRAUTH_KEY(APDB), + PTRAUTH_KEY(APGA), + { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 7039c99cc217..156c09da9e2b 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -385,6 +385,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu_clear_wfe_traps(vcpu); else vcpu_set_wfe_traps(vcpu); + + vcpu_ptrauth_setup_lazy(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From a22fa321d13b0264976cbbc1d22f4c27c41d3642 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Tue, 23 Apr 2019 10:12:36 +0530 Subject: KVM: arm64: Add userspace flag to enable pointer authentication Now that the building blocks of pointer authentication are present, lets add userspace flags KVM_ARM_VCPU_PTRAUTH_ADDRESS and KVM_ARM_VCPU_PTRAUTH_GENERIC. These flags will enable pointer authentication for the KVM guest on a per-vcpu basis through the ioctl KVM_ARM_VCPU_INIT. This features will allow the KVM guest to allow the handling of pointer authentication instructions or to treat them as undefined if not set. Necessary documentations are added to reflect the changes done. 
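For illustration only (not part of the patch), enabling the feature from userspace is expected to look roughly as below; vcpu_fd and preferred_target come from the usual vcpu setup, and for now both flags must be requested together or KVM_ARM_VCPU_INIT fails:

    struct kvm_vcpu_init init = { .target = preferred_target };

    init.features[0] |= 1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS;
    init.features[0] |= 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC;

    if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init))
        return -1;  /* EINVAL: host lacks ptrauth or VHE, or only one flag was set */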
Reviewed-by: Dave Martin Signed-off-by: Amit Daniel Kachhap Cc: Mark Rutland Cc: Marc Zyngier Cc: Christoffer Dall Cc: kvmarm@lists.cs.columbia.edu Signed-off-by: Marc Zyngier --- Documentation/arm64/pointer-authentication.txt | 22 +++++++++++++++++---- Documentation/virtual/kvm/api.txt | 10 ++++++++++ arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/include/uapi/asm/kvm.h | 2 ++ arch/arm64/kvm/reset.c | 27 ++++++++++++++++++++++++++ 5 files changed, 58 insertions(+), 5 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt index 5baca42ba146..fc71b33de87e 100644 --- a/Documentation/arm64/pointer-authentication.txt +++ b/Documentation/arm64/pointer-authentication.txt @@ -87,7 +87,21 @@ used to get and set the keys for a thread. Virtualization -------------- -Pointer authentication is not currently supported in KVM guests. KVM -will mask the feature bits from ID_AA64ISAR1_EL1, and attempted use of -the feature will result in an UNDEFINED exception being injected into -the guest. +Pointer authentication is enabled in KVM guest when each virtual cpu is +initialised by passing flags KVM_ARM_VCPU_PTRAUTH_[ADDRESS/GENERIC] and +requesting these two separate cpu features to be enabled. The current KVM +guest implementation works by enabling both features together, so both +these userspace flags are checked before enabling pointer authentication. +The separate userspace flag will allow to have no userspace ABI changes +if support is added in the future to allow these two features to be +enabled independently of one another. + +As Arm Architecture specifies that Pointer Authentication feature is +implemented along with the VHE feature so KVM arm64 ptrauth code relies +on VHE mode to be present. + +Additionally, when these vcpu feature flags are not set then KVM will +filter out the Pointer Authentication system key registers from +KVM_GET/SET_REG_* ioctls and mask those features from cpufeature ID +register. Any attempt to use the Pointer Authentication instructions will +result in an UNDEFINED exception being injected into the guest. diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index e410a9f0f0d4..32afe7f5c35a 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2761,6 +2761,16 @@ Possible features: - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. Depends on KVM_CAP_ARM_PMU_V3. + - KVM_ARM_VCPU_PTRAUTH_ADDRESS: Enables Address Pointer authentication + for arm64 only. + Both KVM_ARM_VCPU_PTRAUTH_ADDRESS and KVM_ARM_VCPU_PTRAUTH_GENERIC + must be requested or neither must be requested. + + - KVM_ARM_VCPU_PTRAUTH_GENERIC: Enables Generic Pointer authentication + for arm64 only. + Both KVM_ARM_VCPU_PTRAUTH_ADDRESS and KVM_ARM_VCPU_PTRAUTH_GENERIC + must be requested or neither must be requested. + - KVM_ARM_VCPU_SVE: Enables SVE for the CPU (arm64 only). Depends on KVM_CAP_ARM_SVE. 
Requires KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE): diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7eebea7059c6..f772ac2fb3e9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -49,7 +49,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS -#define KVM_VCPU_MAX_FEATURES 5 +#define KVM_VCPU_MAX_FEATURES 7 #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index edd2db8e5160..7b7ac0f6cec9 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -104,6 +104,8 @@ struct kvm_regs { #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ #define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ +#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ +#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ struct kvm_vcpu_init { __u32 target; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 3402543fdcd3..028d0c604652 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -221,6 +221,27 @@ static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu)); } +static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) +{ + /* Support ptrauth only if the system supports these capabilities. */ + if (!has_vhe()) + return -EINVAL; + + if (!system_supports_address_auth() || + !system_supports_generic_auth()) + return -EINVAL; + /* + * For now make sure that both address/generic pointer authentication + * features are requested by the userspace together. + */ + if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || + !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) + return -EINVAL; + + vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH; + return 0; +} + /** * kvm_reset_vcpu - sets core registers and sys_regs to reset value * @vcpu: The VCPU pointer @@ -261,6 +282,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) kvm_vcpu_reset_sve(vcpu); } + if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || + test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { + if (kvm_vcpu_enable_ptrauth(vcpu)) + goto out; + } + switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { -- cgit v1.2.3 From 630a16854d2d28d13e96ff27ab43cc5caa4609fc Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Tue, 9 Apr 2019 20:22:11 +0100 Subject: arm64: KVM: Encapsulate kvm_cpu_context in kvm_host_data The virt/arm core allocates a kvm_cpu_context_t percpu, at present this is a typedef to kvm_cpu_context and is used to store host cpu context. The kvm_cpu_context structure is also used elsewhere to hold vcpu context. In order to use the percpu to hold additional future host information we encapsulate kvm_cpu_context in a new structure and rename the typedef and percpu to match. 
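Sketched for clarity (mirroring the hunks that follow), the wrapper and the two directions of access look like:

    struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;
        /* room for future host-only state (e.g. PMU event masks) */
    };

    /* from the per-cpu variable to the host context ... */
    struct kvm_host_data *cpu_data = this_cpu_ptr(&kvm_host_data);
    vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;

    /* ... and back from a context pointer, when that is all a caller has */
    struct kvm_host_data *host =
        container_of(host_ctxt, struct kvm_host_data, host_ctxt);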
Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 10 +++++++--- arch/arm64/include/asm/kvm_asm.h | 3 ++- arch/arm64/include/asm/kvm_host.h | 16 ++++++++++------ arch/arm64/kernel/asm-offsets.c | 1 + virt/kvm/arm/arm.c | 14 ++++++++------ 5 files changed, 28 insertions(+), 16 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index fe7754315e9c..2d721ab05925 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -153,9 +153,13 @@ struct kvm_cpu_context { u32 cp15[NR_CP15_REGS]; }; -typedef struct kvm_cpu_context kvm_cpu_context_t; +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; +}; + +typedef struct kvm_host_data kvm_host_data_t; -static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt, +static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt, int cpu) { /* The host's MPIDR is immutable, so let's set it up at boot time */ @@ -185,7 +189,7 @@ struct kvm_vcpu_arch { struct kvm_vcpu_fault_info fault; /* Host FP context */ - kvm_cpu_context_t *host_cpu_context; + struct kvm_cpu_context *host_cpu_context; /* VGIC state */ struct vgic_cpu vgic_cpu; diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index f5b79e995f40..ff73f5462aca 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void); .endm .macro get_host_ctxt reg, tmp - hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp + hyp_adr_this_cpu \reg, kvm_host_data, \tmp + add \reg, \reg, #HOST_DATA_CONTEXT .endm .macro get_vcpu_ptr vcpu, ctxt diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f772ac2fb3e9..9ba59832b71a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -233,7 +233,11 @@ struct kvm_cpu_context { struct kvm_vcpu *__hyp_running_vcpu; }; -typedef struct kvm_cpu_context kvm_cpu_context_t; +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; +}; + +typedef struct kvm_host_data kvm_host_data_t; struct vcpu_reset_state { unsigned long pc; @@ -278,7 +282,7 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch external_debug_state; /* Pointer to host CPU context */ - kvm_cpu_context_t *host_cpu_context; + struct kvm_cpu_context *host_cpu_context; struct thread_info *host_thread_info; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ @@ -483,9 +487,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); +DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data); -static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt, +static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt, int cpu) { /* The host's MPIDR is immutable, so let's set it up at boot time */ @@ -503,8 +507,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, * kernel's mapping to the linear mapping, and store it in tpidr_el2 * so that we can use adr_l to access per-cpu variables in EL2. */ - u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) - - (u64)kvm_ksym_ref(kvm_host_cpu_state)); + u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) - + (u64)kvm_ksym_ref(kvm_host_data)); /* * Call initialization code, and switch to the full blown HYP code. 
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 8178330a9f7a..768b23101ff0 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -134,6 +134,7 @@ int main(void) DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1])); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); + DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 156c09da9e2b..e960b91551d6 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -56,7 +56,7 @@ __asm__(".arch_extension virt"); #endif -DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); +DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); /* Per-CPU variable containing the currently running vcpu. */ @@ -360,8 +360,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; + kvm_host_data_t *cpu_data; last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); + cpu_data = this_cpu_ptr(&kvm_host_data); /* * We might get preempted before the vCPU actually runs, but @@ -373,7 +375,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } vcpu->cpu = cpu; - vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state); + vcpu->arch.host_cpu_context = &cpu_data->host_ctxt; kvm_arm_set_running_vcpu(vcpu); kvm_vgic_load(vcpu); @@ -1569,11 +1571,11 @@ static int init_hyp_mode(void) } for_each_possible_cpu(cpu) { - kvm_cpu_context_t *cpu_ctxt; + kvm_host_data_t *cpu_data; - cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu); - kvm_init_host_cpu_context(cpu_ctxt, cpu); - err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP); + cpu_data = per_cpu_ptr(&kvm_host_data, cpu); + kvm_init_host_cpu_context(&cpu_data->host_ctxt, cpu); + err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP); if (err) { kvm_err("Cannot map host CPU state: %d\n", err); -- cgit v1.2.3 From eb41238cf19fda694e3a99c1f4f58bd88479a5ee Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Tue, 9 Apr 2019 20:22:12 +0100 Subject: arm64: KVM: Add accessors to track guest/host only counters In order to effeciently switch events_{guest,host} perf counters at guest entry/exit we add bitfields to kvm_cpu_context for guest and host events as well as accessors for updating them. A function is also provided which allows the PMU driver to determine if a counter should start counting when it is enabled. With exclude_host, we may only start counting when entering the guest. 
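For illustration, a counter-enable path in the PMU driver is expected to use these hooks roughly as follows (hypothetical fragment: 'event', 'idx' and the low-level enable helper are placeholders, and the real arm_pmu wiring is a separate patch in this series):

    struct perf_event_attr *attr = &event->attr;

    /* record whether this counter is host-only, guest-only or both */
    kvm_set_pmu_events(BIT(idx), attr);

    /* a guest-only (exclude_host) counter is deferred until guest entry */
    if (!kvm_pmu_counter_deferred(attr))
        enable_counter(idx);            /* placeholder for the real low-level helper */

    /* ... and on the disable path: */
    kvm_clr_pmu_events(BIT(idx));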
Signed-off-by: Andrew Murray Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 17 +++++++++++++++ arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/pmu.c | 45 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/kvm/pmu.c (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 9ba59832b71a..655ad08edc3a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -233,8 +233,14 @@ struct kvm_cpu_context { struct kvm_vcpu *__hyp_running_vcpu; }; +struct kvm_pmu_events { + u32 events_host; + u32 events_guest; +}; + struct kvm_host_data { struct kvm_cpu_context host_ctxt; + struct kvm_pmu_events pmu_events; }; typedef struct kvm_host_data kvm_host_data_t; @@ -572,11 +578,22 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); +static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) +{ + return attr->exclude_host; +} + #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { return kvm_arch_vcpu_run_map_fp(vcpu); } + +void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); +void kvm_clr_pmu_events(u32 clr); +#else +static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} +static inline void kvm_clr_pmu_events(u32 clr) {} #endif static inline void kvm_arm_vhe_guest_enter(void) diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 690e033a91c0..3ac1a64d2fb9 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o +kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c new file mode 100644 index 000000000000..5414b134f99a --- /dev/null +++ b/arch/arm64/kvm/pmu.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Arm Limited + * Author: Andrew Murray + */ +#include +#include + +/* + * Given the exclude_{host,guest} attributes, determine if we are going + * to need to switch counters at guest entry/exit. + */ +static bool kvm_pmu_switch_needed(struct perf_event_attr *attr) +{ + /* Only switch if attributes are different */ + return (attr->exclude_host != attr->exclude_guest); +} + +/* + * Add events to track that we may want to switch at guest entry/exit + * time. 
+ */ +void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) +{ + struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); + + if (!kvm_pmu_switch_needed(attr)) + return; + + if (!attr->exclude_host) + ctx->pmu_events.events_host |= set; + if (!attr->exclude_guest) + ctx->pmu_events.events_guest |= set; +} + +/* + * Stop tracking events + */ +void kvm_clr_pmu_events(u32 clr) +{ + struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); + + ctx->pmu_events.events_host &= ~clr; + ctx->pmu_events.events_guest &= ~clr; +} -- cgit v1.2.3 From 3d91befbb3a0fcec6e1eebde45c8074b88cc9441 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Tue, 9 Apr 2019 20:22:14 +0100 Subject: arm64: KVM: Enable !VHE support for :G/:H perf event modifiers Enable/disable event counters as appropriate when entering and exiting the guest to enable support for guest or host only event counting. For both VHE and non-VHE we switch the counters between host/guest at EL2. The PMU may be on when we change which counters are enabled however we avoid adding an isb as we instead rely on existing context synchronisation events: the eret to enter the guest (__guest_enter) and eret in kvm_call_hyp for __kvm_vcpu_run_nvhe on returning. Signed-off-by: Andrew Murray Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/hyp/switch.c | 6 ++++++ arch/arm64/kvm/pmu.c | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 655ad08edc3a..645d74c705d6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -591,6 +591,9 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); void kvm_clr_pmu_events(u32 clr); + +void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt); +bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt); #else static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u32 clr) {} diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 5444b9c6fb5c..22b4c335e0b2 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -566,6 +566,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; + bool pmu_switch_needed; u64 exit_code; /* @@ -585,6 +586,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; + pmu_switch_needed = __pmu_switch_to_guest(host_ctxt); + __sysreg_save_state_nvhe(host_ctxt); __activate_vm(kern_hyp_va(vcpu->kvm)); @@ -631,6 +634,9 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) */ __debug_switch_to_host(vcpu); + if (pmu_switch_needed) + __pmu_switch_to_host(host_ctxt); + /* Returning to host will clear PSR.I, remask PMR if needed */ if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQOFF); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 5414b134f99a..599e6d3f692e 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -5,6 +5,7 @@ */ #include #include +#include /* * Given the exclude_{host,guest} attributes, determine if we are going @@ -43,3 +44,41 @@ void kvm_clr_pmu_events(u32 clr) ctx->pmu_events.events_host &= ~clr; ctx->pmu_events.events_guest &= ~clr; } + 
+/** + * Disable host events, enable guest events + */ +bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_host_data *host; + struct kvm_pmu_events *pmu; + + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + pmu = &host->pmu_events; + + if (pmu->events_host) + write_sysreg(pmu->events_host, pmcntenclr_el0); + + if (pmu->events_guest) + write_sysreg(pmu->events_guest, pmcntenset_el0); + + return (pmu->events_host || pmu->events_guest); +} + +/** + * Disable guest events, enable host events + */ +void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_host_data *host; + struct kvm_pmu_events *pmu; + + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + pmu = &host->pmu_events; + + if (pmu->events_guest) + write_sysreg(pmu->events_guest, pmcntenclr_el0); + + if (pmu->events_host) + write_sysreg(pmu->events_host, pmcntenset_el0); +} -- cgit v1.2.3 From 435e53fb5e21ad1820c5c69f208304c0e5623d01 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Tue, 9 Apr 2019 20:22:15 +0100 Subject: arm64: KVM: Enable VHE support for :G/:H perf event modifiers With VHE different exception levels are used between the host (EL2) and guest (EL1) with a shared exception level for userpace (EL0). We can take advantage of this and use the PMU's exception level filtering to avoid enabling/disabling counters in the world-switch code. Instead we just modify the counter type to include or exclude EL0 at vcpu_{load,put} time. We also ensure that trapped PMU system register writes do not re-enable EL0 when reconfiguring the backing perf events. This approach completely avoids blackout windows seen with !VHE. Suggested-by: Christoffer Dall Signed-off-by: Andrew Murray Acked-by: Will Deacon Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 3 ++ arch/arm64/include/asm/kvm_host.h | 5 ++- arch/arm64/kernel/perf_event.c | 6 ++- arch/arm64/kvm/pmu.c | 88 ++++++++++++++++++++++++++++++++++++++- arch/arm64/kvm/sys_regs.c | 3 ++ virt/kvm/arm/arm.c | 2 + 6 files changed, 103 insertions(+), 4 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 2d721ab05925..075e1921fdd9 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -368,6 +368,9 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} + static inline void kvm_arm_vhe_guest_enter(void) {} static inline void kvm_arm_vhe_guest_exit(void) {} diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 645d74c705d6..2a8d3f8ca22c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -580,7 +580,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) { - return attr->exclude_host; + return (!has_vhe() && attr->exclude_host); } #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ @@ -594,6 +594,9 @@ void kvm_clr_pmu_events(u32 clr); void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt); bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt); + +void 
kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu); +void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); #else static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u32 clr) {}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 6bb28aaf5aea..314b1adedf06 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -847,8 +847,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, * with other architectures (x86 and Power). */ if (is_kernel_in_hyp_mode()) { - if (!attr->exclude_kernel) + if (!attr->exclude_kernel && !attr->exclude_host) config_base |= ARMV8_PMU_INCLUDE_EL2; + if (attr->exclude_guest) + config_base |= ARMV8_PMU_EXCLUDE_EL1; + if (attr->exclude_host) + config_base |= ARMV8_PMU_EXCLUDE_EL0; } else { if (!attr->exclude_hv && !attr->exclude_host) config_base |= ARMV8_PMU_INCLUDE_EL2;
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 599e6d3f692e..3f99a095a1ff 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -8,11 +8,19 @@ #include /* - * Given the exclude_{host,guest} attributes, determine if we are going - * to need to switch counters at guest entry/exit. + * Given the perf event attributes and system type, determine + * if we are going to need to switch counters at guest entry/exit. */ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr) { + /** + * With VHE the guest kernel runs at EL1 and the host at EL2; if + * user (EL0) events are excluded then we have no reason to switch + * counters. + */ + if (has_vhe() && attr->exclude_user) + return false; + /* Only switch if attributes are different */ return (attr->exclude_host != attr->exclude_guest); } @@ -82,3 +90,79 @@ void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) if (pmu->events_host) write_sysreg(pmu->events_host, pmcntenset_el0); } + +/* + * Modify ARMv8 PMU events to include EL0 counting + */ +static void kvm_vcpu_pmu_enable_el0(unsigned long events) +{ + u64 typer; + u32 counter; + + for_each_set_bit(counter, &events, 32) { + write_sysreg(counter, pmselr_el0); + isb(); + typer = read_sysreg(pmxevtyper_el0) & ~ARMV8_PMU_EXCLUDE_EL0; + write_sysreg(typer, pmxevtyper_el0); + isb(); + } +} + +/* + * Modify ARMv8 PMU events to exclude EL0 counting + */ +static void kvm_vcpu_pmu_disable_el0(unsigned long events) +{ + u64 typer; + u32 counter; + + for_each_set_bit(counter, &events, 32) { + write_sysreg(counter, pmselr_el0); + isb(); + typer = read_sysreg(pmxevtyper_el0) | ARMV8_PMU_EXCLUDE_EL0; + write_sysreg(typer, pmxevtyper_el0); + isb(); + } +} + +/* + * On VHE ensure that only guest events have EL0 counting enabled + */ +void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_host_data *host; + u32 events_guest, events_host; + + if (!has_vhe()) + return; + + host_ctxt = vcpu->arch.host_cpu_context; + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + events_guest = host->pmu_events.events_guest; + events_host = host->pmu_events.events_host; + + kvm_vcpu_pmu_enable_el0(events_guest); + kvm_vcpu_pmu_disable_el0(events_host); +} + +/* + * On VHE ensure that only host events have EL0 counting enabled + */ +void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_host_data *host; + u32 events_guest, events_host; + + if (!has_vhe()) + return; + + host_ctxt = vcpu->arch.host_cpu_context; + host = container_of(host_ctxt, 
struct kvm_host_data, host_ctxt); + events_guest = host->pmu_events.events_guest; + events_host = host->pmu_events.events_host; + + kvm_vcpu_pmu_enable_el0(events_host); + kvm_vcpu_pmu_disable_el0(events_guest); +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 12bd72e42b91..9d02643bc601 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -695,6 +695,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, val |= p->regval & ARMV8_PMU_PMCR_MASK; __vcpu_sys_reg(vcpu, PMCR_EL0) = val; kvm_pmu_handle_pmcr(vcpu, val); + kvm_vcpu_pmu_restore_guest(vcpu); } else { /* PMCR.P & PMCR.C are RAZ */ val = __vcpu_sys_reg(vcpu, PMCR_EL0) @@ -850,6 +851,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (p->is_write) { kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; + kvm_vcpu_pmu_restore_guest(vcpu); } else { p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; } @@ -875,6 +877,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, /* accessing PMCNTENSET_EL0 */ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; kvm_pmu_enable_counter(vcpu, val); + kvm_vcpu_pmu_restore_guest(vcpu); } else { /* accessing PMCNTENCLR_EL0 */ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index e960b91551d6..8b7ca101f0f7 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -382,6 +382,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_timer_vcpu_load(vcpu); kvm_vcpu_load_sysregs(vcpu); kvm_arch_vcpu_load_fp(vcpu); + kvm_vcpu_pmu_restore_guest(vcpu); if (single_task_running()) vcpu_clear_wfe_traps(vcpu); @@ -397,6 +398,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_vcpu_put_sysregs(vcpu); kvm_timer_vcpu_put(vcpu); kvm_vgic_put(vcpu); + kvm_vcpu_pmu_restore_host(vcpu); vcpu->cpu = -1; -- cgit v1.2.3
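
The :G/:H modifiers named in the two commit subjects above map onto the exclude_host/exclude_guest bits of struct perf_event_attr, which is the state kvm_set_pmu_events() and kvm_pmu_switch_needed() inspect. The sketch below is not part of the series; it is a minimal userspace illustration, assuming a Linux host with the perf_event_open() syscall, of how a host-only cycle counter (roughly what the perf tool calls "cycles:H") ends up with exclude_guest set. With !VHE the series masks such a counter via PMCNTENCLR_EL0 around guest entry; with VHE the same effect comes from the EL1/EL0 exclude filters and the PMEVTYPER updates done at vcpu_load()/vcpu_put() time.

/*
 * Illustrative only: open a host-only ("cycles:H"-style) hardware cycle
 * counter for the current task. The exclude_guest bit set here is the
 * attribute the arm64 KVM patches above act on.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        /* glibc provides no wrapper, so call the syscall directly */
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_guest = 1; /* ":H": do not count while a guest runs */
        attr.exclude_host = 0;  /* ":G" would instead set exclude_host = 1 */
        attr.disabled = 1;

        fd = perf_event_open(&attr, 0, -1, -1, 0); /* current task, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the workload of interest here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("host-only cycles: %llu\n", (unsigned long long)count);

        close(fd);
        return 0;
}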