From 3a3604bc5eb4ae21ec95b13fdd15959e8f70c434 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 13:19:45 +0000 Subject: arm64: KVM: Switch to C-based stage2 init There is no real need to leave the stage2 initialization as part of the early HYP bootstrap, and we can easily postpone it to the point where we can safely run C code. This will help VHE, which doesn't need any of this bootstrap. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp-init.S | 15 +-------------- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/s2-setup.c | 44 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 14 deletions(-) create mode 100644 arch/arm64/kvm/hyp/s2-setup.c (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index d073b5a216f7..7d8747c6427c 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -87,26 +87,13 @@ __do_hyp_init: #endif /* * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in - * TCR_EL2 and VTCR_EL2. + * TCR_EL2. */ mrs x5, ID_AA64MMFR0_EL1 bfi x4, x5, #16, #3 msr tcr_el2, x4 - ldr x4, =VTCR_EL2_FLAGS - bfi x4, x5, #16, #3 - /* - * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in - * VTCR_EL2. - */ - mrs x5, ID_AA64MMFR1_EL1 - ubfx x5, x5, #5, #1 - lsl x5, x5, #VTCR_EL2_VS - orr x4, x4, x5 - - msr vtcr_el2, x4 - mrs x4, mair_el1 msr mair_el2, x4 isb diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 826032bc3945..5326e664fd41 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o +obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c new file mode 100644 index 000000000000..17e8cc09a1d8 --- /dev/null +++ b/arch/arm64/kvm/hyp/s2-setup.c @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include "hyp.h" + +void __hyp_text __init_stage2_translation(void) +{ + u64 val = VTCR_EL2_FLAGS; + u64 tmp; + + /* + * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS + * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while + * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2... + */ + val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16; + + /* + * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS + * bit in VTCR_EL2. + */ + tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf; + val |= (tmp == 2) ? 
VTCR_EL2_VS : 0; + + write_sysreg(val, vtcr_el2); +} -- cgit v1.2.3 From b81125c791a2958cc60ae968fc1cdea82b7cd3b0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 13:52:12 +0000 Subject: arm64: KVM: VHE: Patch out use of HVC With VHE, the host never issues an HVC instruction to get into the KVM code, as we can simply branch there. Use runtime code patching to simplify things a bit. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp.S | 7 +++++++ arch/arm64/kvm/hyp/hyp-entry.S | 40 +++++++++++++++++++++++++++++++--------- 2 files changed, 38 insertions(+), 9 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 0ccdcbbef3c2..0689a74e6ba0 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -17,7 +17,9 @@ #include +#include #include +#include /* * u64 kvm_call_hyp(void *hypfn, ...); @@ -38,6 +40,11 @@ * arch/arm64/kernel/hyp_stub.S. */ ENTRY(kvm_call_hyp) +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN hvc #0 ret +alternative_else + b __vhe_hyp_call + nop +alternative_endif ENDPROC(kvm_call_hyp) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 93e8d983c0bd..1bdeee70833e 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -38,6 +38,34 @@ ldp x0, x1, [sp], #16 .endm +.macro do_el2_call + /* + * Shuffle the parameters before calling the function + * pointed to in x0. Assumes parameters in x[1,2,3]. + */ + sub sp, sp, #16 + str lr, [sp] + mov lr, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + blr lr + ldr lr, [sp] + add sp, sp, #16 +.endm + +ENTRY(__vhe_hyp_call) + do_el2_call + /* + * We used to rely on having an exception return to get + * an implicit isb. In the E2H case, we don't have it anymore. + * rather than changing all the leaf functions, just do it here + * before returning to the rest of the kernel. + */ + isb + ret +ENDPROC(__vhe_hyp_call) + el1_sync: // Guest trapped into EL2 save_x0_to_x3 @@ -58,19 +86,13 @@ el1_sync: // Guest trapped into EL2 mrs x0, vbar_el2 b 2f -1: stp lr, xzr, [sp, #-16]! - +1: /* - * Compute the function address in EL2, and shuffle the parameters. + * Perform the EL2 call */ kern_hyp_va x0 - mov lr, x0 - mov x0, x1 - mov x1, x2 - mov x2, x3 - blr lr + do_el2_call - ldp lr, xzr, [sp], #16 2: eret el1_trap: -- cgit v1.2.3 From cedbb8b78c4f09f0d4519d5d35519b64487f1f0a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 13:50:34 +0000 Subject: arm64: KVM: VHE: Patch out kern_hyp_va The kern_hyp_va macro is pretty meaninless with VHE, as there is only one mapping - the kernel one. In order to keep the code readable and efficient, use runtime patching to replace the 'and' instruction used to compute the VA with a 'nop'. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_mmu.h | 12 +++++++++++- arch/arm64/kvm/hyp/hyp.h | 25 ++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 736433912a1e..9a9318adefa6 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -23,13 +23,16 @@ #include /* - * As we only have the TTBR0_EL2 register, we cannot express + * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express * "negative" addresses. This makes it impossible to directly share * mappings with the kernel. 
* * Instead, give the HYP mode its own VA region at a fixed offset from * the kernel by just masking the top bits (which are all ones for a * kernel address). + * + * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these + * macros (the entire kernel runs at EL2). */ #define HYP_PAGE_OFFSET_SHIFT VA_BITS #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) @@ -56,12 +59,19 @@ #ifdef __ASSEMBLY__ +#include +#include + /* * Convert a kernel VA into a HYP VA. * reg: VA to be converted. */ .macro kern_hyp_va reg +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN and \reg, \reg, #HYP_PAGE_OFFSET_MASK +alternative_else + nop +alternative_endif .endm #else diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index fb275178b6af..fc502f356147 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -25,9 +25,28 @@ #define __hyp_text __section(.hyp.text) notrace -#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK) -#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \ - + PAGE_OFFSET) +static inline unsigned long __kern_hyp_va(unsigned long v) +{ + asm volatile(ALTERNATIVE("and %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK)); + return v; +} + +#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) + +static inline unsigned long __hyp_kern_va(unsigned long v) +{ + u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET; + asm volatile(ALTERNATIVE("add %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "r" (offset)); + return v; +} + +#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) /** * hyp_alternate_select - Generates patchable code sequences that are -- cgit v1.2.3 From 915ccd1dbf21e0621fb9415ad76e5c2b31ec137a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 12:00:00 +0000 Subject: arm64: KVM: VHE: Introduce unified system register accessors VHE brings its own bag of new system registers, or rather system register accessors, as it define new ways to access both guest and host system registers. For example, from the host: - The host TCR_EL2 register is accessed using the TCR_EL1 accessor - The guest TCR_EL1 register is accessed using the TCR_EL12 accessor Obviously, this is confusing. A way to somehow reduce the complexity of writing code for both ARMv8 and ARMv8.1 is to use a set of unified accessors that will generate the right sysreg, depending on the mode the CPU is running in. For example: - read_sysreg_el1(tcr) will use TCR_EL1 on ARMv8, and TCR_EL12 on ARMv8.1 with VHE. - read_sysreg_el2(tcr) will use TCR_EL2 on ARMv8, and TCR_EL1 on ARMv8.1 with VHE. We end up with three sets of accessors ({read,write}_sysreg_el[012]) that can be directly used from C code. We take this opportunity to also add the definition for the new VHE sysregs. 
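As an illustrative usage sketch (not part of the patch), with the accessor names added below, the TCR example above reads as plain C; the alternatives framework picks the actual encoding at boot:

    /* register names are given in lower case, without their ELx suffix */
    u64 guest_tcr = read_sysreg_el1(tcr);   /* TCR_EL1 on ARMv8, TCR_EL12 on VHE */
    u64 host_tcr  = read_sysreg_el2(tcr);   /* TCR_EL2 on ARMv8, TCR_EL1  on VHE */

    write_sysreg_el1(guest_tcr, tcr);       /* value first, register second */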
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/hyp.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index fc502f356147..744c919cc8ef 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -48,6 +48,78 @@ static inline unsigned long __hyp_kern_va(unsigned long v) #define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) +#define read_sysreg_elx(r,nvh,vh) \ + ({ \ + u64 reg; \ + asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ + "mrs_s %0, " __stringify(r##vh),\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : "=r" (reg)); \ + reg; \ + }) + +#define write_sysreg_elx(v,r,nvh,vh) \ + do { \ + u64 __val = (u64)(v); \ + asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ + "msr_s " __stringify(r##vh) ", %x0",\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : : "rZ" (__val)); \ + } while (0) + +/* + * Unified accessors for registers that have a different encoding + * between VHE and non-VHE. They must be specified without their "ELx" + * encoding. + */ +#define read_sysreg_el2(r) \ + ({ \ + u64 reg; \ + asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\ + "mrs %0, " __stringify(r##_EL1),\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : "=r" (reg)); \ + reg; \ + }) + +#define write_sysreg_el2(v,r) \ + do { \ + u64 __val = (u64)(v); \ + asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\ + "msr " __stringify(r##_EL1) ", %x0",\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : : "rZ" (__val)); \ + } while (0) + +#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02) +#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02) +#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12) +#define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12) + +/* The VHE specific system registers and their encoding */ +#define sctlr_EL12 sys_reg(3, 5, 1, 0, 0) +#define cpacr_EL12 sys_reg(3, 5, 1, 0, 2) +#define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0) +#define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1) +#define tcr_EL12 sys_reg(3, 5, 2, 0, 2) +#define afsr0_EL12 sys_reg(3, 5, 5, 1, 0) +#define afsr1_EL12 sys_reg(3, 5, 5, 1, 1) +#define esr_EL12 sys_reg(3, 5, 5, 2, 0) +#define far_EL12 sys_reg(3, 5, 6, 0, 0) +#define mair_EL12 sys_reg(3, 5, 10, 2, 0) +#define amair_EL12 sys_reg(3, 5, 10, 3, 0) +#define vbar_EL12 sys_reg(3, 5, 12, 0, 0) +#define contextidr_EL12 sys_reg(3, 5, 13, 0, 1) +#define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0) +#define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0) +#define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1) +#define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2) +#define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0) +#define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1) +#define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2) +#define spsr_EL12 sys_reg(3, 5, 4, 0, 0) +#define elr_EL12 sys_reg(3, 5, 4, 0, 1) + /** * hyp_alternate_select - Generates patchable code sequences that are * used to switch between two implementations of a function, depending -- cgit v1.2.3 From edef528dc4bdab1504e72e0f5436b18f3777efc0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 12:17:35 +0000 Subject: arm64: KVM: VHE: Differenciate host/guest sysreg save/restore With ARMv8, host and guest share the same system register file, making the save/restore procedure completely symetrical. With VHE, host and guest now have different requirements, as they use different sysregs. 
In order to prepare for this, add split sysreg save/restore functions for both host and guest. No functional changes yet. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/hyp.h | 6 ++++-- arch/arm64/kvm/hyp/switch.c | 10 +++++----- arch/arm64/kvm/hyp/sysreg-sr.c | 24 ++++++++++++++++++++++-- 3 files changed, 31 insertions(+), 9 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 744c919cc8ef..5dfa8838a3e1 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -153,8 +153,10 @@ void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); -void __sysreg_save_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_state(struct kvm_cpu_context *ctxt); +void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt); void __sysreg32_save_state(struct kvm_vcpu *vcpu); void __sysreg32_restore_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index f0e7bdfae134..68f3cba25910 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -102,7 +102,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); guest_ctxt = &vcpu->arch.ctxt; - __sysreg_save_state(host_ctxt); + __sysreg_save_host_state(host_ctxt); __debug_cond_save_host_state(vcpu); __activate_traps(vcpu); @@ -116,7 +116,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) * to Cortex-A57 erratum #852523. */ __sysreg32_restore_state(vcpu); - __sysreg_restore_state(guest_ctxt); + __sysreg_restore_guest_state(guest_ctxt); __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); /* Jump in the fire! 
*/ @@ -125,7 +125,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) fp_enabled = __fpsimd_enabled(); - __sysreg_save_state(guest_ctxt); + __sysreg_save_guest_state(guest_ctxt); __sysreg32_save_state(vcpu); __timer_save_state(vcpu); __vgic_save_state(vcpu); @@ -133,7 +133,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) __deactivate_traps(vcpu); __deactivate_vm(vcpu); - __sysreg_restore_state(host_ctxt); + __sysreg_restore_host_state(host_ctxt); if (fp_enabled) { __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs); @@ -165,7 +165,7 @@ void __hyp_text __noreturn __hyp_panic(void) host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); __deactivate_traps(vcpu); __deactivate_vm(vcpu); - __sysreg_restore_state(host_ctxt); + __sysreg_restore_host_state(host_ctxt); } /* Call panic for real */ diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 425630980229..bd5b543f90da 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -24,7 +24,7 @@ #include "hyp.h" /* ctxt is already in the HYP VA space */ -void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) +static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) { ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); @@ -57,7 +57,17 @@ void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1); } -void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) +void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt) +{ + __sysreg_save_state(ctxt); +} + +void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt) +{ + __sysreg_save_state(ctxt); +} + +static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) { write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); @@ -90,6 +100,16 @@ void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); } +void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt) +{ + __sysreg_restore_state(ctxt); +} + +void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt) +{ + __sysreg_restore_state(ctxt); +} + void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) { u64 *spsr, *sysreg; -- cgit v1.2.3 From 9c6c35683286ddf47baf8c6d1931f3af63379490 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 12:39:38 +0000 Subject: arm64: KVM: VHE: Split save/restore of registers shared between guest and host A handful of system registers are still shared between host and guest, even while using VHE (tpidr*_el[01] and actlr_el1). Also, some of the vcpu state (sp_el0, PC and PSTATE) must be save/restored on entry/exit, as they are used on the host as well. In order to facilitate the introduction of a VHE-specific sysreg save/restore, make move the access to these registers to their own save/restore functions. No functional change. 
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/sysreg-sr.c | 48 +++++++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 15 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index bd5b543f90da..61bad17a1d11 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -23,13 +23,29 @@ #include "hyp.h" -/* ctxt is already in the HYP VA space */ +/* + * Non-VHE: Both host and guest must save everything. + * + * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and + * guest must save everything. + */ + +static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) +{ + ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); + ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); + ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); + ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); + ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); + ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); +} + static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) { ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1); - ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1); ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1); ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1); @@ -41,17 +57,11 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1); ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1); ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1); - ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); - ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); - ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1); ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1); ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); - ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); - ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); - ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1); ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1); @@ -60,11 +70,24 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt) { __sysreg_save_state(ctxt); + __sysreg_save_common_state(ctxt); } void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt) { __sysreg_save_state(ctxt); + __sysreg_save_common_state(ctxt); +} + +static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) +{ + write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); + write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); + write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); + write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); + write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); + write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); } static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) @@ -72,7 +95,6 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); 
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1); - write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1); write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1); write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1); @@ -84,17 +106,11 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1); write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1); write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1); - write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); - write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); - write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1); write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1); write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); - write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); - write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); - write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); write_sysreg(ctxt->gp_regs.elr_el1, elr_el1); write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); @@ -103,11 +119,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt) { __sysreg_restore_state(ctxt); + __sysreg_restore_common_state(ctxt); } void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt) { __sysreg_restore_state(ctxt); + __sysreg_restore_common_state(ctxt); } void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 094f8233c0da602712e8a206984431026a1530aa Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 12:56:25 +0000 Subject: arm64: KVM: VHE: Use unified system register accessors Use the recently introduced unified system register accessors for those sysregs that behave differently depending on VHE being in use or not. 
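To make the effect concrete, here is a sketch (not part of the patch) of what one converted line resolves to; the non-VHE encoding is emitted at build time and the VHE encoding is live-patched in when ARM64_HAS_VIRT_HOST_EXTN is detected, so there is no run-time check:

    ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
            /* default: mrs   %0, sctlr_el1   (ARMv8)         */
            /* patched: mrs_s %0, sctlr_EL12  (ARMv8.1 + VHE) */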
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/sysreg-sr.c | 84 +++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 42 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 61bad17a1d11..7d7d75732a62 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -37,34 +37,34 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); - ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); - ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); + ctxt->gp_regs.regs.pc = read_sysreg_el2(elr); + ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr); } static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) { ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); - ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1); - ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1); - ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1); - ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1); - ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1); - ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1); - ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1); - ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1); - ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1); - ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1); - ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1); - ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1); - ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1); - ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1); + ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr); + ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr); + ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0); + ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1); + ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr); + ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr); + ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0); + ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1); + ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far); + ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair); + ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar); + ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr); + ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair); + ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl); ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); - ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1); - ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1); + ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr); + ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr); } void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt) @@ -86,34 +86,34 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); - write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); - write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); + write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); } static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) { - 
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); - write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); - write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1); - write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1); - write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1); - write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1); - write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1); - write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1); - write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1); - write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1); - write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1); - write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1); - write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1); - write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1); - write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1); - write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1); - write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); - write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); - - write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); - write_sysreg(ctxt->gp_regs.elr_el1, elr_el1); - write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); + write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); + write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); + write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr); + write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr); + write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0); + write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1); + write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr); + write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr); + write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0); + write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1); + write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far); + write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair); + write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar); + write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr); + write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair); + write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl); + write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); + write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); + + write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); + write_sysreg_el1(ctxt->gp_regs.elr_el1, elr); + write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr); } void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt) -- cgit v1.2.3 From d1526e5efc3978efe8c9c37a2396d91e4702251b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 13:59:46 +0000 Subject: arm64: KVM: VHE: Enable minimal sysreg save/restore We're now in a position where we can introduce VHE's minimal save/restore, which is limited to the handful of shared sysregs. Add the required alternative function calls that result in a "do nothing" call on VHE, and the normal save/restore for non-VHE. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/sysreg-sr.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 7d7d75732a62..74b5f81678c2 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -23,6 +23,9 @@ #include "hyp.h" +/* Yes, this does nothing, on purpose */ +static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { } + /* * Non-VHE: Both host and guest must save everything. 
* @@ -67,9 +70,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr); } +static hyp_alternate_select(__sysreg_call_save_host_state, + __sysreg_save_state, __sysreg_do_nothing, + ARM64_HAS_VIRT_HOST_EXTN); + void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt) { - __sysreg_save_state(ctxt); + __sysreg_call_save_host_state()(ctxt); __sysreg_save_common_state(ctxt); } @@ -116,9 +123,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr); } +static hyp_alternate_select(__sysreg_call_restore_host_state, + __sysreg_restore_state, __sysreg_do_nothing, + ARM64_HAS_VIRT_HOST_EXTN); + void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt) { - __sysreg_restore_state(ctxt); + __sysreg_call_restore_host_state()(ctxt); __sysreg_restore_common_state(ctxt); } -- cgit v1.2.3 From 328762247cd33b4533f9dd89a4faf40288f359b7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 14:15:45 +0000 Subject: arm64: KVM: VHE: Make __fpsimd_enabled VHE aware As non-VHE and VHE have different ways to express the trapping of FPSIMD registers to EL2, make __fpsimd_enabled a patchable predicate and provide a VHE implementation. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 3 +++ arch/arm64/kvm/hyp/hyp.h | 5 +---- arch/arm64/kvm/hyp/switch.c | 19 +++++++++++++++++++ 3 files changed, 23 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index d201d4b396d1..afa2f4a96210 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -216,4 +216,7 @@ ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \ ECN(BKPT32), ECN(VECTOR32), ECN(BRK64) +#define CPACR_EL1_FPEN (3 << 20) +#define CPACR_EL1_TTA (1 << 28) + #endif /* __ARM64_KVM_ARM_H__ */ diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 5dfa8838a3e1..44eaff70da6a 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -171,10 +171,7 @@ void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -static inline bool __fpsimd_enabled(void) -{ - return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP); -} +bool __fpsimd_enabled(void); u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); void __noreturn __hyp_do_panic(unsigned long, ...); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 68f3cba25910..0d82ae921b9c 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -17,6 +17,25 @@ #include "hyp.h" +static bool __hyp_text __fpsimd_enabled_nvhe(void) +{ + return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP); +} + +static bool __hyp_text __fpsimd_enabled_vhe(void) +{ + return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN); +} + +static hyp_alternate_select(__fpsimd_is_enabled, + __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +bool __hyp_text __fpsimd_enabled(void) +{ + return __fpsimd_is_enabled()(); +} + static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) { u64 val; -- cgit v1.2.3 From 68908bf789b7fd376538a4bad8367d5dcb9ec983 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 15:47:55 +0000 Subject: arm64: KVM: VHE: Implement VHE 
activate/deactivate_traps Running the kernel in HYP mode requires the HCR_E2H bit to be set at all times, and the HCR_TGE bit to be set when running as a host (and cleared when running as a guest). At the same time, the vector must be set to the current role of the kernel (either host or hypervisor), and a couple of system registers differ between VHE and non-VHE. We implement these by using another set of alternate functions that get dynamically patched. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 3 +- arch/arm64/include/asm/kvm_emulate.h | 3 ++ arch/arm64/kvm/hyp/switch.c | 53 ++++++++++++++++++++++++++++++++---- 3 files changed, 52 insertions(+), 7 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index afa2f4a96210..b56a0a81e4cb 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -23,6 +23,7 @@ #include /* Hyp Configuration Register (HCR) bits */ +#define HCR_E2H (UL(1) << 34) #define HCR_ID (UL(1) << 33) #define HCR_CD (UL(1) << 32) #define HCR_RW_SHIFT 31 @@ -81,7 +82,7 @@ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) - +#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* Hyp System Control Register (SCTLR_EL2) bits */ #define SCTLR_EL2_EE (1 << 25) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 4df8e7a58c6b..40bc1681b6d5 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -29,6 +29,7 @@ #include #include #include +#include unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num); unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu); @@ -43,6 +44,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; + if (is_kernel_in_hyp_mode()) + vcpu->arch.hcr_el2 |= HCR_E2H; if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) vcpu->arch.hcr_el2 &= ~HCR_RW; } diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 0d82ae921b9c..e609942ef79c 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -15,6 +15,8 @@ * along with this program. If not, see . 
*/ +#include + #include "hyp.h" static bool __hyp_text __fpsimd_enabled_nvhe(void) @@ -36,6 +38,31 @@ bool __hyp_text __fpsimd_enabled(void) return __fpsimd_is_enabled()(); } +static void __hyp_text __activate_traps_vhe(void) +{ + u64 val; + + val = read_sysreg(cpacr_el1); + val |= CPACR_EL1_TTA; + val &= ~CPACR_EL1_FPEN; + write_sysreg(val, cpacr_el1); + + write_sysreg(__kvm_hyp_vector, vbar_el1); +} + +static void __hyp_text __activate_traps_nvhe(void) +{ + u64 val; + + val = CPTR_EL2_DEFAULT; + val |= CPTR_EL2_TTA | CPTR_EL2_TFP; + write_sysreg(val, cptr_el2); +} + +static hyp_alternate_select(__activate_traps_arch, + __activate_traps_nvhe, __activate_traps_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) { u64 val; @@ -55,20 +82,34 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) write_sysreg(val, hcr_el2); /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + __activate_traps_arch()(); +} - val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TFP; - write_sysreg(val, cptr_el2); +static void __hyp_text __deactivate_traps_vhe(void) +{ + extern char vectors[]; /* kernel exception vectors */ - write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + write_sysreg(CPACR_EL1_FPEN, cpacr_el1); + write_sysreg(vectors, vbar_el1); } -static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +static void __hyp_text __deactivate_traps_nvhe(void) { write_sysreg(HCR_RW, hcr_el2); + write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); +} + +static hyp_alternate_select(__deactivate_traps_arch, + __deactivate_traps_nvhe, __deactivate_traps_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +{ + __deactivate_traps_arch()(); write_sysreg(0, hstr_el2); write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); - write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 5efe6de13807fe927f0ecc63d83197b5cd3c7782 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 14:36:20 +0000 Subject: arm64: KVM: VHE: Use unified sysreg accessors for timer Switch the timer code to the unified sysreg accessors. 
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/timer-sr.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c index 1051e5d7320f..f276d9e74411 100644 --- a/arch/arm64/kvm/hyp/timer-sr.c +++ b/arch/arm64/kvm/hyp/timer-sr.c @@ -31,12 +31,12 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) u64 val; if (kvm->arch.timer.enabled) { - timer->cntv_ctl = read_sysreg(cntv_ctl_el0); - timer->cntv_cval = read_sysreg(cntv_cval_el0); + timer->cntv_ctl = read_sysreg_el0(cntv_ctl); + timer->cntv_cval = read_sysreg_el0(cntv_cval); } /* Disable the virtual timer */ - write_sysreg(0, cntv_ctl_el0); + write_sysreg_el0(0, cntv_ctl); /* Allow physical timer/counter access for the host */ val = read_sysreg(cnthctl_el2); @@ -64,8 +64,8 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) if (kvm->arch.timer.enabled) { write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); - write_sysreg(timer->cntv_cval, cntv_cval_el0); + write_sysreg_el0(timer->cntv_cval, cntv_cval); isb(); - write_sysreg(timer->cntv_ctl, cntv_ctl_el0); + write_sysreg_el0(timer->cntv_ctl, cntv_ctl); } } -- cgit v1.2.3 From 77cb2d91333312d7426055d4369f3821e5e8bda0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 14:42:09 +0000 Subject: arm64: KVM: VHE: Add fpsimd enabling on guest access Despite the fact that a VHE enabled kernel runs at EL2, it uses CPACR_EL1 to trap FPSIMD access. Add the required alternative code to re-enable guest FPSIMD access when it has trapped to EL2. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/entry.S | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index fd0fbe9b7e6a..ce9e5e5f28cf 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -130,9 +130,15 @@ ENDPROC(__guest_exit) ENTRY(__fpsimd_guest_restore) stp x4, lr, [sp, #-16]! +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs x2, cptr_el2 bic x2, x2, #CPTR_EL2_TFP msr cptr_el2, x2 +alternative_else + mrs x2, cpacr_el1 + orr x2, x2, #CPACR_EL1_FPEN + msr cpacr_el1, x2 +alternative_endif isb mrs x3, tpidr_el2 -- cgit v1.2.3 From 253dcbd39adb00890f3c350230ae310fcfeeb760 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 17 Nov 2015 14:07:45 +0000 Subject: arm64: KVM: VHE: Add alternative panic handling As the kernel fully runs in HYP when VHE is enabled, we can directly branch to the kernel's panic() implementation, and not perform an exception return. Add the alternative code to deal with this. 
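The selection below uses the hyp_alternate_select() helper from hyp.h (its full definition is visible in the kvm_hyp.h move at the end of this series): it generates a tiny function that returns one of two function pointers, and the alternatives framework patches in the 'mov' selecting the VHE variant when the feature is present. A minimal usage sketch, with made-up function names:

    static void __hyp_text do_thing_nvhe(void) { /* ARMv8 behaviour */ }
    static void __hyp_text do_thing_vhe(void)  { /* VHE behaviour */ }

    static hyp_alternate_select(__do_thing, do_thing_nvhe, do_thing_vhe,
                                ARM64_HAS_VIRT_HOST_EXTN);

    /* double call: the first () returns the selected function pointer */
    __do_thing()();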
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/switch.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index e609942ef79c..731f0a2ffee0 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -210,11 +210,34 @@ __alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; -void __hyp_text __noreturn __hyp_panic(void) +static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) { unsigned long str_va = (unsigned long)__hyp_panic_string; - u64 spsr = read_sysreg(spsr_el2); - u64 elr = read_sysreg(elr_el2); + + __hyp_do_panic(hyp_kern_va(str_va), + spsr, elr, + read_sysreg(esr_el2), read_sysreg_el2(far), + read_sysreg(hpfar_el2), par, + (void *)read_sysreg(tpidr_el2)); +} + +static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par) +{ + panic(__hyp_panic_string, + spsr, elr, + read_sysreg_el2(esr), read_sysreg_el2(far), + read_sysreg(hpfar_el2), par, + (void *)read_sysreg(tpidr_el2)); +} + +static hyp_alternate_select(__hyp_call_panic, + __hyp_call_panic_nvhe, __hyp_call_panic_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +void __hyp_text __noreturn __hyp_panic(void) +{ + u64 spsr = read_sysreg_el2(spsr); + u64 elr = read_sysreg_el2(elr); u64 par = read_sysreg(par_el1); if (read_sysreg(vttbr_el2)) { @@ -229,11 +252,7 @@ void __hyp_text __noreturn __hyp_panic(void) } /* Call panic for real */ - __hyp_do_panic(hyp_kern_va(str_va), - spsr, elr, - read_sysreg(esr_el2), read_sysreg(far_el2), - read_sysreg(hpfar_el2), par, - (void *)read_sysreg(tpidr_el2)); + __hyp_call_panic()(spsr, elr, par); unreachable(); } -- cgit v1.2.3 From 5f05a72aed023e5824eebb2542b5397cb89188f4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 15:06:47 +0000 Subject: arm64: KVM: Move most of the fault decoding to C The fault decoding process (including computing the IPA in the case of a permission fault) would be much better done in C code, as we have a reasonable infrastructure to deal with the VHE/non-VHE differences. Let's move the whole thing to C, including the workaround for erratum 834220, and just patch the odd ESR_EL2 access remaining in hyp-entry.S. 
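For reference, a worked example of the PAR-to-HPFAR conversion done in the C version (hypothetical numbers, not part of the patch): the AT S1E1R walk leaves the stage-1 output address (the IPA) in PAR_EL1 bits [47:12], and HPFAR_EL2 wants the same bits as FIPA at bits [39:4]:

    u64 par   = 0x0000000812345000;     /* successful walk, attribute bits omitted */
    u64 hpfar = ((par >> 12) & ((1UL << 36) - 1)) << 4;
                                        /* == 0x8123450, i.e. IPA page 0x812345000 */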
Reviewed-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier --- arch/arm64/kernel/asm-offsets.c | 3 -- arch/arm64/kvm/hyp/hyp-entry.S | 69 +++------------------------------ arch/arm64/kvm/hyp/switch.c | 85 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 67 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index fffa4ac6c25a..b0ab4e93db0d 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -110,9 +110,6 @@ int main(void) DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); - DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); - DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); - DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); #endif #ifdef CONFIG_CPU_PM diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 1bdeee70833e..3488894397ff 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -69,7 +68,11 @@ ENDPROC(__vhe_hyp_call) el1_sync: // Guest trapped into EL2 save_x0_to_x3 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs x1, esr_el2 +alternative_else + mrs x1, esr_el1 +alternative_endif lsr x2, x1, #ESR_ELx_EC_SHIFT cmp x2, #ESR_ELx_EC_HVC64 @@ -105,72 +108,10 @@ el1_trap: cmp x2, #ESR_ELx_EC_FP_ASIMD b.eq __fpsimd_guest_restore - cmp x2, #ESR_ELx_EC_DABT_LOW - mov x0, #ESR_ELx_EC_IABT_LOW - ccmp x2, x0, #4, ne - b.ne 1f // Not an abort we care about - - /* This is an abort. Check for permission fault */ -alternative_if_not ARM64_WORKAROUND_834220 - and x2, x1, #ESR_ELx_FSC_TYPE - cmp x2, #FSC_PERM - b.ne 1f // Not a permission fault -alternative_else - nop // Use the permission fault path to - nop // check for a valid S1 translation, - nop // regardless of the ESR value. -alternative_endif - - /* - * Check for Stage-1 page table walk, which is guaranteed - * to give a valid HPFAR_EL2. - */ - tbnz x1, #7, 1f // S1PTW is set - - /* Preserve PAR_EL1 */ - mrs x3, par_el1 - stp x3, xzr, [sp, #-16]! - - /* - * Permission fault, HPFAR_EL2 is invalid. - * Resolve the IPA the hard way using the guest VA. - * Stage-1 translation already validated the memory access rights. - * As such, we can use the EL1 translation regime, and don't have - * to distinguish between EL0 and EL1 access. - */ - mrs x2, far_el2 - at s1e1r, x2 - isb - - /* Read result */ - mrs x3, par_el1 - ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack - msr par_el1, x0 - tbnz x3, #0, 3f // Bail out if we failed the translation - ubfx x3, x3, #12, #36 // Extract IPA - lsl x3, x3, #4 // and present it like HPFAR - b 2f - -1: mrs x3, hpfar_el2 - mrs x2, far_el2 - -2: mrs x0, tpidr_el2 - str w1, [x0, #VCPU_ESR_EL2] - str x2, [x0, #VCPU_FAR_EL2] - str x3, [x0, #VCPU_HPFAR_EL2] - + mrs x0, tpidr_el2 mov x1, #ARM_EXCEPTION_TRAP b __guest_exit - /* - * Translation failed. Just return to the guest and - * let it fault again. Another CPU is probably playing - * behind our back. 
- */ -3: restore_x0_to_x3 - - eret - el1_irq: save_x0_to_x3 mrs x0, tpidr_el2 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 731f0a2ffee0..ecf5b05d1e16 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -15,6 +15,7 @@ * along with this program. If not, see . */ +#include #include #include "hyp.h" @@ -149,6 +150,86 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) __vgic_call_restore_state()(vcpu); } +static bool __hyp_text __true_value(void) +{ + return true; +} + +static bool __hyp_text __false_value(void) +{ + return false; +} + +static hyp_alternate_select(__check_arm_834220, + __false_value, __true_value, + ARM64_WORKAROUND_834220); + +static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) +{ + u64 par, tmp; + + /* + * Resolve the IPA the hard way using the guest VA. + * + * Stage-1 translation already validated the memory access + * rights. As such, we can use the EL1 translation regime, and + * don't have to distinguish between EL0 and EL1 access. + * + * We do need to save/restore PAR_EL1 though, as we haven't + * saved the guest context yet, and we may return early... + */ + par = read_sysreg(par_el1); + asm volatile("at s1e1r, %0" : : "r" (far)); + isb(); + + tmp = read_sysreg(par_el1); + write_sysreg(par, par_el1); + + if (unlikely(tmp & 1)) + return false; /* Translation failed, back to guest */ + + /* Convert PAR to HPFAR format */ + *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4; + return true; +} + +static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) +{ + u64 esr = read_sysreg_el2(esr); + u8 ec = esr >> ESR_ELx_EC_SHIFT; + u64 hpfar, far; + + vcpu->arch.fault.esr_el2 = esr; + + if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) + return true; + + far = read_sysreg_el2(far); + + /* + * The HPFAR can be invalid if the stage 2 fault did not + * happen during a stage 1 page table walk (the ESR_EL2.S1PTW + * bit is clear) and one of the two following cases are true: + * 1. The fault was due to a permission fault + * 2. The processor carries errata 834220 + * + * Therefore, for all non S1PTW faults where we either have a + * permission fault or the errata workaround is enabled, we + * resolve the IPA using the AT instruction. + */ + if (!(esr & ESR_ELx_S1PTW) && + (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { + if (!__translate_far_to_hpfar(far, &hpfar)) + return false; + } else { + hpfar = read_sysreg(hpfar_el2); + } + + vcpu->arch.fault.far_el2 = far; + vcpu->arch.fault.hpfar_el2 = hpfar; + return true; +} + static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; @@ -180,9 +261,13 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); /* Jump in the fire! */ +again: exit_code = __guest_enter(vcpu, host_ctxt); /* And we're baaack! */ + if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) + goto again; + fp_enabled = __fpsimd_enabled(); __sysreg_save_guest_state(guest_ctxt); -- cgit v1.2.3 From 13720a56edbd8164fbfa251067dea9776e09f54b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 13:44:07 +0000 Subject: arm64: KVM: Move kvm/hyp/hyp.h to include/asm/kvm_hyp.h In order to be able to move code outside of kvm/hyp, we need to make the global hyp.h file accessible from a standard location. include/asm/kvm_hyp.h seems good enough. 
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_hyp.h | 180 +++++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/debug-sr.c | 4 +- arch/arm64/kvm/hyp/hyp.h | 180 --------------------------------------- arch/arm64/kvm/hyp/s2-setup.c | 3 +- arch/arm64/kvm/hyp/switch.c | 3 +- arch/arm64/kvm/hyp/sysreg-sr.c | 4 +- arch/arm64/kvm/hyp/timer-sr.c | 4 +- arch/arm64/kvm/hyp/tlb.c | 2 +- arch/arm64/kvm/hyp/vgic-v2-sr.c | 4 +- arch/arm64/kvm/hyp/vgic-v3-sr.c | 4 +- 10 files changed, 188 insertions(+), 200 deletions(-) create mode 100644 arch/arm64/include/asm/kvm_hyp.h delete mode 100644 arch/arm64/kvm/hyp/hyp.h (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h new file mode 100644 index 000000000000..44eaff70da6a --- /dev/null +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM64_KVM_HYP_H__ +#define __ARM64_KVM_HYP_H__ + +#include +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +static inline unsigned long __kern_hyp_va(unsigned long v) +{ + asm volatile(ALTERNATIVE("and %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK)); + return v; +} + +#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) + +static inline unsigned long __hyp_kern_va(unsigned long v) +{ + u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET; + asm volatile(ALTERNATIVE("add %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "r" (offset)); + return v; +} + +#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) + +#define read_sysreg_elx(r,nvh,vh) \ + ({ \ + u64 reg; \ + asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ + "mrs_s %0, " __stringify(r##vh),\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : "=r" (reg)); \ + reg; \ + }) + +#define write_sysreg_elx(v,r,nvh,vh) \ + do { \ + u64 __val = (u64)(v); \ + asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ + "msr_s " __stringify(r##vh) ", %x0",\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : : "rZ" (__val)); \ + } while (0) + +/* + * Unified accessors for registers that have a different encoding + * between VHE and non-VHE. They must be specified without their "ELx" + * encoding. 
+ */ +#define read_sysreg_el2(r) \ + ({ \ + u64 reg; \ + asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\ + "mrs %0, " __stringify(r##_EL1),\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : "=r" (reg)); \ + reg; \ + }) + +#define write_sysreg_el2(v,r) \ + do { \ + u64 __val = (u64)(v); \ + asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\ + "msr " __stringify(r##_EL1) ", %x0",\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : : "rZ" (__val)); \ + } while (0) + +#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02) +#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02) +#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12) +#define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12) + +/* The VHE specific system registers and their encoding */ +#define sctlr_EL12 sys_reg(3, 5, 1, 0, 0) +#define cpacr_EL12 sys_reg(3, 5, 1, 0, 2) +#define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0) +#define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1) +#define tcr_EL12 sys_reg(3, 5, 2, 0, 2) +#define afsr0_EL12 sys_reg(3, 5, 5, 1, 0) +#define afsr1_EL12 sys_reg(3, 5, 5, 1, 1) +#define esr_EL12 sys_reg(3, 5, 5, 2, 0) +#define far_EL12 sys_reg(3, 5, 6, 0, 0) +#define mair_EL12 sys_reg(3, 5, 10, 2, 0) +#define amair_EL12 sys_reg(3, 5, 10, 3, 0) +#define vbar_EL12 sys_reg(3, 5, 12, 0, 0) +#define contextidr_EL12 sys_reg(3, 5, 13, 0, 1) +#define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0) +#define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0) +#define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1) +#define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2) +#define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0) +#define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1) +#define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2) +#define spsr_EL12 sys_reg(3, 5, 4, 0, 0) +#define elr_EL12 sys_reg(3, 5, 4, 0, 1) + +/** + * hyp_alternate_select - Generates patchable code sequences that are + * used to switch between two implementations of a function, depending + * on the availability of a feature. 
+ * + * @fname: a symbol name that will be defined as a function returning a + * function pointer whose type will match @orig and @alt + * @orig: A pointer to the default function, as returned by @fname when + * @cond doesn't hold + * @alt: A pointer to the alternate function, as returned by @fname + * when @cond holds + * @cond: a CPU feature (as described in asm/cpufeature.h) + */ +#define hyp_alternate_select(fname, orig, alt, cond) \ +typeof(orig) * __hyp_text fname(void) \ +{ \ + typeof(alt) *val = orig; \ + asm volatile(ALTERNATIVE("nop \n", \ + "mov %0, %1 \n", \ + cond) \ + : "+r" (val) : "r" (alt)); \ + return val; \ +} + +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + +void __vgic_v3_save_state(struct kvm_vcpu *vcpu); +void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); + +void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt); +void __sysreg32_save_state(struct kvm_vcpu *vcpu); +void __sysreg32_restore_state(struct kvm_vcpu *vcpu); + +void __debug_save_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_restore_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); +void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); + +void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); +void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); +bool __fpsimd_enabled(void); + +u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); +void __noreturn __hyp_do_panic(unsigned long, ...); + +#endif /* __ARM64_KVM_HYP_H__ */ + diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index c9c1e97501a9..053cf8b057c1 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include #define read_debug(r,n) read_sysreg(r##n##_el1) #define write_debug(v,r,n) write_sysreg(v, r##n##_el1) diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h deleted file mode 100644 index 44eaff70da6a..000000000000 --- a/arch/arm64/kvm/hyp/hyp.h +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright (C) 2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef __ARM64_KVM_HYP_H__ -#define __ARM64_KVM_HYP_H__ - -#include -#include -#include -#include - -#define __hyp_text __section(.hyp.text) notrace - -static inline unsigned long __kern_hyp_va(unsigned long v) -{ - asm volatile(ALTERNATIVE("and %0, %0, %1", - "nop", - ARM64_HAS_VIRT_HOST_EXTN) - : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK)); - return v; -} - -#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) - -static inline unsigned long __hyp_kern_va(unsigned long v) -{ - u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET; - asm volatile(ALTERNATIVE("add %0, %0, %1", - "nop", - ARM64_HAS_VIRT_HOST_EXTN) - : "+r" (v) : "r" (offset)); - return v; -} - -#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) - -#define read_sysreg_elx(r,nvh,vh) \ - ({ \ - u64 reg; \ - asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ - "mrs_s %0, " __stringify(r##vh),\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : "=r" (reg)); \ - reg; \ - }) - -#define write_sysreg_elx(v,r,nvh,vh) \ - do { \ - u64 __val = (u64)(v); \ - asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ - "msr_s " __stringify(r##vh) ", %x0",\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : : "rZ" (__val)); \ - } while (0) - -/* - * Unified accessors for registers that have a different encoding - * between VHE and non-VHE. They must be specified without their "ELx" - * encoding. - */ -#define read_sysreg_el2(r) \ - ({ \ - u64 reg; \ - asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\ - "mrs %0, " __stringify(r##_EL1),\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : "=r" (reg)); \ - reg; \ - }) - -#define write_sysreg_el2(v,r) \ - do { \ - u64 __val = (u64)(v); \ - asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\ - "msr " __stringify(r##_EL1) ", %x0",\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : : "rZ" (__val)); \ - } while (0) - -#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02) -#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02) -#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12) -#define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12) - -/* The VHE specific system registers and their encoding */ -#define sctlr_EL12 sys_reg(3, 5, 1, 0, 0) -#define cpacr_EL12 sys_reg(3, 5, 1, 0, 2) -#define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0) -#define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1) -#define tcr_EL12 sys_reg(3, 5, 2, 0, 2) -#define afsr0_EL12 sys_reg(3, 5, 5, 1, 0) -#define afsr1_EL12 sys_reg(3, 5, 5, 1, 1) -#define esr_EL12 sys_reg(3, 5, 5, 2, 0) -#define far_EL12 sys_reg(3, 5, 6, 0, 0) -#define mair_EL12 sys_reg(3, 5, 10, 2, 0) -#define amair_EL12 sys_reg(3, 5, 10, 3, 0) -#define vbar_EL12 sys_reg(3, 5, 12, 0, 0) -#define contextidr_EL12 sys_reg(3, 5, 13, 0, 1) -#define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0) -#define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0) -#define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1) -#define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2) -#define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0) -#define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1) -#define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2) -#define spsr_EL12 sys_reg(3, 5, 4, 0, 0) -#define elr_EL12 sys_reg(3, 5, 4, 0, 1) - -/** - * hyp_alternate_select - Generates patchable code sequences that are - * used to switch between two implementations of a function, depending - * on the availability of a feature. 
- * - * @fname: a symbol name that will be defined as a function returning a - * function pointer whose type will match @orig and @alt - * @orig: A pointer to the default function, as returned by @fname when - * @cond doesn't hold - * @alt: A pointer to the alternate function, as returned by @fname - * when @cond holds - * @cond: a CPU feature (as described in asm/cpufeature.h) - */ -#define hyp_alternate_select(fname, orig, alt, cond) \ -typeof(orig) * __hyp_text fname(void) \ -{ \ - typeof(alt) *val = orig; \ - asm volatile(ALTERNATIVE("nop \n", \ - "mov %0, %1 \n", \ - cond) \ - : "+r" (val) : "r" (alt)); \ - return val; \ -} - -void __vgic_v2_save_state(struct kvm_vcpu *vcpu); -void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); - -void __vgic_v3_save_state(struct kvm_vcpu *vcpu); -void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); - -void __timer_save_state(struct kvm_vcpu *vcpu); -void __timer_restore_state(struct kvm_vcpu *vcpu); - -void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); -void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt); -void __sysreg32_save_state(struct kvm_vcpu *vcpu); -void __sysreg32_restore_state(struct kvm_vcpu *vcpu); - -void __debug_save_state(struct kvm_vcpu *vcpu, - struct kvm_guest_debug_arch *dbg, - struct kvm_cpu_context *ctxt); -void __debug_restore_state(struct kvm_vcpu *vcpu, - struct kvm_guest_debug_arch *dbg, - struct kvm_cpu_context *ctxt); -void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); -void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); - -void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); -void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -bool __fpsimd_enabled(void); - -u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); -void __noreturn __hyp_do_panic(unsigned long, ...); - -#endif /* __ARM64_KVM_HYP_H__ */ - diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c index 17e8cc09a1d8..bfc54fd82797 100644 --- a/arch/arm64/kvm/hyp/s2-setup.c +++ b/arch/arm64/kvm/hyp/s2-setup.c @@ -18,8 +18,7 @@ #include #include #include - -#include "hyp.h" +#include void __hyp_text __init_stage2_translation(void) { diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ecf5b05d1e16..7b81e56111ab 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -17,8 +17,7 @@ #include #include - -#include "hyp.h" +#include static bool __hyp_text __fpsimd_enabled_nvhe(void) { diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 74b5f81678c2..0f7c40eb3f53 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* Yes, this does nothing, on purpose */ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { } diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c index f276d9e74411..ea00d69e7078 100644 --- a/arch/arm64/kvm/hyp/timer-sr.c +++ b/arch/arm64/kvm/hyp/timer-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 2a7e0d838698..be8177cdd3bf 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -15,7 +15,7 @@ * 
along with this program. If not, see . */ -#include "hyp.h" +#include static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c index e71761238cfc..9514a7d90d71 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 5dd2a26444ec..0035b2d3fb6d 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include #define vtr_to_max_lr_idx(v) ((v) & 0xf) #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) -- cgit v1.2.3 From 6d50d54cd80cb60d701e70fb0633250115606459 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:24:39 +0000 Subject: arm64: KVM: Move vgic-v2 and timer save/restore to virt/kvm/arm/hyp We already have virt/kvm/arm/ containing timer and vgic stuff. Add yet another subdirectory to contain the hyp-specific files (timer and vgic again). Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/Makefile | 7 +++- arch/arm64/kvm/hyp/timer-sr.c | 69 ---------------------------------- arch/arm64/kvm/hyp/vgic-v2-sr.c | 82 ----------------------------------------- virt/kvm/arm/hyp/timer-sr.c | 69 ++++++++++++++++++++++++++++++++++ virt/kvm/arm/hyp/vgic-v2-sr.c | 82 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 156 insertions(+), 153 deletions(-) delete mode 100644 arch/arm64/kvm/hyp/timer-sr.c delete mode 100644 arch/arm64/kvm/hyp/vgic-v2-sr.c create mode 100644 virt/kvm/arm/hyp/timer-sr.c create mode 100644 virt/kvm/arm/hyp/vgic-v2-sr.c (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 5326e664fd41..b6a8fc5ad1af 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -2,9 +2,12 @@ # Makefile for Kernel-based Virtual Machine module, HYP part # -obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o +KVM=../../../../virt/kvm + +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o + obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c deleted file mode 100644 index ea00d69e7078..000000000000 --- a/arch/arm64/kvm/hyp/timer-sr.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - if (kvm->arch.timer.enabled) { - timer->cntv_ctl = read_sysreg_el0(cntv_ctl); - timer->cntv_cval = read_sysreg_el0(cntv_cval); - } - - /* Disable the virtual timer */ - write_sysreg_el0(0, cntv_ctl); - - /* Allow physical timer/counter access for the host */ - val = read_sysreg(cnthctl_el2); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; - write_sysreg(val, cnthctl_el2); - - /* Clear cntvoff for the host */ - write_sysreg(0, cntvoff_el2); -} - -void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - /* - * Disallow physical timer access for the guest - * Physical counter access is allowed - */ - val = read_sysreg(cnthctl_el2); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, cnthctl_el2); - - if (kvm->arch.timer.enabled) { - write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); - write_sysreg_el0(timer->cntv_cval, cntv_cval); - isb(); - write_sysreg_el0(timer->cntv_ctl, cntv_ctl); - } -} diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c deleted file mode 100644 index 9514a7d90d71..000000000000 --- a/arch/arm64/kvm/hyp/vgic-v2-sr.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - u32 eisr0, eisr1, elrsr0, elrsr1; - int i, nr_lr; - - if (!base) - return; - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); - cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); - eisr0 = readl_relaxed(base + GICH_EISR0); - elrsr0 = readl_relaxed(base + GICH_ELRSR0); - if (unlikely(nr_lr > 32)) { - eisr1 = readl_relaxed(base + GICH_EISR1); - elrsr1 = readl_relaxed(base + GICH_ELRSR1); - } else { - eisr1 = elrsr1 = 0; - } -#ifdef CONFIG_CPU_BIG_ENDIAN - cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; - cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; -#else - cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; - cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; -#endif - cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); - - writel_relaxed(0, base + GICH_HCR); - - for (i = 0; i < nr_lr; i++) - cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); -} - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - int i, nr_lr; - - if (!base) - return; - - writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); - writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); - writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - for (i = 0; i < nr_lr; i++) - writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); -} diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c new file mode 100644 index 000000000000..ea00d69e7078 --- /dev/null +++ b/virt/kvm/arm/hyp/timer-sr.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +/* vcpu is already in the HYP VA space */ +void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + if (kvm->arch.timer.enabled) { + timer->cntv_ctl = read_sysreg_el0(cntv_ctl); + timer->cntv_cval = read_sysreg_el0(cntv_cval); + } + + /* Disable the virtual timer */ + write_sysreg_el0(0, cntv_ctl); + + /* Allow physical timer/counter access for the host */ + val = read_sysreg(cnthctl_el2); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, cnthctl_el2); + + /* Clear cntvoff for the host */ + write_sysreg(0, cntvoff_el2); +} + +void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(cnthctl_el2); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, cnthctl_el2); + + if (kvm->arch.timer.enabled) { + write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); + write_sysreg_el0(timer->cntv_cval, cntv_cval); + isb(); + write_sysreg_el0(timer->cntv_ctl, cntv_ctl); + } +} diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c new file mode 100644 index 000000000000..9514a7d90d71 --- /dev/null +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+
+#include 
+#include 
+#include 
+
+#include 
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+        struct vgic_dist *vgic = &kvm->arch.vgic;
+        void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+        u32 eisr0, eisr1, elrsr0, elrsr1;
+        int i, nr_lr;
+
+        if (!base)
+                return;
+
+        nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+        cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
+        cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+        eisr0 = readl_relaxed(base + GICH_EISR0);
+        elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+        if (unlikely(nr_lr > 32)) {
+                eisr1 = readl_relaxed(base + GICH_EISR1);
+                elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+        } else {
+                eisr1 = elrsr1 = 0;
+        }
+#ifdef CONFIG_CPU_BIG_ENDIAN
+        cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
+        cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+#else
+        cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
+        cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+#endif
+        cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
+
+        writel_relaxed(0, base + GICH_HCR);
+
+        for (i = 0; i < nr_lr; i++)
+                cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+        struct vgic_dist *vgic = &kvm->arch.vgic;
+        void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+        int i, nr_lr;
+
+        if (!base)
+                return;
+
+        writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+        writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+        writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+
+        nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+        for (i = 0; i < nr_lr; i++)
+                writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
+}
-- cgit v1.2.3

From 04fe472615d0216ec0bdd66d9f3f1812b642ada6 Mon Sep 17 00:00:00 2001
From: Shannon Zhao
Date: Fri, 11 Sep 2015 09:38:32 +0800
Subject: arm64: KVM: Define PMU data structure for each vcpu

We plan to support a virtual PMU for the guest by full software
emulation, so define some basic structs and functions in preparation
for further steps.

Define struct kvm_pmc for a performance monitor counter and struct
kvm_pmu for the per-vcpu performance monitor unit. According to the
ARMv8 spec, the PMU contains at most 32 (ARMV8_PMU_MAX_COUNTERS)
counters.

Since this only supports ARM64 (or PMUv3), add a separate config
symbol for it.
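For orientation, here is a stand-alone sketch of how these pieces fit
together: one kvm_pmc per counter, all of them embedded in the per-vcpu
kvm_pmu. The struct and field names mirror the patch below; the sample
IRQ number and bitmask value are invented for illustration, and the
plain pointer merely stands in for struct perf_event *.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_MAX_COUNTERS 32       /* per the ARMv8 architecture */

/* Mirrors the layout added below: one kvm_pmc per counter. */
struct kvm_pmc {
        uint8_t idx;            /* index into the pmu->pmc array */
        void *perf_event;       /* stand-in for struct perf_event * */
        uint64_t bitmask;       /* counter width mask */
};

struct kvm_pmu {
        int irq_num;
        struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
        bool ready;
};

int main(void)
{
        struct kvm_pmu pmu = { .irq_num = 23 }; /* IRQ number is made up */
        unsigned int i;

        /* The idx back-pointer lets later code recover a counter's
         * position in the array from a struct kvm_pmc alone. */
        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                pmu.pmc[i].idx = i;
                pmu.pmc[i].bitmask = 0xffffffffULL; /* 32-bit event counter */
        }

        printf("counter 5 lives at index %u\n", pmu.pmc[5].idx);
        return 0;
}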
Signed-off-by: Shannon Zhao Acked-by: Marc Zyngier Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/Kconfig | 7 +++++++ include/kvm/arm_pmu.h | 42 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 include/kvm/arm_pmu.h (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 15851f52096b..fb57fdc6a433 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -38,6 +38,7 @@ #include #include +#include #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS @@ -213,6 +214,7 @@ struct kvm_vcpu_arch { /* VGIC state */ struct vgic_cpu vgic_cpu; struct arch_timer_cpu timer_cpu; + struct kvm_pmu pmu; /* * Anything that is not used directly from assembly code goes diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index a5272c07d1cb..de7450df7629 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -36,6 +36,7 @@ config KVM select HAVE_KVM_EVENTFD select HAVE_KVM_IRQFD select KVM_ARM_VGIC_V3 + select KVM_ARM_PMU if HW_PERF_EVENTS ---help--- Support hosting virtualized guest machines. We don't support KVM with 16K page tables yet, due to the multiple @@ -48,6 +49,12 @@ config KVM_ARM_HOST ---help--- Provides host support for ARM processors. +config KVM_ARM_PMU + bool + ---help--- + Adds support for a virtual Performance Monitoring Unit (PMU) in + virtual machines. + source drivers/vhost/Kconfig endif # VIRTUALIZATION diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h new file mode 100644 index 000000000000..3c2fd568e0a8 --- /dev/null +++ b/include/kvm/arm_pmu.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * Author: Shannon Zhao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ASM_ARM_KVM_PMU_H +#define __ASM_ARM_KVM_PMU_H + +#ifdef CONFIG_KVM_ARM_PMU + +#include +#include + +struct kvm_pmc { + u8 idx; /* index into the pmu->pmc array */ + struct perf_event *perf_event; + u64 bitmask; +}; + +struct kvm_pmu { + int irq_num; + struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; + bool ready; +}; +#else +struct kvm_pmu { +}; +#endif + +#endif -- cgit v1.2.3 From ab9468340d2bcc2a837b8b536fa819a0fc05a32e Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Thu, 18 Jun 2015 16:01:53 +0800 Subject: arm64: KVM: Add access handler for PMCR register Add reset handler which gets host value of PMCR_EL0 and make writable bits architecturally UNKNOWN except PMCR.E which is zero. Add an access handler for PMCR. 
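The register handling described above boils down to masking: a reset
that preserves the host's read-only fields while clearing PMCR.E,
writes that only touch the writable bits, and reads that hide the
write-only P and C bits. A stand-alone sketch of that arithmetic
follows; the helper names are local to this sketch, and the bit
positions and the 0x3f writable mask are illustrative assumptions, the
authoritative values being the ARMV8_PMU_PMCR_* macros in the kernel
headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative encodings; the real ones are the ARMV8_PMU_PMCR_* macros. */
#define PMCR_E          (1u << 0)       /* enable all counters */
#define PMCR_P          (1u << 1)       /* event counter reset (write-only) */
#define PMCR_C          (1u << 2)       /* cycle counter reset (write-only) */
#define PMCR_MASK       0x3fu           /* assumed writable-bit mask */

/* Reset: keep the host's read-only fields (e.g. PMCR.N), set the
 * writable bits to an arbitrary (UNKNOWN) value, force E to 0. */
static uint64_t reset_pmcr(uint64_t host_pmcr)
{
        return ((host_pmcr & ~(uint64_t)PMCR_MASK) |
                (PMCR_MASK & 0xdecafbadu)) & ~(uint64_t)PMCR_E;
}

/* Guest write: only the writable bits may change. */
static uint64_t write_pmcr(uint64_t shadow, uint64_t guest_val)
{
        return (shadow & ~(uint64_t)PMCR_MASK) | (guest_val & PMCR_MASK);
}

/* Guest read: P and C read as zero, the rest comes from the shadow. */
static uint64_t read_pmcr(uint64_t shadow)
{
        return shadow & ~(uint64_t)(PMCR_P | PMCR_C);
}

int main(void)
{
        uint64_t shadow = reset_pmcr(0x41013000);       /* invented host value */

        shadow = write_pmcr(shadow, PMCR_E | PMCR_P | PMCR_C);
        printf("guest sees PMCR = %#llx\n",
               (unsigned long long)read_pmcr(shadow));
        return 0;
}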
Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/sys_regs.c | 42 +++++++++++++++++++++++++++++++++++++-- include/kvm/arm_pmu.h | 4 ++++ 3 files changed, 47 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index fb57fdc6a433..5def605b4525 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -117,6 +117,9 @@ enum vcpu_sysreg { MDSCR_EL1, /* Monitor Debug System Control Register */ MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */ + /* Performance Monitors Registers */ + PMCR_EL0, /* Control Register */ + /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ IFSR32_EL2, /* Instruction Fault Status Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2e90371cfb37..e88ae2d809a5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -439,6 +440,43 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr; } +static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + u64 pmcr, val; + + asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr)); + /* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) is reset to UNKNOWN + * except PMCR.E resetting to zero. + */ + val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) + | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); + vcpu_sys_reg(vcpu, PMCR_EL0) = val; +} + +static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 val; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + /* Only update writeable bits of PMCR */ + val = vcpu_sys_reg(vcpu, PMCR_EL0); + val &= ~ARMV8_PMU_PMCR_MASK; + val |= p->regval & ARMV8_PMU_PMCR_MASK; + vcpu_sys_reg(vcpu, PMCR_EL0) = val; + } else { + /* PMCR.P & PMCR.C are RAZ */ + val = vcpu_sys_reg(vcpu, PMCR_EL0) + & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); + p->regval = val; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -623,7 +661,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMCR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), - trap_raz_wi }, + access_pmcr, reset_pmcr, }, /* PMCNTENSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), trap_raz_wi }, @@ -885,7 +923,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, /* PMU */ - { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 3c2fd568e0a8..8157fe5bcbb0 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -34,9 +34,13 @@ struct kvm_pmu { struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; bool ready; }; + +#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) #else struct kvm_pmu { }; + +#define kvm_arm_pmu_v3_ready(v) (false) #endif #endif -- cgit v1.2.3 From 
3965c3ce751ab5a97618a2818eec4497576f4654 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 31 Aug 2015 17:20:22 +0800 Subject: arm64: KVM: Add access handler for PMSELR register Since the reset value of PMSELR_EL0 is UNKNOWN, use reset_unknown for its reset handler. When reading PMSELR, return the PMSELR.SEL field to guest. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 20 ++++++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5def605b4525..57a2d8f76c2f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -119,6 +119,7 @@ enum vcpu_sysreg { /* Performance Monitors Registers */ PMCR_EL0, /* Control Register */ + PMSELR_EL0, /* Event Counter Selection Register */ /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e88ae2d809a5..b05e20f8a3b9 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -477,6 +477,22 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) + vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; + else + /* return PMSELR.SEL field */ + p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0) + & ARMV8_PMU_COUNTER_MASK; + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -676,7 +692,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { trap_raz_wi }, /* PMSELR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), - trap_raz_wi }, + access_pmselr, reset_unknown, PMSELR_EL0 }, /* PMCEID0_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), trap_raz_wi }, @@ -927,7 +943,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, -- cgit v1.2.3 From a86b5505304404dc5fc5e62a6dc294706e525003 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 7 Sep 2015 16:11:12 +0800 Subject: arm64: KVM: Add access handler for PMCEID0 and PMCEID1 register Add access handler which gets host value of PMCEID0 or PMCEID1 when guest access these registers. Writing action to PMCEID0 or PMCEID1 is UNDEFINED. 
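For context on what the guest reads back here: PMCEID0 and PMCEID1 are
bitmaps of the implemented common events (event numbers 0-31 and 32-63
respectively), so forwarding the host values advertises exactly the
events the underlying PMU can count. A throwaway decode of such a
bitmap, with an invented sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Invented PMCEID0 value: bits 0, 3, 4, 16 and 17 set. */
        uint32_t pmceid0 = (1u << 0) | (1u << 3) | (1u << 4) |
                           (1u << 16) | (1u << 17);
        unsigned int ev;

        /* Each set bit n means common event number n is implemented. */
        for (ev = 0; ev < 32; ev++)
                if (pmceid0 & (1u << ev))
                        printf("common event %#x is implemented\n", ev);
        return 0;
}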
Signed-off-by: Shannon Zhao Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b05e20f8a3b9..ca8cdf6d83cf 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -493,6 +493,26 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 pmceid; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + BUG_ON(p->is_write); + + if (!(p->Op2 & 1)) + asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid)); + else + asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid)); + + p->regval = pmceid; + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -695,10 +715,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmselr, reset_unknown, PMSELR_EL0 }, /* PMCEID0_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), - trap_raz_wi }, + access_pmceid }, /* PMCEID1_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), - trap_raz_wi }, + access_pmceid }, /* PMCCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), trap_raz_wi }, @@ -944,8 +964,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, - { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, + { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, -- cgit v1.2.3 From 051ff581ce70e822729e9474941f3c206cbf7436 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Dec 2015 15:29:06 +0800 Subject: arm64: KVM: Add access handler for event counter register These kind of registers include PMEVCNTRn, PMCCNTR and PMXEVCNTR which is mapped to PMEVCNTRn. The access handler translates all aarch32 register offsets to aarch64 ones and uses vcpu_sys_reg() to access their values to avoid taking care of big endian. When reading these registers, return the sum of register value and the value perf event counts. 
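One detail of the handler added below that benefits from a worked
example is the index recovery: for PMEVCNTRn_EL0 the counter number is
reassembled from the low bits of CRm and Op2 as
((CRm & 3) << 3) | (Op2 & 7), e.g. CRm = 0b1011, Op2 = 0b110 selects
counter 30. A self-contained check of that round trip; the
evcntr_index() helper exists only in this sketch, which mirrors the
PMU_PMEVCNTR_EL0 macro below.

#include <stdio.h>

/* PMEVCNTRn_EL0 is encoded with CRm = 0b10xx and Op2 carrying n's low
 * three bits, so the index is ((CRm & 3) << 3) | (Op2 & 7). */
static unsigned int evcntr_index(unsigned int crm, unsigned int op2)
{
        return ((crm & 3) << 3) | (op2 & 7);
}

int main(void)
{
        unsigned int n;

        /* Re-derive the index from the encoding used by PMU_PMEVCNTR_EL0(n). */
        for (n = 0; n < 31; n++) {
                unsigned int crm = 0x8 | ((n >> 3) & 0x3);
                unsigned int op2 = n & 0x7;

                if (evcntr_index(crm, op2) != n)
                        printf("mismatch for counter %u\n", n);
        }
        printf("all 31 PMEVCNTRn encodings round-trip\n");
        return 0;
}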
Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 + arch/arm64/kvm/Makefile | 1 + arch/arm64/kvm/sys_regs.c | 139 ++++++++++++++++++++++++++++++++++++-- include/kvm/arm_pmu.h | 11 +++ virt/kvm/arm/pmu.c | 63 +++++++++++++++++ 5 files changed, 213 insertions(+), 4 deletions(-) create mode 100644 virt/kvm/arm/pmu.c (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 57a2d8f76c2f..4ae27fe34240 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -120,6 +120,9 @@ enum vcpu_sysreg { /* Performance Monitors Registers */ PMCR_EL0, /* Control Register */ PMSELR_EL0, /* Event Counter Selection Register */ + PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ + PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, + PMCCNTR_EL0, /* Cycle Counter Register */ /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index caee9ee8e12a..122cff482ac4 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -26,3 +26,4 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o +kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ca8cdf6d83cf..ff3214b6fbc8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -513,6 +513,56 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) +{ + u64 pmcr, val; + + pmcr = vcpu_sys_reg(vcpu, PMCR_EL0); + val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; + if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) + return false; + + return true; +} + +static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 idx; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (r->CRn == 9 && r->CRm == 13) { + if (r->Op2 == 2) { + /* PMXEVCNTR_EL0 */ + idx = vcpu_sys_reg(vcpu, PMSELR_EL0) + & ARMV8_PMU_COUNTER_MASK; + } else if (r->Op2 == 0) { + /* PMCCNTR_EL0 */ + idx = ARMV8_PMU_CYCLE_IDX; + } else { + BUG(); + } + } else if (r->CRn == 14 && (r->CRm & 12) == 8) { + /* PMEVCNTRn_EL0 */ + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); + } else { + BUG(); + } + + if (!pmu_counter_idx_valid(vcpu, idx)) + return false; + + if (p->is_write) + kvm_pmu_set_counter_value(vcpu, idx, p->regval); + else + p->regval = kvm_pmu_get_counter_value(vcpu, idx); + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -528,6 +578,13 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \ trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } +/* Macro to expand the PMEVCNTRn_EL0 register */ +#define PMU_PMEVCNTR_EL0(n) \ + /* PMEVCNTRn_EL0 */ \ + { Op0(0b11), Op1(0b011), CRn(0b1110), \ + CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } + /* * Architected system registers. 
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 @@ -721,13 +778,13 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmceid }, /* PMCCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), - trap_raz_wi }, + access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, /* PMXEVTYPER_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), trap_raz_wi }, /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), - trap_raz_wi }, + access_pmu_evcntr }, /* PMUSERENR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), trap_raz_wi }, @@ -742,6 +799,39 @@ static const struct sys_reg_desc sys_reg_descs[] = { { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), NULL, reset_unknown, TPIDRRO_EL0 }, + /* PMEVCNTRn_EL0 */ + PMU_PMEVCNTR_EL0(0), + PMU_PMEVCNTR_EL0(1), + PMU_PMEVCNTR_EL0(2), + PMU_PMEVCNTR_EL0(3), + PMU_PMEVCNTR_EL0(4), + PMU_PMEVCNTR_EL0(5), + PMU_PMEVCNTR_EL0(6), + PMU_PMEVCNTR_EL0(7), + PMU_PMEVCNTR_EL0(8), + PMU_PMEVCNTR_EL0(9), + PMU_PMEVCNTR_EL0(10), + PMU_PMEVCNTR_EL0(11), + PMU_PMEVCNTR_EL0(12), + PMU_PMEVCNTR_EL0(13), + PMU_PMEVCNTR_EL0(14), + PMU_PMEVCNTR_EL0(15), + PMU_PMEVCNTR_EL0(16), + PMU_PMEVCNTR_EL0(17), + PMU_PMEVCNTR_EL0(18), + PMU_PMEVCNTR_EL0(19), + PMU_PMEVCNTR_EL0(20), + PMU_PMEVCNTR_EL0(21), + PMU_PMEVCNTR_EL0(22), + PMU_PMEVCNTR_EL0(23), + PMU_PMEVCNTR_EL0(24), + PMU_PMEVCNTR_EL0(25), + PMU_PMEVCNTR_EL0(26), + PMU_PMEVCNTR_EL0(27), + PMU_PMEVCNTR_EL0(28), + PMU_PMEVCNTR_EL0(29), + PMU_PMEVCNTR_EL0(30), + /* DACR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), NULL, reset_unknown, DACR32_EL2 }, @@ -931,6 +1021,13 @@ static const struct sys_reg_desc cp14_64_regs[] = { { Op1( 0), CRm( 2), .access = trap_raz_wi }, }; +/* Macro to expand the PMEVCNTRn register */ +#define PMU_PMEVCNTR(n) \ + /* PMEVCNTRn */ \ + { Op1(0), CRn(0b1110), \ + CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evcntr } + /* * Trapped cp15 registers. 
TTBR0/TTBR1 get a double encoding, * depending on the way they are accessed (as a 32bit or a 64bit @@ -966,9 +1063,9 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, - { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, @@ -982,10 +1079,44 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, + + /* PMEVCNTRn */ + PMU_PMEVCNTR(0), + PMU_PMEVCNTR(1), + PMU_PMEVCNTR(2), + PMU_PMEVCNTR(3), + PMU_PMEVCNTR(4), + PMU_PMEVCNTR(5), + PMU_PMEVCNTR(6), + PMU_PMEVCNTR(7), + PMU_PMEVCNTR(8), + PMU_PMEVCNTR(9), + PMU_PMEVCNTR(10), + PMU_PMEVCNTR(11), + PMU_PMEVCNTR(12), + PMU_PMEVCNTR(13), + PMU_PMEVCNTR(14), + PMU_PMEVCNTR(15), + PMU_PMEVCNTR(16), + PMU_PMEVCNTR(17), + PMU_PMEVCNTR(18), + PMU_PMEVCNTR(19), + PMU_PMEVCNTR(20), + PMU_PMEVCNTR(21), + PMU_PMEVCNTR(22), + PMU_PMEVCNTR(23), + PMU_PMEVCNTR(24), + PMU_PMEVCNTR(25), + PMU_PMEVCNTR(26), + PMU_PMEVCNTR(27), + PMU_PMEVCNTR(28), + PMU_PMEVCNTR(29), + PMU_PMEVCNTR(30), }; static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, }; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 8157fe5bcbb0..bcb769805839 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -23,6 +23,8 @@ #include #include +#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) + struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; @@ -36,11 +38,20 @@ struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); +void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); #else struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) (false) +static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx) +{ + return 0; +} +static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx, u64 val) {} #endif #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c new file mode 100644 index 000000000000..cd74e6367cd6 --- /dev/null +++ b/virt/kvm/arm/pmu.c @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * Author: Shannon Zhao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +/** + * kvm_pmu_get_counter_value - get PMU counter value + * @vcpu: The vcpu pointer + * @select_idx: The counter index + */ +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) +{ + u64 counter, reg, enabled, running; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc = &pmu->pmc[select_idx]; + + reg = (select_idx == ARMV8_PMU_CYCLE_IDX) + ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; + counter = vcpu_sys_reg(vcpu, reg); + + /* The real counter value is equal to the value of counter register plus + * the value perf event counts. + */ + if (pmc->perf_event) + counter += perf_event_read_value(pmc->perf_event, &enabled, + &running); + + return counter & pmc->bitmask; +} + +/** + * kvm_pmu_set_counter_value - set PMU counter value + * @vcpu: The vcpu pointer + * @select_idx: The counter index + * @val: The counter value + */ +void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) +{ + u64 reg; + + reg = (select_idx == ARMV8_PMU_CYCLE_IDX) + ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; + vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); +} -- cgit v1.2.3 From 96b0eebcc6a14e3bdb9ff0e7176fbfc225bdde94 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 12:26:13 +0800 Subject: arm64: KVM: Add access handler for PMCNTENSET and PMCNTENCLR register Since the reset value of PMCNTENSET and PMCNTENCLR is UNKNOWN, use reset_unknown for its reset handler. Add a handler to emulate writing PMCNTENSET or PMCNTENCLR register. When writing to PMCNTENSET, call perf_event_enable to enable the perf event. When writing to PMCNTENCLR, call perf_event_disable to disable the perf event. Signed-off-by: Shannon Zhao Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 35 ++++++++++++++++++--- include/kvm/arm_pmu.h | 9 ++++++ virt/kvm/arm/pmu.c | 66 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 107 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4ae27fe34240..993793b422aa 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -123,6 +123,7 @@ enum vcpu_sysreg { PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, PMCCNTR_EL0, /* Cycle Counter Register */ + PMCNTENSET_EL0, /* Count Enable Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ff3214b6fbc8..d4b6ae3c09b5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -563,6 +563,33 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, return true; } +static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 val, mask; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + mask = kvm_pmu_valid_counter_mask(vcpu); + if (p->is_write) { + val = p->regval & mask; + if (r->Op2 & 0x1) { + /* accessing PMCNTENSET_EL0 */ + vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; + kvm_pmu_enable_counter(vcpu, val); + } else { + /* accessing PMCNTENCLR_EL0 */ + vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; + kvm_pmu_disable_counter(vcpu, val); + } + } else { + p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -757,10 +784,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmcr, reset_pmcr, }, /* PMCNTENSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), - trap_raz_wi }, + access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, /* PMCNTENCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), - trap_raz_wi }, + access_pmcnten, NULL, PMCNTENSET_EL0 }, /* PMOVSCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), trap_raz_wi }, @@ -1057,8 +1084,8 @@ static const struct sys_reg_desc cp15_regs[] = { /* PMU */ { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, - { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, + { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index bcb769805839..b70058ef1dd6 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -40,6 +40,9 @@ struct kvm_pmu { #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); #else struct kvm_pmu { }; @@ -52,6 +55,12 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, } static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) {} +static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} #endif #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index cd74e6367cd6..f8dc17430813 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -61,3 +61,69 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) ? 
PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); } + +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +{ + u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; + + val &= ARMV8_PMU_PMCR_N_MASK; + if (val == 0) + return BIT(ARMV8_PMU_CYCLE_IDX); + else + return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX); +} + +/** + * kvm_pmu_enable_counter - enable selected PMU counter + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMCNTENSET register + * + * Call perf_event_enable to start counting the perf event + */ +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc; + + if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val) + return; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + if (!(val & BIT(i))) + continue; + + pmc = &pmu->pmc[i]; + if (pmc->perf_event) { + perf_event_enable(pmc->perf_event); + if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) + kvm_debug("fail to enable perf event\n"); + } + } +} + +/** + * kvm_pmu_disable_counter - disable selected PMU counter + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMCNTENCLR register + * + * Call perf_event_disable to stop counting the perf event + */ +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc; + + if (!val) + return; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + if (!(val & BIT(i))) + continue; + + pmc = &pmu->pmc[i]; + if (pmc->perf_event) + perf_event_disable(pmc->perf_event); + } +} -- cgit v1.2.3 From 9feb21ac57d53003557ddc01f9aee496269996c7 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 23 Feb 2016 11:11:27 +0800 Subject: arm64: KVM: Add access handler for event type register These kind of registers include PMEVTYPERn, PMCCFILTR and PMXEVTYPER which is mapped to PMEVTYPERn or PMCCFILTR. The access handler translates all aarch32 register offsets to aarch64 ones and uses vcpu_sys_reg() to access their values to avoid taking care of big endian. When writing to these registers, create a perf_event for the selected event type. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 + arch/arm64/kvm/sys_regs.c | 126 +++++++++++++++++++++++++++++++++++++- 2 files changed, 127 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 993793b422aa..121182dd0947 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -123,6 +123,9 @@ enum vcpu_sysreg { PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, PMCCNTR_EL0, /* Cycle Counter Register */ + PMEVTYPER0_EL0, /* Event Type Register (0-30) */ + PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30, + PMCCFILTR_EL0, /* Cycle Count Filter Register */ PMCNTENSET_EL0, /* Count Enable Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d4b6ae3c09b5..4faf324c9be9 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -563,6 +563,42 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, return true; } +static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 idx, reg; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { + /* PMXEVTYPER_EL0 */ + idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; + reg = PMEVTYPER0_EL0 + idx; + } else if (r->CRn == 14 && (r->CRm & 12) == 12) { + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); + if (idx == ARMV8_PMU_CYCLE_IDX) + reg = PMCCFILTR_EL0; + else + /* PMEVTYPERn_EL0 */ + reg = PMEVTYPER0_EL0 + idx; + } else { + BUG(); + } + + if (!pmu_counter_idx_valid(vcpu, idx)) + return false; + + if (p->is_write) { + kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); + vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; + } else { + p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; + } + + return true; +} + static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -612,6 +648,13 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } +/* Macro to expand the PMEVTYPERn_EL0 register */ +#define PMU_PMEVTYPER_EL0(n) \ + /* PMEVTYPERn_EL0 */ \ + { Op0(0b11), Op1(0b011), CRn(0b1110), \ + CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } + /* * Architected system registers. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 @@ -808,7 +851,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, /* PMXEVTYPER_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), - trap_raz_wi }, + access_pmu_evtyper }, /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), access_pmu_evcntr }, @@ -858,6 +901,44 @@ static const struct sys_reg_desc sys_reg_descs[] = { PMU_PMEVCNTR_EL0(28), PMU_PMEVCNTR_EL0(29), PMU_PMEVCNTR_EL0(30), + /* PMEVTYPERn_EL0 */ + PMU_PMEVTYPER_EL0(0), + PMU_PMEVTYPER_EL0(1), + PMU_PMEVTYPER_EL0(2), + PMU_PMEVTYPER_EL0(3), + PMU_PMEVTYPER_EL0(4), + PMU_PMEVTYPER_EL0(5), + PMU_PMEVTYPER_EL0(6), + PMU_PMEVTYPER_EL0(7), + PMU_PMEVTYPER_EL0(8), + PMU_PMEVTYPER_EL0(9), + PMU_PMEVTYPER_EL0(10), + PMU_PMEVTYPER_EL0(11), + PMU_PMEVTYPER_EL0(12), + PMU_PMEVTYPER_EL0(13), + PMU_PMEVTYPER_EL0(14), + PMU_PMEVTYPER_EL0(15), + PMU_PMEVTYPER_EL0(16), + PMU_PMEVTYPER_EL0(17), + PMU_PMEVTYPER_EL0(18), + PMU_PMEVTYPER_EL0(19), + PMU_PMEVTYPER_EL0(20), + PMU_PMEVTYPER_EL0(21), + PMU_PMEVTYPER_EL0(22), + PMU_PMEVTYPER_EL0(23), + PMU_PMEVTYPER_EL0(24), + PMU_PMEVTYPER_EL0(25), + PMU_PMEVTYPER_EL0(26), + PMU_PMEVTYPER_EL0(27), + PMU_PMEVTYPER_EL0(28), + PMU_PMEVTYPER_EL0(29), + PMU_PMEVTYPER_EL0(30), + /* PMCCFILTR_EL0 + * This register resets as unknown in 64bit mode while it resets as zero + * in 32bit mode. Here we choose to reset it as zero for consistency. 
+ */ + { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111), + access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 }, /* DACR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), @@ -1055,6 +1136,13 @@ static const struct sys_reg_desc cp14_64_regs[] = { CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr } +/* Macro to expand the PMEVTYPERn register */ +#define PMU_PMEVTYPER(n) \ + /* PMEVTYPERn */ \ + { Op1(0), CRn(0b1110), \ + CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evtyper } + /* * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, * depending on the way they are accessed (as a 32bit or a 64bit @@ -1091,7 +1179,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, - { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, @@ -1139,6 +1227,40 @@ static const struct sys_reg_desc cp15_regs[] = { PMU_PMEVCNTR(28), PMU_PMEVCNTR(29), PMU_PMEVCNTR(30), + /* PMEVTYPERn */ + PMU_PMEVTYPER(0), + PMU_PMEVTYPER(1), + PMU_PMEVTYPER(2), + PMU_PMEVTYPER(3), + PMU_PMEVTYPER(4), + PMU_PMEVTYPER(5), + PMU_PMEVTYPER(6), + PMU_PMEVTYPER(7), + PMU_PMEVTYPER(8), + PMU_PMEVTYPER(9), + PMU_PMEVTYPER(10), + PMU_PMEVTYPER(11), + PMU_PMEVTYPER(12), + PMU_PMEVTYPER(13), + PMU_PMEVTYPER(14), + PMU_PMEVTYPER(15), + PMU_PMEVTYPER(16), + PMU_PMEVTYPER(17), + PMU_PMEVTYPER(18), + PMU_PMEVTYPER(19), + PMU_PMEVTYPER(20), + PMU_PMEVTYPER(21), + PMU_PMEVTYPER(22), + PMU_PMEVTYPER(23), + PMU_PMEVTYPER(24), + PMU_PMEVTYPER(25), + PMU_PMEVTYPER(26), + PMU_PMEVTYPER(27), + PMU_PMEVTYPER(28), + PMU_PMEVTYPER(29), + PMU_PMEVTYPER(30), + /* PMCCFILTR */ + { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper }, }; static const struct sys_reg_desc cp15_64_regs[] = { -- cgit v1.2.3 From 9db52c78cd43c7fe69992cb7d57cffa991b36ced Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 14:40:20 +0800 Subject: arm64: KVM: Add access handler for PMINTENSET and PMINTENCLR register Since the reset value of PMINTENSET and PMINTENCLR is UNKNOWN, use reset_unknown for its reset handler. Add a handler to emulate writing PMINTENSET or PMINTENCLR register. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 32 ++++++++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 121182dd0947..da59f44f0c84 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -127,6 +127,7 @@ enum vcpu_sysreg { PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30, PMCCFILTR_EL0, /* Cycle Count Filter Register */ PMCNTENSET_EL0, /* Count Enable Set Register */ + PMINTENSET_EL1, /* Interrupt Enable Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4faf324c9be9..bfc70b2529cd 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -626,6 +626,30 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + u64 val = p->regval & mask; + + if (r->Op2 & 0x1) + /* accessing PMINTENSET_EL1 */ + vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; + else + /* accessing PMINTENCLR_EL1 */ + vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; + } else { + p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -784,10 +808,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMINTENSET_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), - trap_raz_wi }, + access_pminten, reset_unknown, PMINTENSET_EL1 }, /* PMINTENCLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), - trap_raz_wi }, + access_pminten, NULL, PMINTENSET_EL1 }, /* MAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), @@ -1182,8 +1206,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, + { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, -- cgit v1.2.3 From 76d883c4e6401b98ea26d40c437ff62719a517ad Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 15:03:26 +0800 Subject: arm64: KVM: Add access handler for PMOVSSET and PMOVSCLR register Since the reset value of PMOVSSET and PMOVSCLR is UNKNOWN, use reset_unknown for its reset handler. Add a handler to emulate writing PMOVSSET or PMOVSCLR register. When writing non-zero value to PMOVSSET, the counter and its interrupt is enabled, kick this vcpu to sync PMU interrupt. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 29 ++++++++++++++++++++++++++--- include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 31 +++++++++++++++++++++++++++++++ 4 files changed, 60 insertions(+), 3 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index da59f44f0c84..6c61a2bda6de 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -128,6 +128,7 @@ enum vcpu_sysreg { PMCCFILTR_EL0, /* Cycle Count Filter Register */ PMCNTENSET_EL0, /* Count Enable Set Register */ PMINTENSET_EL1, /* Interrupt Enable Set Register */ + PMOVSSET_EL0, /* Overflow Flag Status Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index bfc70b2529cd..6a774f9b9cca 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -650,6 +650,28 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + if (r->CRm & 0x2) + /* accessing PMOVSSET_EL0 */ + kvm_pmu_overflow_set(vcpu, p->regval & mask); + else + /* accessing PMOVSCLR_EL0 */ + vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); + } else { + p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -857,7 +879,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmcnten, NULL, PMCNTENSET_EL0 }, /* PMOVSCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), - trap_raz_wi }, + access_pmovs, NULL, PMOVSSET_EL0 }, /* PMSWINC_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), trap_raz_wi }, @@ -884,7 +906,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { trap_raz_wi }, /* PMOVSSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), - trap_raz_wi }, + access_pmovs, reset_unknown, PMOVSSET_EL0 }, /* TPIDR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), @@ -1198,7 +1220,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, - { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, @@ -1208,6 +1230,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, + { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index c57377970d4e..60061dabe881 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -43,6 +43,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); #else @@ -63,6 +64,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) } static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} 
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 591a11d1bd13..023286101fef 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -149,6 +149,37 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) } } +static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) +{ + u64 reg = 0; + + if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) + reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0); + reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0); + reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1); + reg &= kvm_pmu_valid_counter_mask(vcpu); + + return reg; +} + +/** + * kvm_pmu_overflow_set - set PMU overflow interrupt + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMOVSSET register + */ +void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) +{ + u64 reg; + + if (val == 0) + return; + + vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val; + reg = kvm_pmu_overflow_status(vcpu); + if (reg != 0) + kvm_vcpu_kick(vcpu); +} + static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) { return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && -- cgit v1.2.3 From 7a0adc7064b88609e2917446af8789fac1d4fdd1 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 15:49:39 +0800 Subject: arm64: KVM: Add access handler for PMSWINC register Add access handler which emulates writing and reading PMSWINC register and add support for creating software increment event. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_perf_event.h | 2 ++ arch/arm64/kvm/sys_regs.c | 20 ++++++++++++++++++- include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 34 +++++++++++++++++++++++++++++++++ 5 files changed, 58 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6c61a2bda6de..4001e85b4818 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -129,6 +129,7 @@ enum vcpu_sysreg { PMCNTENSET_EL0, /* Count Enable Set Register */ PMINTENSET_EL1, /* Interrupt Enable Set Register */ PMOVSSET_EL0, /* Overflow Flag Status Set Register */ + PMSWINC_EL0, /* Software Increment Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h index d1c9d504f928..62fa60fbc0b3 100644 --- a/arch/arm64/include/asm/kvm_perf_event.h +++ b/arch/arm64/include/asm/kvm_perf_event.h @@ -45,6 +45,8 @@ #define ARMV8_PMU_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */ #define ARMV8_PMU_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ + /* * Event filters for PMUv3 */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 6a774f9b9cca..10e53796926c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -672,6 +672,23 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 mask; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + mask = kvm_pmu_valid_counter_mask(vcpu); + kvm_pmu_software_increment(vcpu, p->regval & mask); + return true; + } + + return false; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -882,7 +899,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmovs, NULL, PMOVSSET_EL0 }, /* PMSWINC_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), - trap_raz_wi }, + access_pmswinc, reset_unknown, PMSWINC_EL0 }, /* PMSELR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), access_pmselr, reset_unknown, PMSELR_EL0 }, @@ -1221,6 +1238,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, + { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 60061dabe881..348c4c9d763a 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -44,6 +44,7 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); #else @@ -65,6 +66,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 023286101fef..9fc775ef03ec 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -180,6 +180,36 @@ void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) kvm_vcpu_kick(vcpu); } +/** + * kvm_pmu_software_increment - do software 
increment + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMSWINC register + */ +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) +{ + int i; + u64 type, enable, reg; + + if (val == 0) + return; + + enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0); + for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) { + if (!(val & BIT(i))) + continue; + type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) + & ARMV8_PMU_EVTYPE_EVENT; + if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) + && (enable & BIT(i))) { + reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; + reg = lower_32_bits(reg); + vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; + if (!reg) + kvm_pmu_overflow_set(vcpu, BIT(i)); + } + } +} + static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) { return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && @@ -208,6 +238,10 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, kvm_pmu_stop_counter(vcpu, pmc); eventsel = data & ARMV8_PMU_EVTYPE_EVENT; + /* Software increment event does't need to be backed by a perf event */ + if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) + return; + memset(&attr, 0, sizeof(struct perf_event_attr)); attr.type = PERF_TYPE_RAW; attr.size = sizeof(attr); -- cgit v1.2.3 From 76993739cd6f5b42e881fe3332b9f8eb98cd6907 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Wed, 28 Oct 2015 12:10:30 +0800 Subject: arm64: KVM: Add helper to handle PMCR register bits According to ARMv8 spec, when writing 1 to PMCR.E, all counters are enabled by PMCNTENSET, while writing 0 to PMCR.E, all counters are disabled. When writing 1 to PMCR.P, reset all event counters, not including PMCCNTR, to zero. When writing 1 to PMCR.C, reset PMCCNTR to zero. Signed-off-by: Shannon Zhao Reviewed-by: Marc Zyngier Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_perf_event.h | 4 +++- arch/arm64/kvm/sys_regs.c | 1 + include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 34 +++++++++++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h index 62fa60fbc0b3..6d080c07873b 100644 --- a/arch/arm64/include/asm/kvm_perf_event.h +++ b/arch/arm64/include/asm/kvm_perf_event.h @@ -29,9 +29,11 @@ #define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +/* Determines which bit of PMCCNTR_EL0 generates an overflow */ +#define ARMV8_PMU_PMCR_LC (1 << 6) #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ #define ARMV8_PMU_PMCR_N_MASK 0x1f -#define ARMV8_PMU_PMCR_MASK 0x3f /* Mask for writable bits */ +#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ /* * PMOVSR: counters overflow flag status reg diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 10e53796926c..12f36ef8caa0 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -467,6 +467,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, val &= ~ARMV8_PMU_PMCR_MASK; val |= p->regval & ARMV8_PMU_PMCR_MASK; vcpu_sys_reg(vcpu, PMCR_EL0) = val; + kvm_pmu_handle_pmcr(vcpu, val); } else { /* PMCR.P & PMCR.C are RAZ */ val = vcpu_sys_reg(vcpu, PMCR_EL0) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 348c4c9d763a..8bc92d119713 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -45,6 +45,7 @@ void 
kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); #else @@ -67,6 +68,7 @@ static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 9fc775ef03ec..cda869c609dd 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -210,6 +210,40 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) } } +/** + * kvm_pmu_handle_pmcr - handle PMCR register + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMCR register + */ +void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) +{ + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc; + u64 mask; + int i; + + mask = kvm_pmu_valid_counter_mask(vcpu); + if (val & ARMV8_PMU_PMCR_E) { + kvm_pmu_enable_counter(vcpu, + vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask); + } else { + kvm_pmu_disable_counter(vcpu, mask); + } + + if (val & ARMV8_PMU_PMCR_C) + kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0); + + if (val & ARMV8_PMU_PMCR_P) { + for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) + kvm_pmu_set_counter_value(vcpu, i, 0); + } + + if (val & ARMV8_PMU_PMCR_LC) { + pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX]; + pmc->bitmask = 0xffffffffffffffffUL; + } +} + static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) { return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && -- cgit v1.2.3 From d692b8ad6ec4814ddd9a37ce5c9c9d971e741088 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 15:15:56 +0800 Subject: arm64: KVM: Add access handler for PMUSERENR register This register resets as unknown in 64bit mode while it resets as zero in 32bit mode. Here we choose to reset it as zero for consistency. PMUSERENR_EL0 holds some bits which decide whether PMU registers can be accessed from EL0. Add some check helpers to handle the access from EL0. When these bits are zero, only reading PMUSERENR will trap to EL2 and writing PMUSERENR or reading/writing other PMU registers will trap to EL1 other than EL2 when HCR.TGE==0. To current KVM configuration (HCR.TGE==0) there is no way to get these traps. Here we write 0xf to physical PMUSERENR register on VM entry, so that it will trap PMU access from EL0 to EL2. Within the register access handler we check the real value of guest PMUSERENR register to decide whether this access is allowed. If not allowed, return false to inject UND to guest. 
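For illustration only (this is not part of the change itself, and the macro and helper names below are made up): a guest EL1 kernel that wants its EL0 tasks to read the cycle counter would set the CR bit in its own PMUSERENR_EL0, roughly as follows. KVM then finds that bit set in the shadowed register and lets the trapped EL0 access through instead of injecting an UNDEF.

    #define PMUSERENR_CR_BIT	(1UL << 2)	/* EL0 may read PMCCNTR_EL0 */

    static inline void guest_allow_el0_cycle_counter(void)
    {
    	unsigned long val = PMUSERENR_CR_BIT;

    	/* Guest-side write; under KVM this is the emulated register. */
    	asm volatile("msr pmuserenr_el0, %0" : : "r" (val));
    	asm volatile("isb" : : : "memory");
    }

Setting the EN bit instead would grant EL0 full PMU register access, per the bit definitions added above.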
Signed-off-by: Shannon Zhao Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_hyp.h | 1 + arch/arm64/include/asm/kvm_perf_event.h | 9 +++ arch/arm64/kvm/hyp/switch.c | 3 + arch/arm64/kvm/sys_regs.c | 101 ++++++++++++++++++++++++++++++-- 5 files changed, 110 insertions(+), 5 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4001e85b4818..a819c6debce4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -130,6 +130,7 @@ enum vcpu_sysreg { PMINTENSET_EL1, /* Interrupt Enable Set Register */ PMOVSSET_EL0, /* Overflow Flag Status Set Register */ PMSWINC_EL0, /* Software Increment Register */ + PMUSERENR_EL0, /* User Enable Register */ /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 44eaff70da6a..a46b019ebcf5 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #define __hyp_text __section(.hyp.text) notrace diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h index 6d080c07873b..c18fdebb8f66 100644 --- a/arch/arm64/include/asm/kvm_perf_event.h +++ b/arch/arm64/include/asm/kvm_perf_event.h @@ -56,4 +56,13 @@ #define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) #define ARMV8_PMU_INCLUDE_EL2 (1 << 27) +/* + * PMUSERENR: user enable reg + */ +#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ +#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ +#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ +#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ +#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ + #endif diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 7b81e56111ab..437cfad5e3d8 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -82,6 +82,8 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) write_sysreg(val, hcr_el2); /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); + /* Make sure we trap PMU access from EL0 to EL2 */ + write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); __activate_traps_arch()(); } @@ -110,6 +112,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) __deactivate_traps_arch()(); write_sysreg(0, hstr_el2); write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); + write_sysreg(0, pmuserenr_el0); } static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 12f36ef8caa0..fe15c2310a65 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -453,6 +453,37 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) vcpu_sys_reg(vcpu, PMCR_EL0) = val; } +static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu)); +} + +static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN)) + || vcpu_mode_priv(vcpu)); +} + +static 
bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN)) + || vcpu_mode_priv(vcpu)); +} + +static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN)) + || vcpu_mode_priv(vcpu)); +} + static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -461,6 +492,9 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (p->is_write) { /* Only update writeable bits of PMCR */ val = vcpu_sys_reg(vcpu, PMCR_EL0); @@ -484,6 +518,9 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_event_counter_el0_disabled(vcpu)) + return false; + if (p->is_write) vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; else @@ -504,6 +541,9 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, BUG_ON(p->is_write); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (!(p->Op2 & 1)) asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid)); else @@ -538,16 +578,25 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, if (r->CRn == 9 && r->CRm == 13) { if (r->Op2 == 2) { /* PMXEVCNTR_EL0 */ + if (pmu_access_event_counter_el0_disabled(vcpu)) + return false; + idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; } else if (r->Op2 == 0) { /* PMCCNTR_EL0 */ + if (pmu_access_cycle_counter_el0_disabled(vcpu)) + return false; + idx = ARMV8_PMU_CYCLE_IDX; } else { BUG(); } } else if (r->CRn == 14 && (r->CRm & 12) == 8) { /* PMEVCNTRn_EL0 */ + if (pmu_access_event_counter_el0_disabled(vcpu)) + return false; + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); } else { BUG(); @@ -556,10 +605,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, if (!pmu_counter_idx_valid(vcpu, idx)) return false; - if (p->is_write) + if (p->is_write) { + if (pmu_access_el0_disabled(vcpu)) + return false; + kvm_pmu_set_counter_value(vcpu, idx, p->regval); - else + } else { p->regval = kvm_pmu_get_counter_value(vcpu, idx); + } return true; } @@ -572,6 +625,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { /* PMXEVTYPER_EL0 */ idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; @@ -608,6 +664,9 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + mask = kvm_pmu_valid_counter_mask(vcpu); if (p->is_write) { val = p->regval & mask; @@ -635,6 +694,9 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (!vcpu_mode_priv(vcpu)) + return false; + if (p->is_write) { u64 val = p->regval & mask; @@ -659,6 +721,9 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (p->is_write) { if (r->CRm & 0x2) 
/* accessing PMOVSSET_EL0 */ @@ -681,6 +746,9 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_write_swinc_el0_disabled(vcpu)) + return false; + if (p->is_write) { mask = kvm_pmu_valid_counter_mask(vcpu); kvm_pmu_software_increment(vcpu, p->regval & mask); @@ -690,6 +758,26 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return false; } +static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + if (!vcpu_mode_priv(vcpu)) + return false; + + vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval + & ARMV8_PMU_USERENR_MASK; + } else { + p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0) + & ARMV8_PMU_USERENR_MASK; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -919,9 +1007,12 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), access_pmu_evcntr }, - /* PMUSERENR_EL0 */ + /* PMUSERENR_EL0 + * This register resets as unknown in 64bit mode while it resets as zero + * in 32bit mode. Here we choose to reset it as zero for consistency. + */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), - trap_raz_wi }, + access_pmuserenr, reset_val, PMUSERENR_EL0, 0 }, /* PMOVSSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), access_pmovs, reset_unknown, PMOVSSET_EL0 }, @@ -1246,7 +1337,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, - { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, -- cgit v1.2.3 From 2aa36e9840d71710f06b3c29634f044fde8bcbe5 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Fri, 11 Sep 2015 11:30:22 +0800 Subject: arm64: KVM: Reset PMU state when resetting vcpu When resetting vcpu, it needs to reset the PMU state to initial status. 
Signed-off-by: Shannon Zhao Reviewed-by: Marc Zyngier Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/kvm/reset.c | 3 +++ include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 17 +++++++++++++++++ 3 files changed, 22 insertions(+) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f34745cb3d23..dfbce781d284 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -120,6 +120,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset system registers */ kvm_reset_sys_regs(vcpu); + /* Reset PMU */ + kvm_pmu_vcpu_reset(vcpu); + /* Reset timer */ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); } diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 9c184edb8e07..b4993eb76aa1 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -42,6 +42,7 @@ struct kvm_pmu { u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); @@ -67,6 +68,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { return 0; } +static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 74e858c42ae1..1dbbc2c51559 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -84,6 +84,23 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) } } +/** + * kvm_pmu_vcpu_reset - reset pmu state for cpu + * @vcpu: The vcpu pointer + * + */ +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); + pmu->pmc[i].idx = i; + pmu->pmc[i].bitmask = 0xffffffffUL; + } +} + u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; -- cgit v1.2.3 From 808e738142e7086ef793ebf9797099c392894e65 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 11 Jan 2016 22:46:15 +0800 Subject: arm64: KVM: Add a new feature bit for PMUv3 To support guest PMUv3, use one bit of the VCPU INIT feature array. Initialize the PMU when initialzing the vcpu with that bit and PMU overflow interrupt set. Signed-off-by: Shannon Zhao Acked-by: Peter Maydell Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/api.txt | 2 ++ arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/include/uapi/asm/kvm.h | 1 + arch/arm64/kvm/reset.c | 3 +++ include/kvm/arm_pmu.h | 2 ++ include/uapi/linux/kvm.h | 1 + virt/kvm/arm/pmu.c | 10 ++++++++++ 7 files changed, 20 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kvm') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 07e4cdf02407..9684f8dc6bb2 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2577,6 +2577,8 @@ Possible features: Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. 
Depends on KVM_CAP_ARM_PSCI_0_2. + - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. + Depends on KVM_CAP_ARM_PMU_V3. 4.83 KVM_ARM_PREFERRED_TARGET diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a819c6debce4..b02ef0828f22 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -42,7 +42,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS -#define KVM_VCPU_MAX_FEATURES 3 +#define KVM_VCPU_MAX_FEATURES 4 int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 2d4ca4bb0dd3..6aedbe314432 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -94,6 +94,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ +#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ struct kvm_vcpu_init { __u32 target; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index dfbce781d284..cf4f28a7a514 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -77,6 +77,9 @@ int kvm_arch_dev_ioctl_check_extension(long ext) case KVM_CAP_GUEST_DEBUG_HW_WPS: r = get_num_wrps(); break; + case KVM_CAP_ARM_PMU_V3: + r = kvm_arm_support_pmu_v3(); + break; case KVM_CAP_SET_GUEST_DEBUG: r = 1; break; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 9f87d717ef84..ee62497d46f7 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -53,6 +53,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); +bool kvm_arm_support_pmu_v3(void); #else struct kvm_pmu { }; @@ -80,6 +81,7 @@ static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} +static inline bool kvm_arm_support_pmu_v3(void) { return false; } #endif #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 9da905157cee..dc16d3084d4a 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -850,6 +850,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122 #define KVM_CAP_HYPERV_SYNIC 123 #define KVM_CAP_S390_RI 124 +#define KVM_CAP_ARM_PMU_V3 125 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 9b83857da195..6e28f4f86cc6 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -405,3 +405,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, pmc->perf_event = event; } + +bool kvm_arm_support_pmu_v3(void) +{ + /* + * Check if HW_PERF_EVENTS are supported by checking the number of + * hardware performance counters. This could ensure the presence of + * a physical PMU and CONFIG_PERF_EVENT is selected. + */ + return (perf_num_counters() > 0); +} -- cgit v1.2.3 From f577f6c2a6a5ccabe98061f256a1e2ff468d5e93 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 11 Jan 2016 20:56:17 +0800 Subject: arm64: KVM: Introduce per-vcpu kvm device controls In some cases it needs to get/set attributes specific to a vcpu and so needs something else than ONE_REG. 
Let's copy the KVM_DEVICE approach, and define the respective ioctls for the vcpu file descriptor. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Acked-by: Peter Maydell Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/api.txt | 10 +++--- Documentation/virtual/kvm/devices/vcpu.txt | 8 +++++ arch/arm/kvm/arm.c | 55 ++++++++++++++++++++++++++++++ arch/arm64/kvm/reset.c | 1 + include/uapi/linux/kvm.h | 1 + 5 files changed, 71 insertions(+), 4 deletions(-) create mode 100644 Documentation/virtual/kvm/devices/vcpu.txt (limited to 'arch/arm64/kvm') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 9684f8dc6bb2..cb2ef0bcdcb5 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2507,8 +2507,9 @@ struct kvm_create_device { 4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR -Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device -Type: device ioctl, vm ioctl +Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, + KVM_CAP_VCPU_ATTRIBUTES for vcpu device +Type: device ioctl, vm ioctl, vcpu ioctl Parameters: struct kvm_device_attr Returns: 0 on success, -1 on error Errors: @@ -2533,8 +2534,9 @@ struct kvm_device_attr { 4.81 KVM_HAS_DEVICE_ATTR -Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device -Type: device ioctl, vm ioctl +Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, + KVM_CAP_VCPU_ATTRIBUTES for vcpu device +Type: device ioctl, vm ioctl, vcpu ioctl Parameters: struct kvm_device_attr Returns: 0 on success, -1 on error Errors: diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt new file mode 100644 index 000000000000..3cc59c5e44ce --- /dev/null +++ b/Documentation/virtual/kvm/devices/vcpu.txt @@ -0,0 +1,8 @@ +Generic vcpu interface +==================================== + +The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR, +KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct +kvm_device_attr as other devices, but targets VCPU-wide settings and controls. + +The groups and attributes per virtual cpu, if any, are architecture specific. 
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 9d133df2da53..166232356291 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -828,11 +828,51 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, return 0; } +static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + default: + break; + } + + return ret; +} + +static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + default: + break; + } + + return ret; +} + +static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + default: + break; + } + + return ret; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; + struct kvm_device_attr attr; switch (ioctl) { case KVM_ARM_VCPU_INIT: { @@ -875,6 +915,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return -E2BIG; return kvm_arm_copy_reg_indices(vcpu, user_list->reg); } + case KVM_SET_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + return kvm_arm_vcpu_set_attr(vcpu, &attr); + } + case KVM_GET_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + return kvm_arm_vcpu_get_attr(vcpu, &attr); + } + case KVM_HAS_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + return kvm_arm_vcpu_has_attr(vcpu, &attr); + } default: return -EINVAL; } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index cf4f28a7a514..9677bf069bcc 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -81,6 +81,7 @@ int kvm_arch_dev_ioctl_check_extension(long ext) r = kvm_arm_support_pmu_v3(); break; case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_VCPU_ATTRIBUTES: r = 1; break; default: diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index dc16d3084d4a..50f44a229212 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -851,6 +851,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_HYPERV_SYNIC 123 #define KVM_CAP_S390_RI 124 #define KVM_CAP_ARM_PMU_V3 125 +#define KVM_CAP_VCPU_ATTRIBUTES 126 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From bb0c70bcca6ba3c84afc2da7426f3b923bbe6825 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 11 Jan 2016 21:35:32 +0800 Subject: arm64: KVM: Add a new vcpu device control group for PMUv3 To configure the virtual PMUv3 overflow interrupt number, we use the vcpu kvm_device ioctl, encapsulating the KVM_ARM_VCPU_PMU_V3_IRQ attribute within the KVM_ARM_VCPU_PMU_V3_CTRL group. After configuring the PMUv3, call the vcpu ioctl with attribute KVM_ARM_VCPU_PMU_V3_INIT to initialize the PMUv3. 
Signed-off-by: Shannon Zhao Acked-by: Peter Maydell Reviewed-by: Andrew Jones Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/devices/vcpu.txt | 25 +++++++ arch/arm/include/asm/kvm_host.h | 15 ++++ arch/arm/kvm/arm.c | 3 + arch/arm64/include/asm/kvm_host.h | 6 ++ arch/arm64/include/uapi/asm/kvm.h | 5 ++ arch/arm64/kvm/guest.c | 51 +++++++++++++ include/kvm/arm_pmu.h | 23 ++++++ virt/kvm/arm/pmu.c | 112 +++++++++++++++++++++++++++++ 8 files changed, 240 insertions(+) (limited to 'arch/arm64/kvm') diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt index 3cc59c5e44ce..c04165868faf 100644 --- a/Documentation/virtual/kvm/devices/vcpu.txt +++ b/Documentation/virtual/kvm/devices/vcpu.txt @@ -6,3 +6,28 @@ KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct kvm_device_attr as other devices, but targets VCPU-wide settings and controls. The groups and attributes per virtual cpu, if any, are architecture specific. + +1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL +Architectures: ARM64 + +1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ +Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a + pointer to an int +Returns: -EBUSY: The PMU overflow interrupt is already set + -ENXIO: The overflow interrupt not set when attempting to get it + -ENODEV: PMUv3 not supported + -EINVAL: Invalid PMU overflow interrupt number supplied + +A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt +number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt +type must be same for each vcpu. As a PPI, the interrupt number is the same for +all vcpus, while as an SPI it must be a separate number per vcpu. + +1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT +Parameters: no additional parameter in kvm_device_attr.addr +Returns: -ENODEV: PMUv3 not supported + -ENXIO: PMUv3 not properly configured as required prior to calling this + attribute + -EBUSY: PMUv3 already initialized + +Request the initialization of the PMUv3. 
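A minimal userspace sketch of the flow documented above, assuming vcpu_fd is a vcpu file descriptor whose KVM_ARM_VCPU_PMU_V3 feature bit was set at KVM_ARM_VCPU_INIT time, and pmu_irq is a placeholder interrupt number consistent with the VGIC configuration (the helper name vcpu_setup_pmu is hypothetical):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int vcpu_setup_pmu(int vcpu_fd, int pmu_irq)
    {
    	struct kvm_device_attr attr = {
    		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
    		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
    		.addr	= (__u64)(unsigned long)&pmu_irq,
    	};

    	/* First tell KVM which interrupt the PMU overflow should use. */
    	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
    		return -1;

    	/* Then request the final initialization of the PMUv3. */
    	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
    	attr.addr = 0;
    	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }

Error handling for the documented return codes (-EBUSY, -ENXIO, -ENODEV, -EINVAL) is left to the caller.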
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 19e9aba85463..385070180c25 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -287,5 +287,20 @@ static inline void kvm_arm_init_debug(void) {} static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {} +static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 166232356291..75c7fed5d14c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -835,6 +835,7 @@ static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr); break; } @@ -848,6 +849,7 @@ static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr); break; } @@ -861,6 +863,7 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr); break; } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b02ef0828f22..71fa6fe9d54a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -361,6 +361,12 @@ void kvm_arm_init_debug(void); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); +int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); /* #define kvm_call_hyp(f, ...) 
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 6aedbe314432..f209ea151dca 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -205,6 +205,11 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_VGIC_GRP_CTRL 4 #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +/* Device Control API on vcpu fd */ +#define KVM_ARM_VCPU_PMU_V3_CTRL 0 +#define KVM_ARM_VCPU_PMU_V3_IRQ 0 +#define KVM_ARM_VCPU_PMU_V3_INIT 1 + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index fcb778899a38..dbe45c364bbb 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -380,3 +380,54 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, } return 0; } + +int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_set_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} + +int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_get_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} + +int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_has_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index ee62497d46f7..fe389ac31489 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -39,6 +39,7 @@ struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); @@ -54,11 +55,18 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); bool kvm_arm_support_pmu_v3(void); +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); #else struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) (false) +#define kvm_arm_pmu_irq_initialized(v) (false) static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) { @@ -82,6 +90,21 @@ static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} static inline bool kvm_arm_support_pmu_v3(void) { return false; } +static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} #endif #endif diff --git 
a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 6e28f4f86cc6..b5754c6c5508 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -415,3 +416,114 @@ bool kvm_arm_support_pmu_v3(void) */ return (perf_num_counters() > 0); } + +static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) +{ + if (!kvm_arm_support_pmu_v3()) + return -ENODEV; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) || + !kvm_arm_pmu_irq_initialized(vcpu)) + return -ENXIO; + + if (kvm_arm_pmu_v3_ready(vcpu)) + return -EBUSY; + + kvm_pmu_vcpu_reset(vcpu); + vcpu->arch.pmu.ready = true; + + return 0; +} + +static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi) +{ + int i; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!kvm_arm_pmu_irq_initialized(vcpu)) + continue; + + if (is_ppi) { + if (vcpu->arch.pmu.irq_num != irq) + return false; + } else { + if (vcpu->arch.pmu.irq_num == irq) + return false; + } + } + + return true; +} + + +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: { + int __user *uaddr = (int __user *)(long)attr->addr; + int irq; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return -ENODEV; + + if (get_user(irq, uaddr)) + return -EFAULT; + + /* + * The PMU overflow interrupt could be a PPI or SPI, but for one + * VM the interrupt type must be same for each vcpu. As a PPI, + * the interrupt number is the same for all vcpus, while as an + * SPI it must be a separate number per vcpu. + */ + if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs || + !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS)) + return -EINVAL; + + if (kvm_arm_pmu_irq_initialized(vcpu)) + return -EBUSY; + + kvm_debug("Set kvm ARM PMU irq: %d\n", irq); + vcpu->arch.pmu.irq_num = irq; + return 0; + } + case KVM_ARM_VCPU_PMU_V3_INIT: + return kvm_arm_pmu_v3_init(vcpu); + } + + return -ENXIO; +} + +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: { + int __user *uaddr = (int __user *)(long)attr->addr; + int irq; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return -ENODEV; + + if (!kvm_arm_pmu_irq_initialized(vcpu)) + return -ENXIO; + + irq = vcpu->arch.pmu.irq_num; + return put_user(irq, uaddr); + } + } + + return -ENXIO; +} + +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: + case KVM_ARM_VCPU_PMU_V3_INIT: + if (kvm_arm_support_pmu_v3() && + test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return 0; + } + + return -ENXIO; +} -- cgit v1.2.3 From 623eefa8d04c6c3df69a0630989f10b3762b3b00 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Jan 2016 18:27:04 +0000 Subject: arm64: KVM: Switch the sys_reg search to be a binary search Our 64bit sys_reg table is about 90 entries long (so far, and the PMU support is likely to increase this). This means that on average, it takes 45 comparaisons to find the right entry (and actually the full 90 if we have to search the invariant table). Not the most efficient thing. Specially when you think that this table is already sorted. Switching to a binary search effectively reduces the search to about 7 comparaisons. Slightly better! As an added bonus, the comparison is done by comparing all the fields at once, instead of one at a time. 
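As a small worked example of the packed comparison value (a sketch only, reusing the shift widths introduced below): PMCR_EL0 encodes as Op0=3, Op1=3, CRn=9, CRm=12, Op2=0, so its key is

    	unsigned long pval = (3UL << 14) | (3UL << 11) |
    			     (9UL << 7) | (12UL << 3) | 0UL;	/* 0xdce0 */

Because each field occupies its own bit range, ordering by this single value is equivalent to the (Op0, Op1, CRn, CRm, Op2) ordering the table is already sorted by, which is what lets bsearch() be used directly.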
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index fe15c2310a65..61ba59104845 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -20,6 +20,7 @@ * along with this program. If not, see . */ +#include #include #include #include @@ -1453,29 +1454,32 @@ static const struct sys_reg_desc *get_target_table(unsigned target, } } +#define reg_to_match_value(x) \ + ({ \ + unsigned long val; \ + val = (x)->Op0 << 14; \ + val |= (x)->Op1 << 11; \ + val |= (x)->CRn << 7; \ + val |= (x)->CRm << 3; \ + val |= (x)->Op2; \ + val; \ + }) + +static int match_sys_reg(const void *key, const void *elt) +{ + const unsigned long pval = (unsigned long)key; + const struct sys_reg_desc *r = elt; + + return pval - reg_to_match_value(r); +} + static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[], unsigned int num) { - unsigned int i; - - for (i = 0; i < num; i++) { - const struct sys_reg_desc *r = &table[i]; + unsigned long pval = reg_to_match_value(params); - if (params->Op0 != r->Op0) - continue; - if (params->Op1 != r->Op1) - continue; - if (params->CRn != r->CRn) - continue; - if (params->CRm != r->CRm) - continue; - if (params->Op2 != r->Op2) - continue; - - return r; - } - return NULL; + return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg); } int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) -- cgit v1.2.3 From 1b8e83c04ee2c05c0cd0d304c4b389adf24ebe7f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 17 Feb 2016 10:25:05 +0000 Subject: arm64: KVM: vgic-v3: Avoid accessing ICH registers Just like on GICv2, we're a bit hammer-happy with GICv3, and access them more often than we should. Adopt a policy similar to what we do for GICv2, only save/restoring the minimal set of registers. As we don't access the registers linearly anymore (we may skip some), the convoluted accessors become slightly simpler, and we can drop the ugly indexing macro that tended to confuse the reviewers. 
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 293 ++++++++++++++++++++++++---------------- include/kvm/arm_vgic.h | 6 - virt/kvm/arm/vgic-v3.c | 4 +- 3 files changed, 182 insertions(+), 121 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 0035b2d3fb6d..e596945a88f7 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -37,12 +37,104 @@ asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\ } while (0) -/* vcpu is already in the HYP VA space */ +static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) +{ + switch (lr & 0xf) { + case 0: + return read_gicreg(ICH_LR0_EL2); + case 1: + return read_gicreg(ICH_LR1_EL2); + case 2: + return read_gicreg(ICH_LR2_EL2); + case 3: + return read_gicreg(ICH_LR3_EL2); + case 4: + return read_gicreg(ICH_LR4_EL2); + case 5: + return read_gicreg(ICH_LR5_EL2); + case 6: + return read_gicreg(ICH_LR6_EL2); + case 7: + return read_gicreg(ICH_LR7_EL2); + case 8: + return read_gicreg(ICH_LR8_EL2); + case 9: + return read_gicreg(ICH_LR9_EL2); + case 10: + return read_gicreg(ICH_LR10_EL2); + case 11: + return read_gicreg(ICH_LR11_EL2); + case 12: + return read_gicreg(ICH_LR12_EL2); + case 13: + return read_gicreg(ICH_LR13_EL2); + case 14: + return read_gicreg(ICH_LR14_EL2); + case 15: + return read_gicreg(ICH_LR15_EL2); + } + + unreachable(); +} + +static void __hyp_text __gic_v3_set_lr(u64 val, int lr) +{ + switch (lr & 0xf) { + case 0: + write_gicreg(val, ICH_LR0_EL2); + break; + case 1: + write_gicreg(val, ICH_LR1_EL2); + break; + case 2: + write_gicreg(val, ICH_LR2_EL2); + break; + case 3: + write_gicreg(val, ICH_LR3_EL2); + break; + case 4: + write_gicreg(val, ICH_LR4_EL2); + break; + case 5: + write_gicreg(val, ICH_LR5_EL2); + break; + case 6: + write_gicreg(val, ICH_LR6_EL2); + break; + case 7: + write_gicreg(val, ICH_LR7_EL2); + break; + case 8: + write_gicreg(val, ICH_LR8_EL2); + break; + case 9: + write_gicreg(val, ICH_LR9_EL2); + break; + case 10: + write_gicreg(val, ICH_LR10_EL2); + break; + case 11: + write_gicreg(val, ICH_LR11_EL2); + break; + case 12: + write_gicreg(val, ICH_LR12_EL2); + break; + case 13: + write_gicreg(val, ICH_LR13_EL2); + break; + case 14: + write_gicreg(val, ICH_LR14_EL2); + break; + case 15: + write_gicreg(val, ICH_LR15_EL2); + break; + } +} + void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; u64 val; - u32 max_lr_idx, nr_pri_bits; /* * Make sure stores to the GIC via the memory mapped interface @@ -51,68 +143,58 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) dsb(st); cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); - cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); - cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); - cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); - write_gicreg(0, ICH_HCR_EL2); - val = read_gicreg(ICH_VTR_EL2); - max_lr_idx = vtr_to_max_lr_idx(val); - nr_pri_bits = vtr_to_nr_pri_bits(val); + if (vcpu->arch.vgic_cpu.live_lrs) { + int i; + u32 max_lr_idx, nr_pri_bits; - switch (max_lr_idx) { - case 15: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2); - case 14: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2); - case 13: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2); - case 12: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2); - case 11: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = 
read_gicreg(ICH_LR11_EL2); - case 10: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2); - case 9: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2); - case 8: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2); - case 7: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2); - case 6: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2); - case 5: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2); - case 4: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2); - case 3: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2); - case 2: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2); - case 1: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2); - case 0: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2); - } + cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); + cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); + cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); - switch (nr_pri_bits) { - case 7: - cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); - cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); - case 6: - cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2); - default: - cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); - } + write_gicreg(0, ICH_HCR_EL2); + val = read_gicreg(ICH_VTR_EL2); + max_lr_idx = vtr_to_max_lr_idx(val); + nr_pri_bits = vtr_to_nr_pri_bits(val); - switch (nr_pri_bits) { - case 7: - cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); - cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); - case 6: - cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2); - default: - cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2); + for (i = 0; i <= max_lr_idx; i++) { + if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) + cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); + cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); + case 6: + cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2); + default: + cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); + cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); + case 6: + cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2); + default: + cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2); + } + + vcpu->arch.vgic_cpu.live_lrs = 0; + } else { + cpu_if->vgic_misr = 0; + cpu_if->vgic_eisr = 0; + cpu_if->vgic_elrsr = 0xffff; + cpu_if->vgic_ap0r[0] = 0; + cpu_if->vgic_ap0r[1] = 0; + cpu_if->vgic_ap0r[2] = 0; + cpu_if->vgic_ap0r[3] = 0; + cpu_if->vgic_ap1r[0] = 0; + cpu_if->vgic_ap1r[1] = 0; + cpu_if->vgic_ap1r[2] = 0; + cpu_if->vgic_ap1r[3] = 0; } val = read_gicreg(ICC_SRE_EL2); @@ -126,6 +208,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; u64 val; u32 max_lr_idx, nr_pri_bits; + u16 live_lrs = 0; + int i; /* * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. 
This causes a @@ -138,66 +222,48 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); isb(); - write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); - write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2); - val = read_gicreg(ICH_VTR_EL2); max_lr_idx = vtr_to_max_lr_idx(val); nr_pri_bits = vtr_to_nr_pri_bits(val); - switch (nr_pri_bits) { - case 7: - write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); - write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); - case 6: - write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2); - default: - write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); + for (i = 0; i <= max_lr_idx; i++) { + if (cpu_if->vgic_lr[i] & ICH_LR_STATE) + live_lrs |= (1 << i); } - switch (nr_pri_bits) { - case 7: - write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); - write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); - case 6: - write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); - default: - write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); - } + write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2); - switch (max_lr_idx) { - case 15: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2); - case 14: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2); - case 13: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2); - case 12: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2); - case 11: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2); - case 10: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2); - case 9: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2); - case 8: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2); - case 7: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2); - case 6: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2); - case 5: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2); - case 4: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2); - case 3: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2); - case 2: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2); - case 1: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2); - case 0: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2); + if (live_lrs) { + write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); + write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); + write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); + } + + for (i = 0; i <= max_lr_idx; i++) { + val = 0; + + if (live_lrs & (1 << i)) + val = cpu_if->vgic_lr[i]; + + __gic_v3_set_lr(val, i); + } } /* @@ -207,6 +273,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) */ isb(); dsb(sy); + vcpu->arch.vgic_cpu.live_lrs = live_lrs; /* * Prevent the guest from touching the GIC system registers if diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index f473fd65fab5..281caf847fad 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -279,12 +279,6 @@ struct vgic_v2_cpu_if { u32 vgic_lr[VGIC_V2_MAX_LRS]; }; -/* - * LRs are stored 
in reverse order in memory. make sure we index them - * correctly. - */ -#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) - struct vgic_v3_cpu_if { #ifdef CONFIG_KVM_ARM_VGIC_V3 u32 vgic_hcr;
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 453eafd4dd6e..11b5ff6ce81c 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -42,7 +42,7 @@ static u32 ich_vtr_el2; static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) { struct vgic_lr lr_desc; - u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)]; + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr]; if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) lr_desc.irq = val & ICH_LR_VIRTUALID_MASK; @@ -106,7 +106,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT; } - vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val; + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val; if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); -- cgit v1.2.3
From b4344545cf85d2a6ad546ec21dab5f76487e020e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Feb 2016 18:53:04 +0000 Subject: arm64: KVM: vgic-v3: Save maintenance interrupt state only if required Next on our list of useless accesses are the maintenance interrupt status registers (ICH_MISR_EL2, ICH_EISR_EL2). It is pointless to save them if we haven't asked for a maintenance interrupt in the first place, which can only happen for two reasons: - Underflow: ICH_HCR_UIE will be set, - EOI: ICH_LR_EOI will be set. These conditions can be checked on the in-memory copies of the regs. Should either of these two conditions be met, we must read ICH_MISR_EL2. We can then check for ICH_MISR_EOI, and only when it is set read ICH_EISR_EL2. This means that in most cases, we don't have to save them at all.
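For illustration only (not part of the patch): the check described above boils down to the predicate sketched below. The helper name and its simplified signature (taking the live-LR bitmap and LR count directly) are invented; the new save_maint_int_state() in the diff that follows evaluates essentially this before touching ICH_MISR_EL2 or ICH_EISR_EL2.

static bool demo_expect_maint_irq(const struct vgic_v3_cpu_if *cpu_if,
				  unsigned long live_lrs, int nr_lr)
{
	bool expect = cpu_if->vgic_hcr & ICH_HCR_UIE;	/* underflow requested? */
	int i;

	for (i = 0; i < nr_lr; i++) {
		/* Only LRs we actually programmed can raise an EOI interrupt. */
		if (!(live_lrs & (1UL << i)))
			continue;

		/* A software interrupt asking for EOI notification? */
		if (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
		    (cpu_if->vgic_lr[i] & ICH_LR_EOI))
			expect = true;
	}

	return expect;
}

Only when this predicate is true is ICH_MISR_EL2 worth reading, and ICH_EISR_EL2 only if ICH_MISR_EOI turns out to be set; otherwise both in-memory copies can simply be zeroed.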
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index e596945a88f7..61a5e46b4335 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -131,6 +131,35 @@ static void __hyp_text __gic_v3_set_lr(u64 val, int lr) } } +static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr) +{ + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + int i; + bool expect_mi; + + expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE); + + for (i = 0; i < nr_lr; i++) { + if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) + continue; + + expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) && + (cpu_if->vgic_lr[i] & ICH_LR_EOI)); + } + + if (expect_mi) { + cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); + + if (cpu_if->vgic_misr & ICH_MISR_EOI) + cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); + else + cpu_if->vgic_eisr = 0; + } else { + cpu_if->vgic_misr = 0; + cpu_if->vgic_eisr = 0; + } +} + void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; @@ -148,8 +177,6 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) int i; u32 max_lr_idx, nr_pri_bits; - cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); - cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); write_gicreg(0, ICH_HCR_EL2); @@ -157,6 +184,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) max_lr_idx = vtr_to_max_lr_idx(val); nr_pri_bits = vtr_to_nr_pri_bits(val); + save_maint_int_state(vcpu, max_lr_idx + 1); + for (i = 0; i <= max_lr_idx; i++) { if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); -- cgit v1.2.3 From 84e8b9c88d5fe9c9a59ed24ae44d7ac0983df92b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Feb 2016 17:09:49 +0000 Subject: arm64: KVM: vgic-v3: Do not save an LR known to be empty On exit, any empty LR will be signaled in ICH_ELRSR_EL2. Which means that we do not have to save it, and we can just clear its state in the in-memory copy. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 61a5e46b4335..0db426e6c13e 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -187,8 +187,15 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) save_maint_int_state(vcpu, max_lr_idx + 1); for (i = 0; i <= max_lr_idx; i++) { - if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) - cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); + if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) + continue; + + if (cpu_if->vgic_elrsr & (1 << i)) { + cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; + continue; + } + + cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); } switch (nr_pri_bits) { -- cgit v1.2.3 From 0d98d00b8d80bfdee95cf7e85f20f107377e2662 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 3 Mar 2016 15:43:58 +0000 Subject: arm64: KVM: vgic-v3: Reset LRs at boot time In order to let the GICv3 code be more lazy in the way it accesses the LRs, it is necessary to start with a clean slate. 
Let's reset the LRs on each CPU when the vgic is probed (which includes a round trip to EL2...). Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/kvm/hyp/vgic-v3-sr.c | 9 +++++++++ virt/kvm/arm/vgic-v3.c | 7 +++++++ 3 files changed, 17 insertions(+) (limited to 'arch/arm64/kvm')
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 1037392ae134..2d02ba67478c 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -42,6 +42,7 @@ extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); extern u64 __vgic_v3_get_ich_vtr_el2(void); +extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void);
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 0db426e6c13e..81349479e17c 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -321,6 +321,15 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) } } +void __hyp_text __vgic_v3_init_lrs(void) +{ + int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2)); + int i; + + for (i = 0; i <= max_lr_idx; i++) + __gic_v3_set_lr(0, i); +} + static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) { return read_gicreg(ICH_VTR_EL2);
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 11b5ff6ce81c..999bdc6d9d9f 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -216,6 +216,11 @@ static const struct vgic_ops vgic_v3_ops = { static struct vgic_params vgic_v3_params; +static void vgic_cpu_init_lrs(void *params) +{ + kvm_call_hyp(__vgic_v3_init_lrs); +} + /** * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT * @node: pointer to the DT node @@ -284,6 +289,8 @@ int vgic_v3_probe(struct device_node *vgic_node, kvm_info("%s@%llx IRQ%d\n", vgic_node->name, vcpu_res.start, vgic->maint_irq); + on_each_cpu(vgic_cpu_init_lrs, vgic, 1); + *ops = &vgic_v3_ops; *params = vgic; -- cgit v1.2.3
From b40c4892d175874d118860c8282a85ee7b64bcbb Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Feb 2016 17:36:09 +0000 Subject: arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit So far, we're always writing all possible LRs, setting the empty ones to a zero value. This is obviously doing a lot of work for nothing, and we're better off clearing those we've actually dirtied on the exit path (it is very rare to inject more than one interrupt at a time anyway). Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/arm64/kvm')
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 81349479e17c..fff7cd42b3a3 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -196,6 +196,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) } cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); + __gic_v3_set_lr(0, i); } switch (nr_pri_bits) { @@ -293,12 +294,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) } for (i = 0; i <= max_lr_idx; i++) { - val = 0; - - if (live_lrs & (1 << i)) - val = cpu_if->vgic_lr[i]; + if (!(live_lrs & (1 << i))) + continue; - __gic_v3_set_lr(val, i); + __gic_v3_set_lr(cpu_if->vgic_lr[i], i); } } -- cgit v1.2.3
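For illustration only (not part of the series): taken together, the vgic-v3 patches above converge on the list register handling modelled below. Everything here (the demo_* names, NR_LRS, the fake hw_lr[]/hw_elrsr variables standing in for ICH_LRn_EL2 and ICH_ELSR_EL2) is invented for this stand-alone user-space sketch; the real code is __vgic_v3_restore_state()/__vgic_v3_save_state() running at EL2.

#include <stdint.h>
#include <stdio.h>

#define NR_LRS		4
#define LR_STATE	(3ULL << 62)	/* stands in for ICH_LR_STATE */

static uint64_t hw_lr[NR_LRS];		/* "hardware" list registers */
static uint16_t hw_elrsr = 0xffff;	/* "hardware" empty-LR status */

struct demo_cpu_if {
	uint64_t lr[NR_LRS];		/* in-memory copies */
	uint16_t live_lrs;		/* which LRs we wrote to "hardware" */
};

/* Guest entry: only program LRs that carry state, and remember which ones. */
static void demo_restore(struct demo_cpu_if *cpu)
{
	int i;

	cpu->live_lrs = 0;
	for (i = 0; i < NR_LRS; i++) {
		if (!(cpu->lr[i] & LR_STATE))
			continue;		/* empty: never touch the register */
		hw_lr[i] = cpu->lr[i];
		hw_elrsr &= ~(1U << i);
		cpu->live_lrs |= 1U << i;
	}
}

/* Guest exit: only read back (and wipe) the LRs we know we programmed. */
static void demo_save(struct demo_cpu_if *cpu)
{
	int i;

	for (i = 0; i < NR_LRS; i++) {
		if (!(cpu->live_lrs & (1U << i)))
			continue;		/* never programmed: nothing to do */
		if (hw_elrsr & (1U << i)) {
			cpu->lr[i] &= ~LR_STATE; /* drained: just clear the copy */
			continue;
		}
		cpu->lr[i] = hw_lr[i];		/* still live: read it back... */
		hw_lr[i] = 0;			/* ...and wipe only what we dirtied */
	}
	cpu->live_lrs = 0;
}

int main(void)
{
	struct demo_cpu_if cpu = { .lr = { LR_STATE | 27, 0, 0, 0 } };

	demo_restore(&cpu);
	hw_elrsr |= 1U << 0;		/* pretend the guest EOIed interrupt 27 */
	demo_save(&cpu);
	printf("lr[0]=%#llx live_lrs=%#x\n",
	       (unsigned long long)cpu.lr[0], cpu.live_lrs);
	return 0;
}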