Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/hyp-init.S       |  18
-rw-r--r--  arch/arm64/kvm/hyp.S            |   4
-rw-r--r--  arch/arm64/kvm/hyp/fpsimd.S     |   8
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S  |  27
-rw-r--r--  arch/arm64/kvm/hyp/switch.c     |  28
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c  |   8
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c        |   8
-rw-r--r--  arch/arm64/kvm/sys_regs.c       | 103
8 files changed, 159 insertions, 45 deletions
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 160be2b4696d..6e6ed5581eed 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -18,7 +18,7 @@
 
 	.align	11
 
-ENTRY(__kvm_hyp_init)
+SYM_CODE_START(__kvm_hyp_init)
 	ventry	__invalid		// Synchronous EL2t
 	ventry	__invalid		// IRQ EL2t
 	ventry	__invalid		// FIQ EL2t
@@ -60,7 +60,7 @@ alternative_else_nop_endif
 	msr	ttbr0_el2, x4
 
 	mrs	x4, tcr_el1
-	ldr	x5, =TCR_EL2_MASK
+	mov_q	x5, TCR_EL2_MASK
 	and	x4, x4, x5
 	mov	x5, #TCR_EL2_RES1
 	orr	x4, x4, x5
@@ -102,7 +102,7 @@ alternative_else_nop_endif
 	 * as well as the EE bit on BE. Drop the A flag since the compiler
 	 * is allowed to generate unaligned accesses.
 	 */
-	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+	mov_q	x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
 CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
@@ -117,9 +117,9 @@ CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 
 	/* Hello, World! */
 	eret
-ENDPROC(__kvm_hyp_init)
+SYM_CODE_END(__kvm_hyp_init)
 
-ENTRY(__kvm_handle_stub_hvc)
+SYM_CODE_START(__kvm_handle_stub_hvc)
 	cmp	x0, #HVC_SOFT_RESTART
 	b.ne	1f
 
@@ -142,7 +142,7 @@ reset:
 	 * case we coming via HVC_SOFT_RESTART.
 	 */
 	mrs	x5, sctlr_el2
-	ldr	x6, =SCTLR_ELx_FLAGS
+	mov_q	x6, SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
 	pre_disable_mmu_workaround
 	msr	sctlr_el2, x5
@@ -155,11 +155,9 @@ reset:
 	eret
 
 1:	/* Bad stub call */
-	ldr	x0, =HVC_STUB_ERR
+	mov_q	x0, HVC_STUB_ERR
 	eret
 
-ENDPROC(__kvm_handle_stub_hvc)
-
-	.ltorg
+SYM_CODE_END(__kvm_handle_stub_hvc)
 
 	.popsection
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index c0094d520dff..3c79a1124af2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -28,7 +28,7 @@
  * and is used to implement hyp stubs in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
  */
-ENTRY(__kvm_call_hyp)
+SYM_FUNC_START(__kvm_call_hyp)
 	hvc	#0
 	ret
-ENDPROC(__kvm_call_hyp)
+SYM_FUNC_END(__kvm_call_hyp)
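Two recurring conversions above: ENTRY/ENDPROC becomes SYM_FUNC_START/SYM_FUNC_END where the symbol is an ordinary C-callable function, and SYM_CODE_START/SYM_CODE_END where it is reached with a non-standard calling convention (exception vectors, stubs entered via eret). Separately, `ldr xN, =const` literal-pool loads become `mov_q` immediate sequences, which is what lets the now-unneeded `.ltorg` literal-pool directive disappear. The C-visible side of the FUNC/CODE split looks roughly like this (declarations assumed for illustration, not quoted from the tree):

    /* A SYM_FUNC_* symbol follows the AAPCS, so C code can call it: */
    extern unsigned long __kvm_call_hyp(void *hypfn, ...); /* assumed prototype */

    /* A SYM_CODE_* symbol is only entered by the CPU itself (exception
     * entry, eret), so C code at most takes its address: */
    extern char __kvm_hyp_init[];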
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index 78ff53225691..5b8ff517ff10 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -11,12 +11,12 @@
 	.text
 	.pushsection	.hyp.text, "ax"
 
-ENTRY(__fpsimd_save_state)
+SYM_FUNC_START(__fpsimd_save_state)
 	fpsimd_save	x0, 1
 	ret
-ENDPROC(__fpsimd_save_state)
+SYM_FUNC_END(__fpsimd_save_state)
 
-ENTRY(__fpsimd_restore_state)
+SYM_FUNC_START(__fpsimd_restore_state)
 	fpsimd_restore	x0, 1
 	ret
-ENDPROC(__fpsimd_restore_state)
+SYM_FUNC_END(__fpsimd_restore_state)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index ffa68d5713f1..c2a13ab3c471 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -180,7 +180,7 @@ el2_error:
 	eret
 	sb
 
-ENTRY(__hyp_do_panic)
+SYM_FUNC_START(__hyp_do_panic)
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
 		      PSR_MODE_EL1h)
 	msr	spsr_el2, lr
@@ -188,18 +188,19 @@ ENTRY(__hyp_do_panic)
 	msr	elr_el2, lr
 	eret
 	sb
-ENDPROC(__hyp_do_panic)
+SYM_FUNC_END(__hyp_do_panic)
 
-ENTRY(__hyp_panic)
+SYM_CODE_START(__hyp_panic)
 	get_host_ctxt x0, x1
 	b	hyp_panic
-ENDPROC(__hyp_panic)
+SYM_CODE_END(__hyp_panic)
 
 .macro invalid_vector	label, target = __hyp_panic
 	.align	2
+SYM_CODE_START(\label)
 \label:
 	b \target
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
 
 /* None of these should ever happen */
@@ -246,7 +247,7 @@ check_preamble_length 661b, 662b
 check_preamble_length 661b, 662b
 .endm
 
-ENTRY(__kvm_hyp_vector)
+SYM_CODE_START(__kvm_hyp_vector)
 	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
 	invalid_vect	el2t_irq_invalid	// IRQ EL2t
 	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
@@ -266,7 +267,7 @@ ENTRY(__kvm_hyp_vector)
 	valid_vect	el1_irq			// IRQ 32-bit EL1
 	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
 	valid_vect	el1_error		// Error 32-bit EL1
-ENDPROC(__kvm_hyp_vector)
+SYM_CODE_END(__kvm_hyp_vector)
 
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 .macro hyp_ventry
@@ -311,15 +312,17 @@ alternative_cb_end
 .endm
 
 	.align	11
-ENTRY(__bp_harden_hyp_vecs_start)
+SYM_CODE_START(__bp_harden_hyp_vecs)
 	.rept BP_HARDEN_EL2_SLOTS
 	generate_vectors
 	.endr
-ENTRY(__bp_harden_hyp_vecs_end)
+1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
+	.org 1b
+SYM_CODE_END(__bp_harden_hyp_vecs)
 
 	.popsection
 
-ENTRY(__smccc_workaround_1_smc_start)
+SYM_CODE_START(__smccc_workaround_1_smc)
 	esb
 	sub	sp, sp, #(8 * 4)
 	stp	x2, x3, [sp, #(8 * 0)]
@@ -329,5 +332,7 @@ ENTRY(__smccc_workaround_1_smc_start)
 	ldp	x2, x3, [sp, #(8 * 0)]
 	ldp	x0, x1, [sp, #(8 * 2)]
 	add	sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+1:	.org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
+	.org 1b
+SYM_CODE_END(__smccc_workaround_1_smc)
 #endif
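The `1:` / `.org` pairs above replace the old *_start/*_end label pairs with an exact-size assertion: if the generated code is larger than the size constant, the first `.org` would have to move the location counter backwards and the assembler errors out; if it is smaller, the first `.org` pads forward and the following `.org 1b` is the backwards move that fails. Either way a size mismatch now breaks the build instead of silently corrupting the vector copies. The analogous build-time size contract in C is a static assertion; a minimal sketch with hypothetical sizes (the real constants are __BP_HARDEN_HYP_VECS_SZ and __SMCCC_WORKAROUND_1_SMC_SZ, defined elsewhere in this series):

    #include <assert.h>
    #include <stdint.h>

    #define NUM_SLOTS 4     /* hypothetical stand-in for BP_HARDEN_EL2_SLOTS */
    #define SLOT_SZ   0x800 /* hypothetical per-slot size in bytes */

    struct bp_harden_vecs {
            uint8_t slot[NUM_SLOTS][SLOT_SZ];
    };

    /* Fails the build, not the running system, if the layout contract breaks. */
    static_assert(sizeof(struct bp_harden_vecs) == NUM_SLOTS * SLOT_SZ,
                  "hyp vector slots must be exactly SLOT_SZ bytes each");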
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 925086b46136..eaa05c3c7235 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -98,6 +98,18 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
 	val = read_sysreg(cpacr_el1);
 	val |= CPACR_EL1_TTA;
 	val &= ~CPACR_EL1_ZEN;
+
+	/*
+	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
+	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
+	 * except for some missing controls, such as TAM.
+	 * In this case, CPTR_EL2.TAM has the same position with or without
+	 * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
+	 * shift value for trapping the AMU accesses.
+	 */
+
+	val |= CPTR_EL2_TAM;
+
 	if (update_fp_enabled(vcpu)) {
 		if (vcpu_has_sve(vcpu))
 			val |= CPACR_EL1_ZEN;
@@ -119,7 +131,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 	__activate_traps_common(vcpu);
 
 	val = CPTR_EL2_DEFAULT;
-	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
 	if (!update_fp_enabled(vcpu)) {
 		val |= CPTR_EL2_TFP;
 		__activate_traps_fpsimd32(vcpu);
@@ -127,7 +139,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
 	write_sysreg(val, cptr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
 		isb();
@@ -146,12 +158,12 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 hcr = vcpu->arch.hcr_el2;
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
 		hcr |= HCR_TVM;
 
 	write_sysreg(hcr, hcr_el2);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
 		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 
 	if (has_vhe())
@@ -181,7 +193,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
 	u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		u64 val;
 
 		/*
@@ -328,7 +340,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	 * resolve the IPA using the AT instruction.
 	 */
 	if (!(esr & ESR_ELx_S1PTW) &&
-	    (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
 	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
 		if (!__translate_far_to_hpfar(far, &hpfar))
 			return false;
@@ -498,7 +510,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (*exit_code != ARM_EXCEPTION_TRAP)
 		goto exit;
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
 	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
 	    handle_tx2_tvm(vcpu))
 		return true;
@@ -555,7 +567,7 @@ exit:
 
 static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
 {
-	if (!cpus_have_const_cap(ARM64_SSBD))
+	if (!cpus_have_final_cap(ARM64_SSBD))
 		return false;
 
 	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
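Every cpus_have_const_cap() → cpus_have_final_cap() conversion in this and the following files leans on the same invariant: no vCPU can run before CPU capabilities are finalized during boot, so the hyp code may use the accessor that assumes a frozen capability set and skips the boot-time fallback. A simplified model of the distinction (illustrative only; the kernel implements the fast path with patched static branches):

    #include <stdbool.h>
    #include <stdlib.h>

    #define NR_MODEL_CAPS 64

    static bool caps_finalized;          /* set once, late in boot */
    static bool detected[NR_MODEL_CAPS]; /* filled during CPU bring-up */
    static bool frozen[NR_MODEL_CAPS];   /* "static keys", patched at finalize */

    static void model_finalize_caps(void)
    {
            for (int i = 0; i < NR_MODEL_CAPS; i++)
                    frozen[i] = detected[i];
            caps_finalized = true;
    }

    /* const_cap flavour: usable any time, so it tolerates the window
     * before finalization by consulting the live table. */
    static bool model_cpus_have_const_cap(int num)
    {
            return caps_finalized ? frozen[num] : detected[num];
    }

    /* final_cap flavour: the caller guarantees finalization already
     * happened, so the check is unconditional; too-early use is a bug
     * (the kernel BUG()s in that case). */
    static bool model_cpus_have_final_cap(int num)
    {
            if (!caps_finalized)
                    abort();
            return frozen[num];
    }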
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 7672a978926c..75b1925763f1 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -71,7 +71,7 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ct
 	ctxt->gp_regs.regs.pc		= read_sysreg_el2(SYS_ELR);
 	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(SYS_SPSR);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
 }
 
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);
 
-	if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
 		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
 	} else	if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
 	    ctxt->__hyp_running_vcpu) {
 		/*
 		 * Must only be done for host registers, hence the context
@@ -194,7 +194,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 	write_sysreg_el2(ctxt->gp_regs.regs.pc,	SYS_ELR);
 	write_sysreg_el2(pstate,		SYS_SPSR);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
 }
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 92f560e3e1aa..ceaddbe4279f 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 
 	local_irq_save(cxt->flags);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
 		/*
 		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
 		 * we cannot trust stage-1 to be in a correct state at that
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 						  struct tlb_inv_context *cxt)
 {
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		u64 val;
 
 		/*
@@ -103,7 +103,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 	isb();
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
 		/* Restore the registers to what they were */
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
 		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
 	write_sysreg(0, vttbr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		/* Ensure write of the host VMID */
 		isb();
 		/* Restore the host's TCR_EL1 */
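The SPECULATIVE_AT hunks in sysreg-sr.c and tlb.c all share one save/park/switch/restore shape: before the translation regime changes, stage-1 is parked in a known-safe state so a speculatively executed AT instruction cannot allocate a stale TLB entry, and the saved values are written back once the switch is done. Schematically, with hypothetical accessor names standing in for the kernel's read_sysreg_el1()/write_sysreg_el1() macros and isb():

    /* Hypothetical stand-ins for the kernel's sysreg accessors. */
    extern unsigned long sysreg_read_tcr(void);
    extern void sysreg_write_tcr(unsigned long v);
    extern void barrier_isb(void);

    /* Assumed positions of TCR_EL1.EPD0 (bit 7) and EPD1 (bit 23). */
    #define TCR_EPD_BITS ((1UL << 7) | (1UL << 23))

    struct tlb_inv_ctx { unsigned long tcr; };

    static void guarded_regime_switch(struct tlb_inv_ctx *cxt,
                                      void (*do_switch)(void))
    {
            cxt->tcr = sysreg_read_tcr();           /* 1. save */

            /* 2. park: with EPD0/EPD1 set, speculative walks cannot
             *    allocate translations for the outgoing regime. */
            sysreg_write_tcr(cxt->tcr | TCR_EPD_BITS);
            barrier_isb();

            do_switch();                            /* 3. switch regimes */

            sysreg_write_tcr(cxt->tcr);             /* 4. restore */
            barrier_isb();
    }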
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3e909b117f0c..090f46d3add1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1003,6 +1003,20 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
 
+static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+		       const struct sys_reg_desc *r)
+{
+	kvm_inject_undefined(vcpu);
+
+	return false;
+}
+
+/* Macro to expand the AMU counter and type registers*/
+#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu }
+#define AMU_AMEVTYPE0_EL0(n) { SYS_DESC(SYS_AMEVTYPE0_EL0(n)), access_amu }
+#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu }
+#define AMU_AMEVTYPE1_EL0(n) { SYS_DESC(SYS_AMEVTYPE1_EL0(n)), access_amu }
+
 static bool trap_ptrauth(struct kvm_vcpu *vcpu,
 			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *rd)
@@ -1078,13 +1092,25 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
 
-	if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
-		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
+	if (id == SYS_ID_AA64PFR0_EL1) {
+		if (!vcpu_has_sve(vcpu))
+			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
+		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
 		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
+	} else if (id == SYS_ID_AA64DFR0_EL1) {
+		/* Limit guests to PMUv3 for ARMv8.1 */
+		val = cpuid_feature_cap_perfmon_field(val,
+						      ID_AA64DFR0_PMUVER_SHIFT,
+						      ID_AA64DFR0_PMUVER_8_1);
+	} else if (id == SYS_ID_DFR0_EL1) {
+		/* Limit guests to PMUv3 for ARMv8.1 */
+		val = cpuid_feature_cap_perfmon_field(val,
+						      ID_DFR0_PERFMON_SHIFT,
+						      ID_DFR0_PERFMON_8_1);
 	}
 
 	return val;
@@ -1565,6 +1591,79 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
 
+	{ SYS_DESC(SYS_AMCR_EL0), access_amu },
+	{ SYS_DESC(SYS_AMCFGR_EL0), access_amu },
+	{ SYS_DESC(SYS_AMCGCR_EL0), access_amu },
+	{ SYS_DESC(SYS_AMUSERENR_EL0), access_amu },
+	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu },
+	{ SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu },
+	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu },
+	{ SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu },
+	AMU_AMEVCNTR0_EL0(0),
+	AMU_AMEVCNTR0_EL0(1),
+	AMU_AMEVCNTR0_EL0(2),
+	AMU_AMEVCNTR0_EL0(3),
+	AMU_AMEVCNTR0_EL0(4),
+	AMU_AMEVCNTR0_EL0(5),
+	AMU_AMEVCNTR0_EL0(6),
+	AMU_AMEVCNTR0_EL0(7),
+	AMU_AMEVCNTR0_EL0(8),
+	AMU_AMEVCNTR0_EL0(9),
+	AMU_AMEVCNTR0_EL0(10),
+	AMU_AMEVCNTR0_EL0(11),
+	AMU_AMEVCNTR0_EL0(12),
+	AMU_AMEVCNTR0_EL0(13),
+	AMU_AMEVCNTR0_EL0(14),
+	AMU_AMEVCNTR0_EL0(15),
+	AMU_AMEVTYPE0_EL0(0),
+	AMU_AMEVTYPE0_EL0(1),
+	AMU_AMEVTYPE0_EL0(2),
+	AMU_AMEVTYPE0_EL0(3),
+	AMU_AMEVTYPE0_EL0(4),
+	AMU_AMEVTYPE0_EL0(5),
+	AMU_AMEVTYPE0_EL0(6),
+	AMU_AMEVTYPE0_EL0(7),
+	AMU_AMEVTYPE0_EL0(8),
+	AMU_AMEVTYPE0_EL0(9),
+	AMU_AMEVTYPE0_EL0(10),
+	AMU_AMEVTYPE0_EL0(11),
+	AMU_AMEVTYPE0_EL0(12),
+	AMU_AMEVTYPE0_EL0(13),
+	AMU_AMEVTYPE0_EL0(14),
+	AMU_AMEVTYPE0_EL0(15),
+	AMU_AMEVCNTR1_EL0(0),
+	AMU_AMEVCNTR1_EL0(1),
+	AMU_AMEVCNTR1_EL0(2),
+	AMU_AMEVCNTR1_EL0(3),
+	AMU_AMEVCNTR1_EL0(4),
+	AMU_AMEVCNTR1_EL0(5),
+	AMU_AMEVCNTR1_EL0(6),
+	AMU_AMEVCNTR1_EL0(7),
+	AMU_AMEVCNTR1_EL0(8),
+	AMU_AMEVCNTR1_EL0(9),
+	AMU_AMEVCNTR1_EL0(10),
+	AMU_AMEVCNTR1_EL0(11),
+	AMU_AMEVCNTR1_EL0(12),
+	AMU_AMEVCNTR1_EL0(13),
+	AMU_AMEVCNTR1_EL0(14),
+	AMU_AMEVCNTR1_EL0(15),
+	AMU_AMEVTYPE1_EL0(0),
+	AMU_AMEVTYPE1_EL0(1),
+	AMU_AMEVTYPE1_EL0(2),
+	AMU_AMEVTYPE1_EL0(3),
+	AMU_AMEVTYPE1_EL0(4),
+	AMU_AMEVTYPE1_EL0(5),
+	AMU_AMEVTYPE1_EL0(6),
+	AMU_AMEVTYPE1_EL0(7),
+	AMU_AMEVTYPE1_EL0(8),
+	AMU_AMEVTYPE1_EL0(9),
+	AMU_AMEVTYPE1_EL0(10),
+	AMU_AMEVTYPE1_EL0(11),
+	AMU_AMEVTYPE1_EL0(12),
+	AMU_AMEVTYPE1_EL0(13),
+	AMU_AMEVTYPE1_EL0(14),
+	AMU_AMEVTYPE1_EL0(15),
+
 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
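The sys_regs.c side of the AMU handling is deliberately minimal: every AMU register in the guest-visible encoding space is routed to access_amu(), which injects an UNDEF into the guest, and read_id_reg() clears ID_AA64PFR0_EL1.AMU so the guest is told the feature does not exist. The same hunk caps the PMU version fields so a guest never sees a newer PMU than KVM emulates. The capping helper works along these lines (an illustrative sketch of what cpuid_feature_cap_perfmon_field() does, not the exact kernel code):

    #include <stdint.h>

    /* Cap one 4-bit unsigned ID-register field at 'cap'. */
    static uint64_t cap_id_field(uint64_t reg, unsigned int shift, uint64_t cap)
    {
            uint64_t mask = 0xfULL << shift;
            uint64_t val = (reg & mask) >> shift;

            /* 0xf in the PMUVer/PerfMon fields means IMPLEMENTATION
             * DEFINED; report that as "not implemented" rather than
             * capping it. */
            if (val == 0xf)
                    val = 0;
            else if (val > cap)
                    val = cap;

            return (reg & ~mask) | (val << shift);
    }

Usage mirrors the two new read_id_reg() branches: cap the PMUVer field of ID_AA64DFR0_EL1 and the PerfMon field of ID_DFR0_EL1 at the encoding for PMUv3 for ARMv8.1.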