author     Linus Torvalds <torvalds@linux-foundation.org>    2016-03-16 09:55:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-03-16 09:55:35 -0700
commit     10dc3747661bea9215417b659449bb7b8ed3df2c (patch)
tree       d943974b4941203a7db2fabe4896852cf0f16bc4 /arch/arm64/include
parent     047486d8e7c2a7e8d75b068b69cb67b47364f5d4 (diff)
parent     f958ee745f70b60d0e41927cab2c073104bc70c2 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"One of the largest releases for KVM... Hardly any generic
changes, but lots of architecture-specific updates.
ARM:
- VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
- PMU support for guests
- 32bit world switch rewritten in C
- various optimizations to the vgic save/restore code
PPC:
- enabled KVM-VFIO integration ("VFIO device")
- optimizations to speed up IPIs between vcpus
- in-kernel handling of IOMMU hypercalls
- support for dynamic DMA windows (DDW)
s390:
- provide the floating point registers via sync regs
- separated instruction vs. data accesses
- dirty log improvements for huge guests
- bugfixes and documentation improvements
x86:
- Hyper-V VMBus hypercall userspace exit
- alternative implementation of lowest-priority interrupts using
  vector hashing (for better VT-d posted interrupt support)
- fixed guest debugging with nested virtualization
- improved interrupt tracking in the in-kernel IOAPIC
- generic infrastructure for tracking writes to guest
  memory - currently its only use is to speed up the legacy shadow
  paging (pre-EPT) case, but in the future it will be used for
  virtual GPUs as well
- much cleanup (LAPIC, kvmclock, MMU, PIT), including UBSan fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
KVM: x86: disable MPX if host did not enable MPX XSAVE features
arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
arm64: KVM: vgic-v3: Reset LRs at boot time
arm64: KVM: vgic-v3: Do not save an LR known to be empty
arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
arm64: KVM: vgic-v3: Avoid accessing ICH registers
KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
KVM: arm/arm64: vgic-v2: Reset LRs at boot time
KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
KVM: s390: allocate only one DMA page per VM
KVM: s390: enable STFLE interpretation only if enabled for the guest
KVM: s390: wake up when the VCPU cpu timer expires
KVM: s390: step the VCPU timer while in enabled wait
KVM: s390: protect VCPU cpu timer with a seqcount
KVM: s390: step VCPU cpu timer during kvm_run ioctl
...
Diffstat (limited to 'arch/arm64/include')
-rw-r--r-- | arch/arm64/include/asm/cpufeature.h     |   6
-rw-r--r-- | arch/arm64/include/asm/hw_breakpoint.h  |  18
-rw-r--r-- | arch/arm64/include/asm/kvm_arm.h        |   6
-rw-r--r-- | arch/arm64/include/asm/kvm_asm.h        |   6
-rw-r--r-- | arch/arm64/include/asm/kvm_emulate.h    |   8
-rw-r--r-- | arch/arm64/include/asm/kvm_host.h       |  34
-rw-r--r-- | arch/arm64/include/asm/kvm_hyp.h        | 181
-rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h        |  12
-rw-r--r-- | arch/arm64/include/asm/kvm_perf_event.h |  68
-rw-r--r-- | arch/arm64/include/asm/virt.h           |  10
-rw-r--r-- | arch/arm64/include/uapi/asm/kvm.h       |   6
11 files changed, 343 insertions, 12 deletions
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8f271b83f910..a5c769b1c65b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -30,8 +30,12 @@
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
 #define ARM64_WORKAROUND_834220			7
+#define ARM64_HAS_NO_HW_PREFETCH		8
+#define ARM64_HAS_UAO				9
+#define ARM64_ALT_PAN_NOT_UAO			10
+#define ARM64_HAS_VIRT_HOST_EXTN		11
 
-#define ARM64_NCAPS				8
+#define ARM64_NCAPS				12
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9732908bfc8a..115ea2a64520 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -18,6 +18,7 @@
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/virt.h>
 
 #ifdef __KERNEL__
@@ -35,10 +36,21 @@ struct arch_hw_breakpoint {
 	struct arch_hw_breakpoint_ctrl ctrl;
 };
 
+/* Privilege Levels */
+#define AARCH64_BREAKPOINT_EL1	1
+#define AARCH64_BREAKPOINT_EL0	2
+
+#define DBG_HMC_HYP		(1 << 13)
+
 static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
 {
-	return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
+	u32 val = (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
 		ctrl.enabled;
+
+	if (is_kernel_in_hyp_mode() && ctrl.privilege == AARCH64_BREAKPOINT_EL1)
+		val |= DBG_HMC_HYP;
+
+	return val;
 }
 
 static inline void decode_ctrl_reg(u32 reg,
@@ -61,10 +73,6 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BREAKPOINT_STORE	2
 #define AARCH64_ESR_ACCESS_MASK	(1 << 6)
 
-/* Privilege Levels */
-#define AARCH64_BREAKPOINT_EL1	1
-#define AARCH64_BREAKPOINT_EL0	2
-
 /* Lengths */
 #define ARM_BREAKPOINT_LEN_1	0x1
 #define ARM_BREAKPOINT_LEN_2	0x3
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index d201d4b396d1..b56a0a81e4cb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,7 @@
 #include <asm/types.h>
 
 /* Hyp Configuration Register (HCR) bits */
+#define HCR_E2H		(UL(1) << 34)
 #define HCR_ID		(UL(1) << 33)
 #define HCR_CD		(UL(1) << 32)
 #define HCR_RW_SHIFT	31
@@ -81,7 +82,7 @@
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 #define HCR_INT_OVERRIDE   (HCR_FMO | HCR_IMO)
-
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* Hyp System Control Register (SCTLR_EL2) bits */
 #define SCTLR_EL2_EE	(1 << 25)
@@ -216,4 +217,7 @@
 	ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
 	ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
 
+#define CPACR_EL1_FPEN		(3 << 20)
+#define CPACR_EL1_TTA		(1 << 28)
+
 #endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 52b777b7d407..2d02ba67478c 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -35,9 +35,6 @@ extern char __kvm_hyp_init_end[];
 
 extern char __kvm_hyp_vector[];
 
-#define	__kvm_hyp_code_start	__hyp_text_start
-#define	__kvm_hyp_code_end	__hyp_text_end
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
@@ -45,9 +42,12 @@ extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
+extern void __init_stage2_translation(void);
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 779a5872a2c5..40bc1681b6d5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -29,6 +29,7 @@
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
+#include <asm/virt.h>
 
 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@ -43,6 +44,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+	if (is_kernel_in_hyp_mode())
+		vcpu->arch.hcr_el2 |= HCR_E2H;
 	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
@@ -189,6 +192,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
+static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+}
+
 static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
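The hw_breakpoint.h hunk above is easy to check by hand: with VHE the kernel runs at EL2, so a breakpoint that used to target "EL1" must set the HMC bit (bit 13) for the debug exception to be routed to the level the kernel now runs at. A minimal stand-alone sketch of the same bit arithmetic (is_kernel_in_hyp_mode() is stubbed to 1 here purely for illustration; the real function reads CurrentEL, as shown in the virt.h hunk further down):

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from the hw_breakpoint.h hunk above */
#define AARCH64_BREAKPOINT_EL1	1
#define DBG_HMC_HYP		(1 << 13)

/* Stub: pretend we booted at EL2 with VHE (illustration only) */
static int is_kernel_in_hyp_mode(void) { return 1; }

struct ctrl { uint32_t len, type, privilege, enabled; };

static uint32_t encode_ctrl_reg(struct ctrl c)
{
	uint32_t val = (c.len << 5) | (c.type << 3) | (c.privilege << 1) |
		c.enabled;

	/* An "EL1" breakpoint under VHE needs HMC so the exception goes to EL2 */
	if (is_kernel_in_hyp_mode() && c.privilege == AARCH64_BREAKPOINT_EL1)
		val |= DBG_HMC_HYP;
	return val;
}

int main(void)
{
	struct ctrl c = { .len = 0xf, .type = 0,
			  .privilege = AARCH64_BREAKPOINT_EL1, .enabled = 1 };
	/* Prints 0x21e3: the plain encoding 0x1e3 plus HMC (0x2000) */
	printf("DBGBCR value: 0x%x\n", encode_ctrl_reg(c));
	return 0;
}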
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 689d4c95e12f..71fa6fe9d54a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,7 +25,9 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/kvm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_perf_event.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
@@ -36,10 +38,11 @@
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
+#include <kvm/arm_pmu.h>
 
 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
 
-#define KVM_VCPU_MAX_FEATURES 3
+#define KVM_VCPU_MAX_FEATURES 4
 
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -114,6 +117,21 @@ enum vcpu_sysreg {
 	MDSCR_EL1,	/* Monitor Debug System Control Register */
 	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
 
+	/* Performance Monitors Registers */
+	PMCR_EL0,	/* Control Register */
+	PMSELR_EL0,	/* Event Counter Selection Register */
+	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
+	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
+	PMCCNTR_EL0,	/* Cycle Counter Register */
+	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
+	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
+	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
+	PMCNTENSET_EL0,	/* Count Enable Set Register */
+	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
+	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
+	PMSWINC_EL0,	/* Software Increment Register */
+	PMUSERENR_EL0,	/* User Enable Register */
+
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -211,6 +229,7 @@ struct kvm_vcpu_arch {
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
 	struct arch_timer_cpu timer_cpu;
+	struct kvm_pmu pmu;
 
 	/*
 	 * Anything that is not used directly from assembly code goes
@@ -342,5 +361,18 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+			       struct kvm_device_attr *attr);
+
+#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
+static inline void __cpu_init_stage2(void)
+{
+	kvm_call_hyp(__init_stage2_translation);
+}
 
 #endif /* __ARM64_KVM_HOST_H__ */
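Note how the enum above reserves 31 contiguous slots for PMEVCNTRn_EL0 and again for PMEVTYPERn_EL0, so trap handlers can index a counter arithmetically instead of switching on each register. A hedged sketch of the resulting access pattern, using the kernel's existing vcpu_sys_reg() accessor; the helper name is made up for illustration:

/*
 * Sketch (not from the patch): read event counter n of a vcpu.
 * Counters 0-30 are the event counters; index 31 is the cycle counter
 * and lives in its own PMCCNTR_EL0 slot.
 */
static u64 pmu_read_evcntr(struct kvm_vcpu *vcpu, unsigned int n)
{
	if (n >= ARMV8_PMU_MAX_COUNTERS - 1)
		return 0;
	return vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + n);
}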
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
new file mode 100644
index 000000000000..a46b019ebcf5
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HYP_H__
+#define __ARM64_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_perf_event.h>
+#include <asm/sysreg.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+	asm volatile(ALTERNATIVE("and %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
+	return v;
+}
+
+#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+
+static inline unsigned long __hyp_kern_va(unsigned long v)
+{
+	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
+	asm volatile(ALTERNATIVE("add %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "r" (offset));
+	return v;
+}
+
+#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
+
+#define read_sysreg_elx(r,nvh,vh)					\
+	({								\
+		u64 reg;						\
+		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
+					 "mrs_s %0, " __stringify(r##vh),\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+			     : "=r" (reg));				\
+		reg;							\
+	})
+
+#define write_sysreg_elx(v,r,nvh,vh)					\
+	do {								\
+		u64 __val = (u64)(v);					\
+		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
+					 "msr_s " __stringify(r##vh) ", %x0",\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+			     : : "rZ" (__val));				\
+	} while (0)
+
+/*
+ * Unified accessors for registers that have a different encoding
+ * between VHE and non-VHE. They must be specified without their "ELx"
+ * encoding.
+ */
+#define read_sysreg_el2(r)						\
+	({								\
+		u64 reg;						\
+		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
+					 "mrs %0, " __stringify(r##_EL1),\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+			     : "=r" (reg));				\
+		reg;							\
+	})
+
+#define write_sysreg_el2(v,r)						\
+	do {								\
+		u64 __val = (u64)(v);					\
+		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
+					 "msr " __stringify(r##_EL1) ", %x0",\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+			     : : "rZ" (__val));				\
+	} while (0)
+
+#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
+#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
+#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
+#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
+
+/* The VHE specific system registers and their encoding */
+#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
+#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
+#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
+#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
+#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
+#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
+#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
+#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
+#define far_EL12                sys_reg(3, 5, 6, 0, 0)
+#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
+#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
+#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
+#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
+#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
+#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
+#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
+#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
+#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
+#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
+#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)
+#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
+#define elr_EL12                sys_reg(3, 5, 4, 0, 1)
+
+/**
+ * hyp_alternate_select - Generates patchable code sequences that are
+ * used to switch between two implementations of a function, depending
+ * on the availability of a feature.
+ *
+ * @fname: a symbol name that will be defined as a function returning a
+ * function pointer whose type will match @orig and @alt
+ * @orig: A pointer to the default function, as returned by @fname when
+ * @cond doesn't hold
+ * @alt: A pointer to the alternate function, as returned by @fname
+ * when @cond holds
+ * @cond: a CPU feature (as described in asm/cpufeature.h)
+ */
+#define hyp_alternate_select(fname, orig, alt, cond)			\
+typeof(orig) * __hyp_text fname(void)					\
+{									\
+	typeof(alt) *val = orig;					\
+	asm volatile(ALTERNATIVE("nop		\n",			\
+				 "mov	%0, %1	\n",			\
+				 cond)					\
+		     : "+r" (val) : "r" (alt));				\
+	return val;							\
+}
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
+void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
+void __sysreg32_save_state(struct kvm_vcpu *vcpu);
+void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+
+void __debug_save_state(struct kvm_vcpu *vcpu,
+			struct kvm_guest_debug_arch *dbg,
+			struct kvm_cpu_context *ctxt);
+void __debug_restore_state(struct kvm_vcpu *vcpu,
+			   struct kvm_guest_debug_arch *dbg,
+			   struct kvm_cpu_context *ctxt);
+void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
+void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
+
+void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+bool __fpsimd_enabled(void);
+
+u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+void __noreturn __hyp_do_panic(unsigned long, ...);
+
+#endif /* __ARM64_KVM_HYP_H__ */
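These accessors are what let the same hyp code run on both VHE and non-VHE hardware: under VHE (HCR_EL2.E2H set), plain _EL1 sysreg accesses from EL2 are redirected to the EL2 registers, so read_sysreg_el2() can use the _EL1 name, while the guest's real EL1 state is reached through the new _EL12/_EL02 encodings. A hedged illustration of how the pieces combine, modelled on this series' hyp switch code (the vgic prototypes and the ARM64_HAS_SYSREG_GIC_CPUIF capability exist in the kernel, but this exact function is a sketch):

/* Sketch only: assumes <asm/kvm_hyp.h> is included */
static hyp_alternate_select(__vgic_call_save_state,
			    __vgic_v2_save_state, __vgic_v3_save_state,
			    ARM64_HAS_SYSREG_GIC_CPUIF);

static void __hyp_text __save_guest_state_sketch(struct kvm_vcpu *vcpu,
						 struct kvm_cpu_context *ctxt)
{
	/*
	 * Non-VHE: a plain "mrs x0, sctlr_el1".
	 * VHE: the SCTLR_EL12 encoding, because an unadorned _EL1
	 * access from EL2 would hit SCTLR_EL2 instead.
	 */
	ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);

	/* Call whichever vgic save routine was patched in at boot */
	__vgic_call_save_state()(vcpu);
}

The hyp_alternate_select() trick avoids a runtime feature test on every world switch: the choice between the two function pointers is patched into the instruction stream once, when alternatives are applied.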
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 736433912a1e..9a9318adefa6 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -23,13 +23,16 @@
 #include <asm/cpufeature.h>
 
 /*
- * As we only have the TTBR0_EL2 register, we cannot express
+ * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
  * "negative" addresses. This makes it impossible to directly share
  * mappings with the kernel.
  *
  * Instead, give the HYP mode its own VA region at a fixed offset from
  * the kernel by just masking the top bits (which are all ones for a
  * kernel address).
+ *
+ * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
+ * macros (the entire kernel runs at EL2).
  */
 #define HYP_PAGE_OFFSET_SHIFT	VA_BITS
 #define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
@@ -56,12 +59,19 @@
 
 #ifdef __ASSEMBLY__
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
 /*
  * Convert a kernel VA into a HYP VA.
  * reg: VA to be converted.
  */
 .macro kern_hyp_va	reg
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_else
+	nop
+alternative_endif
 .endm
 
 #else
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h
new file mode 100644
index 000000000000..c18fdebb8f66
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_perf_event.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_KVM_PERF_EVENT_H
+#define __ASM_KVM_PERF_EVENT_H
+
+#define	ARMV8_PMU_MAX_COUNTERS	32
+#define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E	(1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P	(1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C	(1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D	(1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X	(1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP	(1 << 5) /* Disable CCNT if non-invasive debug*/
+/* Determines which bit of PMCCNTR_EL0 generates an overflow */
+#define ARMV8_PMU_PMCR_LC	(1 << 6)
+#define	ARMV8_PMU_PMCR_N_SHIFT	11	 /* Number of counters supported */
+#define	ARMV8_PMU_PMCR_N_MASK	0x1f
+#define	ARMV8_PMU_PMCR_MASK	0x7f	 /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define	ARMV8_PMU_OVSR_MASK		0xffffffff	/* Mask for writable bits */
+#define	ARMV8_PMU_OVERFLOWED_MASK	ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define	ARMV8_PMU_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
+#define	ARMV8_PMU_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
+
+#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
+
+/*
+ * Event filters for PMUv3
+ */
+#define	ARMV8_PMU_EXCLUDE_EL1	(1 << 31)
+#define	ARMV8_PMU_EXCLUDE_EL0	(1 << 30)
+#define	ARMV8_PMU_INCLUDE_EL2	(1 << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK	0xf		/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
+
+#endif
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7a5df5252dd7..9f22dd607958 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -23,6 +23,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/ptrace.h>
+
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
  * A correctly-implemented bootloader must start all CPUs in the same mode:
@@ -50,6 +52,14 @@ static inline bool is_hyp_mode_mismatched(void)
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
+static inline bool is_kernel_in_hyp_mode(void)
+{
+	u64 el;
+
+	asm("mrs %0, CurrentEL" : "=r" (el));
+	return el == CurrentEL_EL2;
+}
+
 /* The section containing the hypervisor text */
 extern char __hyp_text_start[];
 extern char __hyp_text_end[];
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 2d4ca4bb0dd3..f209ea151dca 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -94,6 +94,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_EL1_32BIT		1 /* CPU running a 32bit VM */
 #define KVM_ARM_VCPU_PSCI_0_2		2 /* CPU uses PSCI v0.2 */
+#define KVM_ARM_VCPU_PMU_V3		3 /* Support guest PMUv3 */
 
 struct kvm_vcpu_init {
 	__u32 target;
@@ -204,6 +205,11 @@ struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_VGIC_GRP_CTRL	4
 #define   KVM_DEV_ARM_VGIC_CTRL_INIT	0
 
+/* Device Control API on vcpu fd */
+#define KVM_ARM_VCPU_PMU_V3_CTRL	0
+#define   KVM_ARM_VCPU_PMU_V3_IRQ	0
+#define   KVM_ARM_VCPU_PMU_V3_INIT	1
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
 #define KVM_ARM_IRQ_TYPE_MASK		0xff
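The three KVM_ARM_VCPU_PMU_V3_* constants in the uapi hunk define a vcpu-level device-control group. A hedged sketch of the intended user-space flow (it assumes the vcpu was created with the KVM_ARM_VCPU_PMU_V3 feature bit set in struct kvm_vcpu_init and that the in-kernel vgic is configured; error handling is abbreviated):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Sketch: wire up the guest PMU via the new vcpu device-control group.
 * "irq" is the vgic PPI chosen for PMU overflow interrupts.
 */
static int vcpu_enable_pmu(int vcpu_fd, int irq)
{
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr	= (__u64)(unsigned long)&irq,
	};

	/* KVM_HAS_DEVICE_ATTR returns 0 when the attribute is supported */
	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;		/* no PMU support on this host/VM */

	/* Tell the kernel which interrupt the virtual PMU should raise */
	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	/* Then latch the configuration in with the INIT attribute */
	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}

The two-step IRQ-then-INIT dance mirrors the rest of the ARM device-control interfaces: configuration attributes can be set while the VM is being built, and INIT freezes them before the vcpu first runs.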