author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-16 09:55:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-16 09:55:35 -0700
commit     10dc3747661bea9215417b659449bb7b8ed3df2c (patch)
tree       d943974b4941203a7db2fabe4896852cf0f16bc4 /arch/arm/include
parent     047486d8e7c2a7e8d75b068b69cb67b47364f5d4 (diff)
parent     f958ee745f70b60d0e41927cab2c073104bc70c2 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "One of the largest releases for KVM... Hardly any generic changes,
  but lots of architecture-specific updates.

  ARM:
   - VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
   - PMU support for guests
   - 32bit world switch rewritten in C
   - various optimizations to the vgic save/restore code.

  PPC:
   - enabled KVM-VFIO integration ("VFIO device")
   - optimizations to speed up IPIs between vcpus
   - in-kernel handling of IOMMU hypercalls
   - support for dynamic DMA windows (DDW).

  s390:
   - provide the floating point registers via sync regs
   - separated instruction vs. data accesses
   - dirty log improvements for huge guests
   - bugfixes and documentation improvements.

  x86:
   - Hyper-V VMBus hypercall userspace exit
   - alternative implementation of lowest-priority interrupts using
     vector hashing (for better VT-d posted interrupt support)
   - fixed guest debugging with nested virtualization
   - improved interrupt tracking in the in-kernel IOAPIC
   - generic infrastructure for tracking writes to guest memory -
     currently its only use is to speed up the legacy shadow paging
     (pre-EPT) case, but in the future it will be used for virtual
     GPUs as well
   - much cleanup (LAPIC, kvmclock, MMU, PIT), including ubsan fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
  KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
  KVM: x86: disable MPX if host did not enable MPX XSAVE features
  arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
  arm64: KVM: vgic-v3: Reset LRs at boot time
  arm64: KVM: vgic-v3: Do not save an LR known to be empty
  arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
  arm64: KVM: vgic-v3: Avoid accessing ICH registers
  KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
  KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
  KVM: arm/arm64: vgic-v2: Reset LRs at boot time
  KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
  KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
  KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
  KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
  KVM: s390: allocate only one DMA page per VM
  KVM: s390: enable STFLE interpretation only if enabled for the guest
  KVM: s390: wake up when the VCPU cpu timer expires
  KVM: s390: step the VCPU timer while in enabled wait
  KVM: s390: protect VCPU cpu timer with a seqcount
  KVM: s390: step VCPU cpu timer during kvm_run ioctl
  ...
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/kvm_asm.h       41
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h   20
-rw-r--r--  arch/arm/include/asm/kvm_host.h      80
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h      139
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h        2
-rw-r--r--  arch/arm/include/asm/virt.h           9
6 files changed, 232 insertions(+), 59 deletions(-)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 194c91b610ff..15d58b42d5a1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -19,38 +19,7 @@
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
-/* 0 is reserved as an invalid value. */
-#define c0_MPIDR 1 /* MultiProcessor ID Register */
-#define c0_CSSELR 2 /* Cache Size Selection Register */
-#define c1_SCTLR 3 /* System Control Register */
-#define c1_ACTLR 4 /* Auxiliary Control Register */
-#define c1_CPACR 5 /* Coprocessor Access Control */
-#define c2_TTBR0 6 /* Translation Table Base Register 0 */
-#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
-#define c2_TTBR1 8 /* Translation Table Base Register 1 */
-#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
-#define c2_TTBCR 10 /* Translation Table Base Control R. */
-#define c3_DACR 11 /* Domain Access Control Register */
-#define c5_DFSR 12 /* Data Fault Status Register */
-#define c5_IFSR 13 /* Instruction Fault Status Register */
-#define c5_ADFSR 14 /* Auxilary Data Fault Status R */
-#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */
-#define c6_DFAR 16 /* Data Fault Address Register */
-#define c6_IFAR 17 /* Instruction Fault Address Register */
-#define c7_PAR 18 /* Physical Address Register */
-#define c7_PAR_high 19 /* PAR top 32 bits */
-#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */
-#define c10_PRRR 21 /* Primary Region Remap Register */
-#define c10_NMRR 22 /* Normal Memory Remap Register */
-#define c12_VBAR 23 /* Vector Base Address Register */
-#define c13_CID 24 /* Context ID Register */
-#define c13_TID_URW 25 /* Thread ID, User R/W */
-#define c13_TID_URO 26 /* Thread ID, User R/O */
-#define c13_TID_PRIV 27 /* Thread ID, Privileged */
-#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
-#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */
-#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */
-#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */
+#include <asm/virt.h>
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
@@ -86,19 +55,15 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
-extern char __kvm_hyp_exit[];
-extern char __kvm_hyp_exit_end[];
-
extern char __kvm_hyp_vector[];
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
-
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern void __init_stage2_translation(void);
#endif
#endif /* __ARM_KVM_ASM_H__ */
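The __kvm_* symbols above are hyp-mode entry points invoked from the host through kvm_call_hyp() (whose prototype is changed to return unsigned long in the kvm_host.h hunk below). A minimal sketch of a call site, with a hypothetical wrapper around the real declarations:

	/* Hypothetical host-side helper: kvm_call_hyp() issues an HVC with
	 * the hyp function as its first argument and forwards the rest. */
	static void flush_guest_tlbs(struct kvm *kvm)
	{
		kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);	/* whole-VMID flush */
	}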
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 3095df091ff8..ee5328fc4b06 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -68,12 +68,12 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
- return &vcpu->arch.regs.usr_regs.ARM_pc;
+ return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
- return &vcpu->arch.regs.usr_regs.ARM_cpsr;
+ return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -83,13 +83,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
- unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
- unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return cpsr_mode > USR_MODE;
}
@@ -108,11 +108,6 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
-static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hyp_pc;
-}
-
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
@@ -143,6 +138,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
+static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
+}
+
/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
@@ -192,7 +192,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
+ return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
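All of these accessors now reach guest state through vcpu->arch.ctxt rather than vcpu->arch.regs. As a hedged illustration (the helper name is made up; vcpu_pc(), vcpu_cpsr() and PSR_T_BIT are real), a trap handler stepping the guest past an emulated instruction might read:

	/* Hypothetical emulation helper: advance the guest PC over one
	 * instruction, honouring the Thumb bit in the guest CPSR. */
	static void skip_guest_instr(struct kvm_vcpu *vcpu)
	{
		bool is_thumb = *vcpu_cpsr(vcpu) & PSR_T_BIT;

		*vcpu_pc(vcpu) += is_thumb ? 2 : 4;	/* lands in ctxt.gp_regs */
	}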
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f9f27792d8ed..385070180c25 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -85,20 +85,61 @@ struct kvm_vcpu_fault_info {
u32 hsr; /* Hyp Syndrome Register */
u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
u32 hpfar; /* Hyp IPA Fault Address Register */
- u32 hyp_pc; /* PC when exception was taken from Hyp mode */
};
-typedef struct vfp_hard_struct kvm_cpu_context_t;
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+ __INVALID_SYSREG__,
+ c0_MPIDR, /* MultiProcessor ID Register */
+ c0_CSSELR, /* Cache Size Selection Register */
+ c1_SCTLR, /* System Control Register */
+ c1_ACTLR, /* Auxiliary Control Register */
+ c1_CPACR, /* Coprocessor Access Control */
+ c2_TTBR0, /* Translation Table Base Register 0 */
+ c2_TTBR0_high, /* TTBR0 top 32 bits */
+ c2_TTBR1, /* Translation Table Base Register 1 */
+ c2_TTBR1_high, /* TTBR1 top 32 bits */
+ c2_TTBCR, /* Translation Table Base Control R. */
+ c3_DACR, /* Domain Access Control Register */
+ c5_DFSR, /* Data Fault Status Register */
+ c5_IFSR, /* Instruction Fault Status Register */
+ c5_ADFSR, /* Auxiliary Data Fault Status R */
+ c5_AIFSR, /* Auxiliary Instruction Fault Status R */
+ c6_DFAR, /* Data Fault Address Register */
+ c6_IFAR, /* Instruction Fault Address Register */
+ c7_PAR, /* Physical Address Register */
+ c7_PAR_high, /* PAR top 32 bits */
+ c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
+ c10_PRRR, /* Primary Region Remap Register */
+ c10_NMRR, /* Normal Memory Remap Register */
+ c12_VBAR, /* Vector Base Address Register */
+ c13_CID, /* Context ID Register */
+ c13_TID_URW, /* Thread ID, User R/W */
+ c13_TID_URO, /* Thread ID, User R/O */
+ c13_TID_PRIV, /* Thread ID, Privileged */
+ c14_CNTKCTL, /* Timer Control Register (PL1) */
+ c10_AMAIR0, /* Auxiliary Memory Attribute Indirection Reg0 */
+ c10_AMAIR1, /* Auxiliary Memory Attribute Indirection Reg1 */
+ NR_CP15_REGS /* Number of regs (incl. invalid) */
+};
+
+struct kvm_cpu_context {
+ struct kvm_regs gp_regs;
+ struct vfp_hard_struct vfp;
+ u32 cp15[NR_CP15_REGS];
+};
+
+typedef struct kvm_cpu_context kvm_cpu_context_t;
struct kvm_vcpu_arch {
- struct kvm_regs regs;
+ struct kvm_cpu_context ctxt;
int target; /* Processor target */
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
- /* System control coprocessor (cp15) */
- u32 cp15[NR_CP15_REGS];
-
/* The CPU type we expose to the VM */
u32 midr;
@@ -111,9 +152,6 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* Floating point registers (VFP and Advanced SIMD/NEON) */
- struct vfp_hard_struct vfp_guest;
-
/* Host FP context */
kvm_cpu_context_t *host_cpu_context;
@@ -158,12 +196,14 @@ struct kvm_vcpu_stat {
u64 exits;
};
+#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
+
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-u64 kvm_call_hyp(void *hypfn, ...);
+unsigned long kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
#define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -220,6 +260,11 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
+static inline void __cpu_init_stage2(void)
+{
+ kvm_call_hyp(__init_stage2_translation);
+}
+
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
return 0;
@@ -242,5 +287,20 @@ static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
#endif /* __ARM_KVM_HOST_H__ */
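The net effect of this hunk: GP registers, VFP state and the CP15 file now live together in one struct kvm_cpu_context, and the register indices become an enum in this header. A small sketch of the new accessor in use (the helper is hypothetical; vcpu_cp15() and the enum values are real):

	/* Hypothetical debug helper: dump a guest's MPIDR and context ID
	 * through the vcpu_cp15() accessor introduced above. */
	static void dump_guest_ids(struct kvm_vcpu *vcpu)
	{
		pr_info("MPIDR=%08x CID=%08x\n",
			vcpu_cp15(vcpu, c0_MPIDR),
			vcpu_cp15(vcpu, c13_CID));
	}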
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
new file mode 100644
index 000000000000..f0e860761380
--- /dev/null
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_HYP_H__
+#define __ARM_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/vfp.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+#define kern_hyp_va(v) (v)
+#define hyp_kern_va(v) (v)
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+#define __ACCESS_VFP(CRn) \
+ "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define write_special(v, r) \
+ asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
+#define read_special(r) ({ \
+ u32 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+#define TTBR0 __ACCESS_CP15_64(0, c2)
+#define TTBR1 __ACCESS_CP15_64(1, c2)
+#define VTTBR __ACCESS_CP15_64(6, c2)
+#define PAR __ACCESS_CP15_64(0, c7)
+#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
+#define CNTVOFF __ACCESS_CP15_64(4, c14)
+
+#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
+#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
+#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
+#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
+#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
+#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
+#define HCR __ACCESS_CP15(c1, 4, c1, 0)
+#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
+#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
+#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
+#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
+#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
+#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
+#define DACR __ACCESS_CP15(c3, 0, c0, 0)
+#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
+#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
+#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
+#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
+#define HSR __ACCESS_CP15(c5, 4, c2, 0)
+#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
+#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
+#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
+#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
+#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
+#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
+#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
+#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
+#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
+#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
+#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
+#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
+#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
+#define CID __ACCESS_CP15(c13, 0, c0, 1)
+#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
+#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
+#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
+#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
+#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
+#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
+#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
+
+#define VFP_FPEXC __ACCESS_VFP(FPEXC)
+
+/* AArch64 compatibility macros, only for the timer so far */
+#define read_sysreg_el0(r) read_sysreg(r##_el0)
+#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
+
+#define cntv_ctl_el0 CNTV_CTL
+#define cntv_cval_el0 CNTV_CVAL
+#define cntvoff_el2 CNTVOFF
+#define cnthctl_el2 CNTHCTL
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+
+void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
+void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
+static inline bool __vfp_enabled(void)
+{
+ return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
+}
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
+
+int asmlinkage __guest_enter(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_context *host);
+int asmlinkage __hyp_do_panic(const char *, int, u32);
+
+#endif /* __ARM_KVM_HYP_H__ */
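Each register token above bundles the mrc/mcr mnemonics with the coprocessor encoding, so read_sysreg()/write_sysreg() expand to a single inline-asm access. A hedged sketch of how the save/restore helpers declared here could use them (the function body is illustrative, not the actual hyp code):

	/* Illustrative __hyp_text helper: read_sysreg(SCTLR) expands to
	 * "mrc p15, 0, %0, c1, c0, 0"; write_sysreg() emits the matching mcr. */
	static void __hyp_text __sysreg_save_example(struct kvm_cpu_context *ctxt)
	{
		ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);	/* mrc p15, 0, %0, c1, c0, 0 */
		ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);	/* mrc p15, 0, %0, c12, c0, 0 */
	}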
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a520b7987a29..da44be9db4fa 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -179,7 +179,7 @@ struct kvm;
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
- return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+ return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
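In the check above, 0b101 selects SCTLR.M (bit 0, MMU enable) and SCTLR.C (bit 2, data cache enable), so it is true only when both are set. Spelled out with named bits (the defines are illustrative; the architectural bit positions are real):

	#define SCTLR_M	(1U << 0)	/* MMU enable */
	#define SCTLR_C	(1U << 2)	/* data cache enable */

	/* Equivalent, more explicit form of vcpu_has_cache_enabled() above. */
	static inline bool vcpu_mmu_and_cache_on(struct kvm_vcpu *vcpu)
	{
		u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

		return (sctlr & (SCTLR_M | SCTLR_C)) == (SCTLR_M | SCTLR_C);
	}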
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 4371f45c5784..d4ceaf5f299b 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -74,6 +74,15 @@ static inline bool is_hyp_mode_mismatched(void)
{
return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ return false;
+}
+
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
#endif
#endif /* __ASSEMBLY__ */
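is_kernel_in_hyp_mode() is the 32-bit counterpart of the arm64 VHE check added by this merge: on ARMv7 the kernel never runs in HYP mode itself, so it is hard-wired to false and VHE-only paths compile away. A hypothetical caller:

	/* Hypothetical init path: the constant false lets the compiler
	 * eliminate the VHE branch entirely on 32-bit ARM. */
	static void __init setup_hyp(void)
	{
		if (is_kernel_in_hyp_mode())
			return;		/* VHE: already running at EL2 */

		/* ... install HYP vectors via the hyp stub ... */
	}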