author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-03 09:13:19 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-03 09:13:19 -0700
commit     8546dc1d4b671480961c3eaf4c0c102ae6848340 (patch)
tree       c646079fb48811b22b742deb6bd2e907f9e6c3d4 /arch/arm/include/asm/kvm_emulate.h
parent     9992ba72327fa0d8bdc9fb624e80f5cce338a711 (diff)
parent     33b9f582c5c1db515412cc7efff28f7d1779321f (diff)
download   linux-8546dc1d4b671480961c3eaf4c0c102ae6848340.tar.bz2
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "The major items included in here are:

   - MCPM, multi-cluster power management, part of the infrastructure
     required for ARM's big.LITTLE support.

   - A rework of the ARM KVM code to allow re-use by ARM64.

   - Error handling cleanups of the IS_ERR_OR_NULL() madness and fixes
     of that stuff for arch/arm.

   - Preparatory patches for Cortex-M3 support from Uwe Kleine-König.

  There is also a set of three patches in here from Hugh/Catalin to
  address freeing of inappropriate page tables on LPAE.  You already
  have these from akpm, but they were already part of my tree at the
  time he sent them, so unfortunately they'll end up with duplicate
  commits"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
  ARM: EXYNOS: remove unnecessary use of IS_ERR_VALUE()
  ARM: IMX: remove unnecessary use of IS_ERR_VALUE()
  ARM: OMAP: use consistent error checking
  ARM: cleanup: OMAP hwmod error checking
  ARM: 7709/1: mcpm: Add explicit AFLAGS to support v6/v7 multiplatform kernels
  ARM: 7700/2: Make cpu_init() notrace
  ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE
  ARM: 7701/1: mm: Allow arch code to control the user page table ceiling
  ARM: 7703/1: Disable preemption in broadcast_tlb*_a15_erratum()
  ARM: mcpm: provide an interface to set the SMP ops at run time
  ARM: mcpm: generic SMP secondary bringup and hotplug support
  ARM: mcpm_head.S: vlock-based first man election
  ARM: mcpm: Add baremetal voting mutexes
  ARM: mcpm: introduce helpers for platform coherency exit/setup
  ARM: mcpm: introduce the CPU/cluster power API
  ARM: multi-cluster PM: secondary kernel entry code
  ARM: cacheflush: add synchronization helpers for mixed cache state accesses
  ARM: cpu hotplug: remove majority of cache flushing from platforms
  ARM: smp: flush L1 cache in cpu_die()
  ARM: tegra: remove tegra specific cpu_disable()
  ...
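The IS_ERR_VALUE()/IS_ERR_OR_NULL() cleanups listed above follow a common pattern. A minimal, hypothetical sketch of that pattern is shown below; the function names are invented for illustration and do not appear in any commit in this merge.

	/*
	 * Hypothetical sketch of the error-checking cleanup pattern; the
	 * names below are invented and not taken from this merge.
	 */
	#include <linux/err.h>

	int hypothetical_enable(void);	/* placeholder: returns 0 or a negative errno */

	/* Before: IS_ERR_VALUE() applied to an ordinary int return value. */
	static int hypothetical_setup_old(void)
	{
		int ret = hypothetical_enable();

		if (IS_ERR_VALUE(ret))
			return ret;
		return 0;
	}

	/* After: the plain check the cleanups converge on. */
	static int hypothetical_setup_new(void)
	{
		int ret = hypothetical_enable();

		if (ret < 0)
			return ret;
		return 0;
	}

IS_ERR_VALUE() is meant for unsigned long values that may encode an errno in the top error range (e.g. mapping addresses); on a plain int that already holds 0 or -errno it only obscures the check.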
Diffstat (limited to 'arch/arm/include/asm/kvm_emulate.h')
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 107
1 file changed, 100 insertions(+), 7 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb5..82b4babead2c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
return 1;
}
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
- return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+ return &vcpu->arch.regs.usr_regs.ARM_pc;
}
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
- return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+ return &vcpu->arch.regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
return reg == 15;
}
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+ return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+ return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+ switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+ case 0:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ default:
+ kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+ return -EFAULT;
+ }
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
#endif /* __ARM_KVM_EMULATE_H__ */
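For context on how the accessors introduced above fit together, here is a rough, illustrative sketch of decoding a Stage-2 data abort into an MMIO exit. It is not code from this patch: decode_mmio_abort() is an invented name, and sign extension (kvm_vcpu_dabt_issext()) as well as the S1PTW and external-abort cases are deliberately ignored.

	/*
	 * Illustrative sketch only -- not part of this patch. Combines the
	 * HSR accessors above to turn a guest data abort into a
	 * KVM_EXIT_MMIO request for userspace.
	 */
	#include <linux/kvm_host.h>
	#include <asm/kvm_emulate.h>

	static int decode_mmio_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
	{
		phys_addr_t ipa = kvm_vcpu_get_fault_ipa(vcpu);	/* IPA bits [39:12] from HPFAR */
		int len;

		if (!kvm_vcpu_dabt_isvalid(vcpu))
			return -EINVAL;	/* HSR.ISV clear: syndrome does not describe the access */

		len = kvm_vcpu_dabt_get_as(vcpu);	/* access size: 1, 2 or 4 bytes */
		if (len < 0)
			return len;

		run->exit_reason    = KVM_EXIT_MMIO;
		run->mmio.phys_addr = ipa | (kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK);
		run->mmio.len       = len;
		run->mmio.is_write  = kvm_vcpu_dabt_iswrite(vcpu);

		if (run->mmio.is_write) {
			unsigned long data = *vcpu_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu));

			memcpy(run->mmio.data, &data, len);
		}

		/* Resume the guest after the trapped load/store. */
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		return 0;
	}

Note how kvm_vcpu_get_fault_ipa() only provides the page-aligned part of the faulting IPA (HPFAR carries bits [39:12]), so the page offset has to be taken from HxFAR via kvm_vcpu_get_hfar().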