author     Mark Rutland <mark.rutland@arm.com>       2018-07-05 15:16:53 +0100
committer  Will Deacon <will.deacon@arm.com>         2018-07-05 17:24:15 +0100
commit     256c0960b7b6453dc90a4e879da52ab76b4037f9 (patch)
tree       d0da76f0c60112de94e055247f8e25fb96c548c2 /virt
parent     d64567f67835736d65086e9bfc41a19b2863c32e (diff)
download   linux-256c0960b7b6453dc90a4e879da52ab76b4037f9.tar.bz2
kvm/arm: use PSR_AA32 definitions
Some code inspects or manipulates the SPSR_ELx value for exceptions taken
from AArch32; that value is already in the SPSR_ELx format, not in the
AArch32 PSR format.
To separate these from cases where we care about the AArch32 PSR format,
migrate these cases to use the PSR_AA32_* definitions rather than
COMPAT_PSR_*.
There should be no functional change as a result of this patch.
Note that arm64 KVM does not support a compat KVM API, and always uses
the SPSR_ELx format, even for AArch32 guests.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/arm/aarch32.c   20
1 file changed, 10 insertions, 10 deletions
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index efc84cbe8277..5abbe9b3c652 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 {
 	unsigned long itbits, cond;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+	bool is_arm = !(cpsr & PSR_AA32_T_BIT);
 
-	if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
+	if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
 		return;
 
 	cond = (cpsr & 0xe000) >> 13;
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 	else
 		itbits = (itbits << 1) & 0x1f;
 
-	cpsr &= ~COMPAT_PSR_IT_MASK;
+	cpsr &= ~PSR_AA32_IT_MASK;
 	cpsr |= cond << 13;
 	cpsr |= (itbits & 0x1c) << (10 - 2);
 	cpsr |= (itbits & 0x3) << 25;
@@ -138,7 +138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	bool is_thumb;
 
-	is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
 	if (is_thumb && !is_wide_instr)
 		*vcpu_pc(vcpu) += 2;
 	else
@@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
 	unsigned long cpsr;
 	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
 	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
-	cpsr = mode | COMPAT_PSR_I_BIT;
+	cpsr = mode | PSR_AA32_I_BIT;
 
 	if (sctlr & (1 << 30))
-		cpsr |= COMPAT_PSR_T_BIT;
+		cpsr |= PSR_AA32_T_BIT;
 	if (sctlr & (1 << 25))
-		cpsr |= COMPAT_PSR_E_BIT;
+		cpsr |= PSR_AA32_E_BIT;
 
 	*vcpu_cpsr(vcpu) = cpsr;
 
@@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
 }
 
 /*
@@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		fsr = &vcpu_cp15(vcpu, c5_DFSR);
 	}
 
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
 
 	*far = addr;
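For context only (this is not part of the commit): the PSR_AA32_* names used
above are defined in the arm64 ptrace headers and, for exceptions taken from
AArch32, are expected to encode the same bit positions as the COMPAT_PSR_*
names they replace, which is why the patch is a pure rename. The standalone
sketch below uses assumed numeric values and a hypothetical helper to
illustrate the kind of check the diff performs.

/*
 * Illustrative sketch only. The numeric values are assumptions mirroring
 * the SPSR_ELx layout for exceptions taken from AArch32; the authoritative
 * definitions live in the kernel's arm64 ptrace header.
 */
#define PSR_AA32_MODE_ABT	0x00000017
#define PSR_AA32_MODE_UND	0x0000001b
#define PSR_AA32_T_BIT		0x00000020
#define PSR_AA32_I_BIT		0x00000080
#define PSR_AA32_A_BIT		0x00000100
#define PSR_AA32_E_BIT		0x00000200
#define PSR_AA32_IT_MASK	0x0600fc00

/*
 * Hypothetical helper, not from the patch: the same Thumb-state test that
 * kvm_skip_instr32() performs on the saved SPSR value.
 */
static inline int spsr_aa32_is_thumb(unsigned long spsr)
{
	return !!(spsr & PSR_AA32_T_BIT);
}

With these assumed values, the mode word that prepare_fault32() builds for an
undefined-instruction exception, PSR_AA32_MODE_UND | PSR_AA32_I_BIT, works out
to 0x9b, the same value the old COMPAT_PSR_* spellings produced, consistent
with the commit's claim of no functional change.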