commit fc5d1f1a42fba6266ab95dc3b84937933a9b5a66
Author:    Christoffer Dall <christoffer.dall@arm.com>  2018-12-01 08:41:28 -0800
Committer: Marc Zyngier <maz@kernel.org>  2020-05-28 11:57:10 +0100
Tree:      ab3398333d445c1b005a4f6d890c718aeca98b77
Parent:    0a78791c0d12fcd5d3f486668defb9ab055e3729
KVM: arm64: vgic-v3: Take cpu_if pointer directly instead of vcpu
If we move the used_lrs field to the version-specific cpu interface
structure, the following functions only operate on the struct
vgic_v3_cpu_if and not the full vcpu:

  __vgic_v3_save_state
  __vgic_v3_restore_state
  __vgic_v3_activate_traps
  __vgic_v3_deactivate_traps
  __vgic_v3_save_aprs
  __vgic_v3_restore_aprs

This is going to be very useful for nested virt, so move the used_lrs
field and change the prototypes and implementations of these functions
to take the cpu_if parameter directly.

No functional change.

Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
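The shape of the change, sketched below: used_lrs is duplicated into each
version-specific cpu interface, and the six hyp helpers listed above are
narrowed to take that interface directly. This is an orientation sketch
only; the field placement and type are assumptions (the real definitions
live in include/kvm/arm_vgic.h), while the six prototypes follow from the
commit message.

	/* Sketch (assumed layout): used_lrs moves out of the common
	 * struct vgic_cpu and into each version-specific interface.
	 */
	struct vgic_v2_cpu_if {
		/* ... existing GICv2 register state ... */
		unsigned int	used_lrs;	/* new home of the LR count */
	};

	struct vgic_v3_cpu_if {
		/* ... existing GICv3 register state ... */
		unsigned int	used_lrs;	/* new home of the LR count */
	};

	/* The hyp-side helpers now operate on the interface, not the vcpu: */
	void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
	void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
	void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
	void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
	void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
	void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);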
Diffstat (limited to 'arch/arm64/kvm/vgic')
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v2.c  10
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c  14
-rw-r--r--  arch/arm64/kvm/vgic/vgic.c     25
3 files changed, 30 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 621cc168fe3f..ebf53a4e1296 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -56,7 +56,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
cpuif->vgic_hcr &= ~GICH_HCR_UIE;
- for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+ for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
u32 val = cpuif->vgic_lr[lr];
u32 cpuid, intid = val & GICH_LR_VIRTUALID;
struct vgic_irq *irq;
@@ -120,7 +120,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
vgic_put_irq(vcpu->kvm, irq);
}
- vgic_cpu->used_lrs = 0;
+ cpuif->used_lrs = 0;
}
/*
@@ -427,7 +427,7 @@ out:
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+ u64 used_lrs = cpu_if->used_lrs;
u64 elrsr;
int i;
@@ -448,7 +448,7 @@ static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
void __iomem *base = kvm_vgic_global_state.vctrl_base;
- u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+ u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
if (!base)
return;
@@ -463,7 +463,7 @@ void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
void __iomem *base = kvm_vgic_global_state.vctrl_base;
- u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+ u64 used_lrs = cpu_if->used_lrs;
int i;
if (!base)
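The GICv2 side only needs the field moved, not new prototypes: its LR
save/restore runs in the kernel through the MMIO-mapped vctrl_base
(visible in the hunks above), so there is no hyp-side pointer to
retarget. The access pattern after the move, restated as a plain sketch:

	/* Sketch: the same access the hunks above introduce. */
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = cpu_if->used_lrs;	/* was vcpu->arch.vgic_cpu.used_lrs */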
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 3ccd6d3cb4d3..76e2d85789ed 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -39,7 +39,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
cpuif->vgic_hcr &= ~ICH_HCR_UIE;
- for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+ for (lr = 0; lr < cpuif->used_lrs; lr++) {
u64 val = cpuif->vgic_lr[lr];
u32 intid, cpuid;
struct vgic_irq *irq;
@@ -111,7 +111,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
vgic_put_irq(vcpu->kvm, irq);
}
- vgic_cpu->used_lrs = 0;
+ cpuif->used_lrs = 0;
}
/* Requires the irq to be locked already */
@@ -662,10 +662,10 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
if (likely(cpu_if->vgic_sre))
kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
- kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
+ kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));
if (has_vhe())
- __vgic_v3_activate_traps(vcpu);
+ __vgic_v3_activate_traps(cpu_if);
WARN_ON(vgic_v4_load(vcpu));
}
@@ -680,12 +680,14 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
WARN_ON(vgic_v4_put(vcpu, false));
vgic_v3_vmcr_sync(vcpu);
- kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+ kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));
if (has_vhe())
- __vgic_v3_deactivate_traps(vcpu);
+ __vgic_v3_deactivate_traps(cpu_if);
}
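Note the kern_hyp_va() wrapping in vgic_v3_load() and vgic_v3_put()
above: with nVHE, EL2 maps kernel objects at a different virtual address
than the kernel's linear map, so a pointer handed to a hyp call must be
translated before hyp code can dereference it (on VHE, where the kernel
itself runs at EL2, the translation is effectively an identity). A
minimal sketch of the pattern, assuming only what the hunks above show:

	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/* Translate the kernel pointer into its EL2 alias before the
	 * hyp call dereferences it. */
	kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));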
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 99b02ca730a8..c3643b7f101b 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -786,6 +786,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
int count;
bool multi_sgi;
u8 prio = 0xff;
+ int i = 0;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
@@ -827,11 +828,14 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
}
}
- vcpu->arch.vgic_cpu.used_lrs = count;
-
/* Nuke remaining LRs */
- for ( ; count < kvm_vgic_global_state.nr_lr; count++)
- vgic_clear_lr(vcpu, count);
+ for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+ vgic_clear_lr(vcpu, i);
+
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
+ else
+ vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}
static inline bool can_access_vgic_from_kernel(void)
@@ -849,13 +853,13 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
else
- __vgic_v3_save_state(vcpu);
+ __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ int used_lrs;
/* An empty ap_list_head implies used_lrs == 0 */
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
@@ -864,7 +868,12 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
if (can_access_vgic_from_kernel())
vgic_save_state(vcpu);
- if (vgic_cpu->used_lrs)
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
+ else
+ used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+
+ if (used_lrs)
vgic_fold_lr_state(vcpu);
vgic_prune_ap_list(vcpu);
}
@@ -874,7 +883,7 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
else
- __vgic_v3_restore_state(vcpu);
+ __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
/* Flush our emulation state into the GIC hardware before entering the guest. */
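Common code now has to select the version-specific used_lrs itself, as
kvm_vgic_sync_hwstate and vgic_flush_lr_state do above. A hypothetical
helper capturing that pattern (vgic_get_used_lrs is an illustration, not
something this patch adds):

	/* Hypothetical helper, not in the patch: mirrors the selection
	 * logic kvm_vgic_sync_hwstate uses above. */
	static inline unsigned int vgic_get_used_lrs(struct kvm_vcpu *vcpu)
	{
		if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
			return vcpu->arch.vgic_cpu.vgic_v2.used_lrs;

		return vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	}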