-rw-r--r--  arch/arm/include/asm/kvm_host.h     |  3
-rw-r--r--  arch/arm/kvm/arm.c                  |  1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h   |  3
-rw-r--r--  include/kvm/arm_arch_timer.h        |  9
-rw-r--r--  virt/kvm/arm/arch_timer.c           | 38
-rw-r--r--  virt/kvm/arm/hyp/timer-sr.c         |  3
6 files changed, 32 insertions, 25 deletions
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d5423ab15ed5..cc495d799c67 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -60,9 +60,6 @@ struct kvm_arch {
 	/* The last vcpu id that ran on each physical CPU */
 	int __percpu *last_vcpu_ran;
 
-	/* Timer */
-	struct arch_timer_kvm	timer;
-
 	/*
 	 * Anything that is not used directly from assembly code goes
 	 * here.
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9d7446456e0c..f93f2171a48b 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -135,7 +135,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		goto out_free_stage2_pgd;
 
 	kvm_vgic_early_init(kvm);
-	kvm_timer_init(kvm);
 
 	/* Mark the initial VMID generation invalid */
 	kvm->arch.vmid_gen = 0;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e5050388e062..4a758cba1262 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -70,9 +70,6 @@ struct kvm_arch {
 
 	/* Interrupt controller */
 	struct vgic_dist	vgic;
-
-	/* Timer */
-	struct arch_timer_kvm	timer;
 };
 
 #define KVM_NR_MEM_OBJS 40
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index daad3c133b9f..2c8560b4642a 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -23,11 +23,6 @@
 #include <linux/hrtimer.h>
 #include <linux/workqueue.h>
 
-struct arch_timer_kvm {
-	/* Virtual offset */
-	u64	cntvoff;
-};
-
 struct arch_timer_context {
 	/* Registers: control register, timer value */
 	u32	cnt_ctl;
@@ -38,6 +33,9 @@ struct arch_timer_context {
 
 	/* Active IRQ state caching */
 	bool	active_cleared_last;
+
+	/* Virtual offset */
+	u64	cntvoff;
 };
 
 struct arch_timer_cpu {
@@ -58,7 +56,6 @@ struct arch_timer_cpu {
 
 int kvm_timer_hyp_init(void);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
-void kvm_timer_init(struct kvm *kvm);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 			 const struct kvm_irq_level *irq);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index d3556b3ca694..5004a679b125 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -101,9 +101,10 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
 {
 	u64 cval, now;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	cval = vcpu_vtimer(vcpu)->cnt_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+	cval = vtimer->cnt_cval;
+	now = kvm_phys_timer_read() - vtimer->cntvoff;
 
 	if (now < cval) {
 		u64 ns;
@@ -159,7 +160,7 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
 		return false;
 
 	cval = vtimer->cnt_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+	now = kvm_phys_timer_read() - vtimer->cntvoff;
 
 	return cval <= now;
 }
@@ -354,10 +355,32 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+/* Make the updates of cntvoff for all vtimer contexts atomic */
+static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
+{
+	int i;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu *tmp;
+
+	mutex_lock(&kvm->lock);
+	kvm_for_each_vcpu(i, tmp, kvm)
+		vcpu_vtimer(tmp)->cntvoff = cntvoff;
+
+	/*
+	 * When called from the vcpu create path, the CPU being created is not
+	 * included in the loop above, so we just set it here as well.
+	 */
+	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
+	mutex_unlock(&kvm->lock);
+}
+
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
+	/* Synchronize cntvoff across all vtimers of a VM. */
+	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+
 	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
 	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	timer->timer.function = kvm_timer_expire;
@@ -377,7 +400,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 		vtimer->cnt_ctl = value;
 		break;
 	case KVM_REG_ARM_TIMER_CNT:
-		vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
+		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
 		break;
 	case KVM_REG_ARM_TIMER_CVAL:
 		vtimer->cnt_cval = value;
@@ -398,7 +421,7 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
 	case KVM_REG_ARM_TIMER_CTL:
 		return vtimer->cnt_ctl;
 	case KVM_REG_ARM_TIMER_CNT:
-		return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+		return kvm_phys_timer_read() - vtimer->cntvoff;
 	case KVM_REG_ARM_TIMER_CVAL:
 		return vtimer->cnt_cval;
 	}
@@ -510,11 +533,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-void kvm_timer_init(struct kvm *kvm)
-{
-	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
-}
-
 /*
  * On VHE system, we only need to configure trap on physical timer and counter
  * accesses in EL0 and EL1 once, not for every world switch.
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index 0cf08953e81c..4734915ab71f 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -53,7 +53,6 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 
 void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	u64 val;
@@ -71,7 +70,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 	}
 
 	if (timer->enabled) {
-		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
+		write_sysreg(vtimer->cntvoff, cntvoff_el2);
 		write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
 		isb();
 		write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
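
For context, a minimal stand-alone sketch (not kernel code; the struct and helper names below are hypothetical stand-ins) of the arithmetic this patch keeps per vcpu: the guest-visible virtual counter is the physical counter minus cntvoff, and writing KVM_REG_ARM_TIMER_CNT amounts to choosing cntvoff = CNTPCT - value for every vtimer context of the VM.

/* Illustrative only: models the CNTVCT = CNTPCT - CNTVOFF relation. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct vtimer_ctx {
	uint64_t cntvoff;	/* per-vcpu virtual offset */
};

/* Guest-visible virtual count for a given physical count. */
static uint64_t virt_count(uint64_t phys_count, const struct vtimer_ctx *vt)
{
	return phys_count - vt->cntvoff;
}

/*
 * Setting the virtual counter to 'value' picks cntvoff = phys - value and
 * applies it to every vcpu of the VM, so all vtimers stay in sync.
 */
static void set_virt_count(struct vtimer_ctx *vt, unsigned int nr_vcpus,
			   uint64_t phys_count, uint64_t value)
{
	for (unsigned int i = 0; i < nr_vcpus; i++)
		vt[i].cntvoff = phys_count - value;
}

int main(void)
{
	struct vtimer_ctx vcpus[2];
	uint64_t phys = 1000000;

	set_virt_count(vcpus, 2, phys, 100);
	assert(virt_count(phys, &vcpus[0]) == 100);
	/* 500 physical ticks later, the guest sees 500 more virtual ticks. */
	assert(virt_count(phys + 500, &vcpus[1]) == 600);
	printf("vcpu0: %llu, vcpu1: %llu\n",
	       (unsigned long long)virt_count(phys, &vcpus[0]),
	       (unsigned long long)virt_count(phys + 500, &vcpus[1]));
	return 0;
}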