path: root/arch/x86/kvm/lapic.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2020-04-02 15:13:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-02 15:13:15 -0700
commit     8c1b724ddb218f221612d4c649bc9c7819d8d7a6
tree       0e226f4156b554eec2690adb8f30ba54b15b68cc /arch/x86/kvm/lapic.c
parent     f14a9532ee30c68a56ff502c382860f674cc180c
parent     514ccc194971d0649e4e7ec8a9b3a6e33561d7bf
download   linux-8c1b724ddb218f221612d4c649bc9c7819d8d7a6.tar.bz2
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:

 "ARM:
   - GICv4.1 support
   - 32bit host removal

  PPC:
   - secure (encrypted) guests under the Protected Execution Framework
     ultravisor

  s390:
   - allow disabling GISA (hardware interrupt injection) and protected
     VMs/ultravisor support

  x86:
   - New dirty bitmap flag that sets all bits in the bitmap when dirty
     page logging is enabled; this is faster because it doesn't require
     bulk modification of the page tables

   - Initial work on making nested SVM event injection more similar to
     VMX, and less buggy

   - Various cleanups to MMU code (though the big ones and related
     optimizations were delayed to 5.8). Instead of using cr3 in
     function names, which occasionally means eptp, KVM too has
     standardized on "pgd"

   - A large refactoring of CPUID features, which now use an array that
     parallels the core x86_features

   - Some removal of pointer chasing from kvm_x86_ops, which will also
     be switched to static calls as soon as they are available

   - New Tigerlake CPUID features

   - More bugfixes, optimizations and cleanups

  Generic:
   - selftests: cleanups, new MMU notifier stress test, steal-time test
   - CSV output for kvm_stat"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (277 commits)
  x86/kvm: fix a missing-prototypes "vmread_error"
  KVM: x86: Fix BUILD_BUG() in __cpuid_entry_get_reg() w/ CONFIG_UBSAN=y
  KVM: VMX: Add a trampoline to fix VMREAD error handling
  KVM: SVM: Annotate svm_x86_ops as __initdata
  KVM: VMX: Annotate vmx_x86_ops as __initdata
  KVM: x86: Drop __exit from kvm_x86_ops' hardware_unsetup()
  KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
  KVM: x86: Set kvm_x86_ops only after ->hardware_setup() completes
  KVM: VMX: Configure runtime hooks using vmx_x86_ops
  KVM: VMX: Move hardware_setup() definition below vmx_x86_ops
  KVM: x86: Move init-only kvm_x86_ops to separate struct
  KVM: Pass kvm_init()'s opaque param to additional arch funcs
  s390/gmap: return proper error code on ksm unsharing
  KVM: selftests: Fix cosmetic copy-paste error in vm_mem_region_move()
  KVM: Fix out of range accesses to memslots
  KVM: X86: Micro-optimize IPI fastpath delay
  KVM: X86: Delay read msr data iff writes ICR MSR
  KVM: PPC: Book3S HV: Add a capability for enabling secure guests
  KVM: arm64: GICv4.1: Expose HW-based SGIs in debugfs
  KVM: arm64: GICv4.1: Allow non-trapping WFI when using HW SGIs
  ...
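One of the x86 items above, "Copy kvm_x86_ops by value to eliminate layer of
indirection", is what turns every kvm_x86_ops->hook(...) in the diff below
into kvm_x86_ops.hook(...). A minimal sketch of the idea follows; the
two-member ops table and copy_vendor_ops() are simplified stand-ins for
illustration, not the real kernel definitions:

	#include <string.h>

	/* Simplified stand-in for the real struct kvm_x86_ops. */
	struct kvm_x86_ops {
		void (*hwapic_irr_update)(void *vcpu, int max_irr);
		int  (*sync_pir_to_irr)(void *vcpu);
	};

	/* Before: a module-level pointer, so every call chases one extra
	 * pointer: kvm_x86_ops_ptr->sync_pir_to_irr(vcpu). */
	struct kvm_x86_ops *kvm_x86_ops_ptr;

	/* After: a by-value copy; calls become
	 * kvm_x86_ops.sync_pir_to_irr(vcpu), with no extra load, and the
	 * struct is ready for a later conversion to static calls. */
	struct kvm_x86_ops kvm_x86_ops;

	/* At hardware setup, the vendor (VMX or SVM) table is copied once. */
	void copy_vendor_ops(const struct kvm_x86_ops *vendor_ops)
	{
		memcpy(&kvm_x86_ops, vendor_ops, sizeof(kvm_x86_ops));
	}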
Diffstat (limited to 'arch/x86/kvm/lapic.c')
-rw-r--r--  arch/x86/kvm/lapic.c  85
1 file changed, 53 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 7356a56e6282..ca80daf8f878 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -164,14 +164,28 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
kvfree(map);
}
-static void recalculate_apic_map(struct kvm *kvm)
+void kvm_recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
u32 max_id = 255; /* enough space for any xAPIC ID */
+ if (!kvm->arch.apic_map_dirty) {
+ /*
+ * Read kvm->arch.apic_map_dirty before
+ * kvm->arch.apic_map
+ */
+ smp_rmb();
+ return;
+ }
+
mutex_lock(&kvm->arch.apic_map_lock);
+ if (!kvm->arch.apic_map_dirty) {
+ /* Someone else has updated the map. */
+ mutex_unlock(&kvm->arch.apic_map_lock);
+ return;
+ }
kvm_for_each_vcpu(i, vcpu, kvm)
if (kvm_apic_present(vcpu))
@@ -236,6 +250,12 @@ out:
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
+ /*
+ * Write kvm->arch.apic_map before
+ * clearing apic->apic_map_dirty
+ */
+ smp_wmb();
+ kvm->arch.apic_map_dirty = false;
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
@@ -257,20 +277,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
else
static_key_slow_inc(&apic_sw_disabled.key);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
kvm_lapic_set_reg(apic, APIC_ID, id << 24);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
kvm_lapic_set_reg(apic, APIC_LDR, id);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -286,7 +306,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
kvm_lapic_set_reg(apic, APIC_ID, id);
kvm_lapic_set_reg(apic, APIC_LDR, ldr);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -294,11 +314,6 @@ static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}
-static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
-{
- return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
-}
-
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
@@ -448,7 +463,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
if (unlikely(vcpu->arch.apicv_active)) {
/* need to update RVI */
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
- kvm_x86_ops->hwapic_irr_update(vcpu,
+ kvm_x86_ops.hwapic_irr_update(vcpu,
apic_find_highest_irr(apic));
} else {
apic->irr_pending = false;
@@ -473,7 +488,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
* just set SVI.
*/
if (unlikely(vcpu->arch.apicv_active))
- kvm_x86_ops->hwapic_isr_update(vcpu, vec);
+ kvm_x86_ops.hwapic_isr_update(vcpu, vec);
else {
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -521,7 +536,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
* and must be left alone.
*/
if (unlikely(vcpu->arch.apicv_active))
- kvm_x86_ops->hwapic_isr_update(vcpu,
+ kvm_x86_ops.hwapic_isr_update(vcpu,
apic_find_highest_isr(apic));
else {
--apic->isr_count;
@@ -659,7 +674,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
if (apic->vcpu->arch.apicv_active)
- highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
+ highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1048,7 +1063,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}
- if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+ if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
@@ -1226,7 +1241,7 @@ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
-static void apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
+void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
struct kvm_lapic_irq irq;
@@ -1737,7 +1752,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
{
WARN_ON(preemptible());
WARN_ON(!apic->lapic_timer.hv_timer_in_use);
- kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+ kvm_x86_ops.cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
}
@@ -1748,13 +1763,13 @@ static bool start_hv_timer(struct kvm_lapic *apic)
bool expired;
WARN_ON(preemptible());
- if (!kvm_x86_ops->set_hv_timer)
+ if (!kvm_x86_ops.set_hv_timer)
return false;
if (!ktimer->tscdeadline)
return false;
- if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
+ if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
return false;
ktimer->hv_timer_in_use = true;
@@ -1917,7 +1932,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
} else
ret = 1;
break;
@@ -1946,7 +1961,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_ICR:
/* No delay here, so we always clear the pending bit */
val &= ~(1 << 12);
- apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
+ kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
kvm_lapic_set_reg(apic, APIC_ICR, val);
break;
@@ -2023,6 +2038,8 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;
}
+ kvm_recalculate_apic_map(apic->vcpu->kvm);
+
return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
@@ -2171,7 +2188,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
static_key_slow_dec_deferred(&apic_hw_disabled);
} else {
static_key_slow_inc(&apic_hw_disabled.key);
- recalculate_apic_map(vcpu->kvm);
+ vcpu->kvm->arch.apic_map_dirty = true;
}
}
@@ -2179,7 +2196,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
- kvm_x86_ops->set_virtual_apic_mode(vcpu);
+ kvm_x86_ops.set_virtual_apic_mode(vcpu);
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
@@ -2212,6 +2229,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!apic)
return;
+ vcpu->kvm->arch.apic_map_dirty = false;
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2256,13 +2274,15 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
if (vcpu->arch.apicv_active) {
- kvm_x86_ops->apicv_post_state_restore(vcpu);
- kvm_x86_ops->hwapic_irr_update(vcpu, -1);
- kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+ kvm_x86_ops.apicv_post_state_restore(vcpu);
+ kvm_x86_ops.hwapic_irr_update(vcpu, -1);
+ kvm_x86_ops.hwapic_isr_update(vcpu, -1);
}
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
+
+ kvm_recalculate_apic_map(vcpu->kvm);
}
/*
@@ -2484,17 +2504,18 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
struct kvm_lapic *apic = vcpu->arch.apic;
int r;
-
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
r = kvm_apic_state_fixup(vcpu, s, true);
- if (r)
+ if (r) {
+ kvm_recalculate_apic_map(vcpu->kvm);
return r;
+ }
memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
- recalculate_apic_map(vcpu->kvm);
+ kvm_recalculate_apic_map(vcpu->kvm);
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
@@ -2506,10 +2527,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_apic_update_apicv(vcpu);
apic->highest_isr_cache = -1;
if (vcpu->arch.apicv_active) {
- kvm_x86_ops->apicv_post_state_restore(vcpu);
- kvm_x86_ops->hwapic_irr_update(vcpu,
+ kvm_x86_ops.apicv_post_state_restore(vcpu);
+ kvm_x86_ops.hwapic_irr_update(vcpu,
apic_find_highest_irr(apic));
- kvm_x86_ops->hwapic_isr_update(vcpu,
+ kvm_x86_ops.hwapic_isr_update(vcpu,
apic_find_highest_isr(apic));
}
kvm_make_request(KVM_REQ_EVENT, vcpu);
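
The core of this change is the dirty-flag batching visible in
kvm_recalculate_apic_map() above: register writes only set
kvm->arch.apic_map_dirty, and the map is rebuilt at most once at the end of
kvm_lapic_reg_write(), with smp_wmb()/smp_rmb() ordering the flag against the
published map. A self-contained sketch of that pattern, using C11 atomics and
a pthread mutex in place of the kernel primitives (all names below are
illustrative, not the kernel's):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool map_dirty;            /* kvm->arch.apic_map_dirty */
	static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *apic_map;                   /* published map (RCU in the kernel) */
	static int map_storage;

	static void *rebuild_map(void)
	{
		/* Stand-in for allocating a new map and scanning all vCPUs. */
		return &map_storage;
	}

	/* Writers (e.g. an APIC_ID or APIC_LDR write) just mark the map stale. */
	void mark_map_dirty(void)
	{
		atomic_store(&map_dirty, true);
	}

	/* Consumers rebuild at most once per batch of writes. */
	void recalculate_map(void)
	{
		/* This acquire-load pairs with the release-store below,
		 * mirroring the smp_rmb()/smp_wmb() comments in the diff. */
		if (!atomic_load_explicit(&map_dirty, memory_order_acquire))
			return;

		pthread_mutex_lock(&map_lock);
		if (!atomic_load(&map_dirty)) {
			/* Someone else rebuilt the map while we waited. */
			pthread_mutex_unlock(&map_lock);
			return;
		}

		apic_map = rebuild_map();
		/* Publish the new map before clearing the flag. */
		atomic_store_explicit(&map_dirty, false, memory_order_release);
		pthread_mutex_unlock(&map_lock);
	}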