Diffstat (limited to 'virt/kvm/dirty_ring.c')
 virt/kvm/dirty_ring.c | 46 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index d6fabf238032..c1cd7dfe4a90 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -21,12 +21,26 @@ u32 kvm_dirty_ring_get_rsvd_entries(void)
return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}
+bool kvm_use_dirty_bitmap(struct kvm *kvm)
+{
+ lockdep_assert_held(&kvm->slots_lock);
+
+ return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
+}
+
+#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
+bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
+static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}
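
For context, kvm_use_dirty_bitmap() is meant to gate the bitmap-based dirty-log paths: it returns true when the dirty ring is disabled, or when the ring is paired with the bitmap. A minimal sketch of the intended use, loosely following how a GET_DIRTY_LOG-style ioctl path can reject bitmap operations when only the ring is in use (illustrative names and structure, not part of this patch):

	/*
	 * Illustrative only: a GET_DIRTY_LOG-style path bailing out when
	 * the bitmap is not in use.  kvm_use_dirty_bitmap() asserts that
	 * kvm->slots_lock is held, so take it first.
	 */
	static int get_dirty_log_sketch(struct kvm *kvm, struct kvm_dirty_log *log)
	{
		int r = -ENXIO;

		mutex_lock(&kvm->slots_lock);

		if (!kvm_use_dirty_bitmap(kvm))
			goto out;	/* only the dirty ring is in use */

		/* ... copy memslot->dirty_bitmap out to log->dirty_bitmap ... */
		r = 0;
	out:
		mutex_unlock(&kvm->slots_lock);
		return r;
	}
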
@@ -142,13 +156,19 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
+ /*
+ * The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
+ * by the VCPU thread the next time it enters the guest.
+ */
+
trace_kvm_dirty_ring_reset(ring);
return count;
}
-void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
+ struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
struct kvm_dirty_gfn *entry;
/* It should never get full */
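
With the signature change, callers hand over the vCPU instead of its ring, which is what lets the push path raise a request on that vCPU in the next hunk. A caller updates roughly like this (illustrative, not part of this patch):

	/* Before: the caller passed the ring directly. */
	kvm_dirty_ring_push(&vcpu->dirty_ring, slot, rel_gfn);

	/* After: pass the vCPU so the push path can set
	 * KVM_REQ_DIRTY_RING_SOFT_FULL once the soft limit is hit. */
	kvm_dirty_ring_push(vcpu, slot, rel_gfn);
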
@@ -166,6 +186,28 @@ void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
kvm_dirty_gfn_set_dirtied(entry);
ring->dirty_index++;
trace_kvm_dirty_ring_push(ring, slot, offset);
+
+ if (kvm_dirty_ring_soft_full(ring))
+ kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
+}
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
+{
+ /*
+ * The VCPU isn't runnable when the dirty ring becomes soft full.
+ * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
+ * the VCPU from running until the dirty pages are harvested and
+ * the dirty ring is reset by userspace.
+ */
+ if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
+ kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+ kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
+ trace_kvm_dirty_ring_exit(vcpu);
+ return true;
+ }
+
+ return false;
}
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
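
Taken together, the soft-full machinery is driven from the arch vCPU run loop: kvm_dirty_ring_push() raises KVM_REQ_DIRTY_RING_SOFT_FULL, and the loop checks it before every guest entry. A minimal sketch of such a run-loop fragment (illustrative; exact placement is arch-specific):

	/*
	 * Illustrative run-loop fragment: exit to userspace with
	 * KVM_EXIT_DIRTY_RING_FULL while the ring is still soft full.
	 * kvm_dirty_ring_check_request() re-raises the request itself,
	 * so the vCPU keeps exiting until userspace harvests the
	 * entries and resets the ring with KVM_RESET_DIRTY_RINGS.
	 */
	if (kvm_dirty_ring_check_request(vcpu))
		return 0;	/* vcpu->run->exit_reason is already set */

Note the double check in kvm_dirty_ring_check_request(): if userspace reset the ring between the push and the next guest entry, kvm_dirty_ring_soft_full() is false, so the request is consumed without forcing another exit and the vCPU proceeds into the guest.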