Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/arm.c	22
-rw-r--r--	virt/kvm/kvm_main.c	159
2 files changed, 160 insertions, 21 deletions
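
The hunks below take a struct kvm_clear_dirty_log argument; that structure is added by the same series to include/uapi/linux/kvm.h, which lies outside this 'virt' diffstat. For reference, a sketch of its uapi layout (the field comments describe how the code below uses it and are not taken from the header):

struct kvm_clear_dirty_log {
	__u32 slot;         /* address space id in bits 31..16, slot id in bits 15..0 */
	__u32 num_pages;    /* must be a multiple of 64 in this version */
	__u64 first_page;   /* must be a multiple of 64 in this version */
	union {
		void __user *dirty_bitmap;  /* userspace bitmap, one bit per page */
		__u64 padding2;
	};
};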
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index d9273f972828..87ecb5f3637d 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1202,14 +1202,30 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- bool is_dirty = false;
+ bool flush = false;
int r;
mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log_protect(kvm, log, &flush);
- if (is_dirty)
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
+
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+{
+ bool flush = false;
+ int r;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_clear_dirty_log_protect(kvm, log, &flush);
+
+ if (flush)
kvm_flush_remote_tlbs(kvm);
mutex_unlock(&kvm->slots_lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2679e476b6c3..0041947b7390 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1133,7 +1133,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
- * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
- * are dirty write protect them for next write.
+ * kvm_get_dirty_log_protect - get a snapshot of dirty pages
+ * and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address to which we copy the log
* @is_dirty: flag set if any page is dirty
@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
*
*/
int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *is_dirty)
+ struct kvm_dirty_log *log, bool *flush)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -1176,37 +1176,114 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
return -ENOENT;
n = kvm_dirty_bitmap_bytes(memslot);
+ *flush = false;
+ if (kvm->manual_dirty_log_protect) {
+ /*
+ * Unlike kvm_get_dirty_log, we always return false in *flush,
+ * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
+ * is some code duplication between this function and
+ * kvm_get_dirty_log, but hopefully all architectures will
+ * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
+ * can be eliminated.
+ */
+ dirty_bitmap_buffer = dirty_bitmap;
+ } else {
+ dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
+ memset(dirty_bitmap_buffer, 0, n);
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < n / sizeof(long); i++) {
+ unsigned long mask;
+ gfn_t offset;
+
+ if (!dirty_bitmap[i])
+ continue;
+
+ *flush = true;
+ mask = xchg(&dirty_bitmap[i], 0);
+ dirty_bitmap_buffer[i] = mask;
+
+ if (mask) {
+ offset = i * BITS_PER_LONG;
+ kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
+ offset, mask);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+ }
+
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
+
+/**
+ * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
+ * and reenable dirty page tracking for the corresponding pages.
+ * @kvm: pointer to kvm instance
+ * @log: slot id and address from which to fetch the bitmap of dirty pages
+ */
+int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log, bool *flush)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int as_id, id, n;
+ gfn_t offset;
+ unsigned long i;
+ unsigned long *dirty_bitmap;
+ unsigned long *dirty_bitmap_buffer;
+
+ as_id = log->slot >> 16;
+ id = (u16)log->slot;
+ if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+ return -EINVAL;
+
+ if ((log->first_page & 63) || (log->num_pages & 63))
+ return -EINVAL;
+
+ slots = __kvm_memslots(kvm, as_id);
+ memslot = id_to_memslot(slots, id);
+
+ dirty_bitmap = memslot->dirty_bitmap;
+ if (!dirty_bitmap)
+ return -ENOENT;
+ n = kvm_dirty_bitmap_bytes(memslot);
+ *flush = false;
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
- memset(dirty_bitmap_buffer, 0, n);
+ if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
+ return -EFAULT;
spin_lock(&kvm->mmu_lock);
- *is_dirty = false;
- for (i = 0; i < n / sizeof(long); i++) {
- unsigned long mask;
- gfn_t offset;
-
- if (!dirty_bitmap[i])
+ for (offset = log->first_page,
+ i = offset / BITS_PER_LONG, n = log->num_pages / BITS_PER_LONG; n--;
+ i++, offset += BITS_PER_LONG) {
+ unsigned long mask = *dirty_bitmap_buffer++;
+ atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
+ if (!mask)
continue;
- *is_dirty = true;
-
- mask = xchg(&dirty_bitmap[i], 0);
- dirty_bitmap_buffer[i] = mask;
+ mask &= atomic_long_fetch_andnot(mask, p);
+ /*
+ * mask contains the bits that really have been cleared. This
+ * never includes any bits beyond the length of the memslot (if
+ * the length is not aligned to 64 pages), therefore it is not
+ * a problem if userspace sets them in log->dirty_bitmap.
+ */
if (mask) {
- offset = i * BITS_PER_LONG;
+ *flush = true;
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
offset, mask);
}
}
-
spin_unlock(&kvm->mmu_lock);
- if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
- return -EFAULT;
+
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
+EXPORT_SYMBOL_GPL(kvm_clear_dirty_log_protect);
#endif
bool kvm_largepages_enabled(void)
@@ -2948,6 +3025,10 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#endif
case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
+ case KVM_CAP_ENABLE_CAP_VM:
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+#endif
return 1;
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
@@ -2971,6 +3052,28 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
return kvm_vm_ioctl_check_extension(kvm, arg);
}
+int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
+{
+ return -EINVAL;
+}
+
+static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
+{
+ switch (cap->cap) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+ if (cap->flags || (cap->args[0] & ~1))
+ return -EINVAL;
+ kvm->manual_dirty_log_protect = cap->args[0];
+ return 0;
+#endif
+ default:
+ return kvm_vm_ioctl_enable_cap(kvm, cap);
+ }
+}
+
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2984,6 +3087,15 @@ static long kvm_vm_ioctl(struct file *filp,
case KVM_CREATE_VCPU:
r = kvm_vm_ioctl_create_vcpu(kvm, arg);
break;
+ case KVM_ENABLE_CAP: {
+ struct kvm_enable_cap cap;
+
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
+ break;
+ }
case KVM_SET_USER_MEMORY_REGION: {
struct kvm_userspace_memory_region kvm_userspace_mem;
@@ -3004,6 +3116,17 @@ static long kvm_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
break;
}
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CLEAR_DIRTY_LOG: {
+ struct kvm_clear_dirty_log log;
+
+ r = -EFAULT;
+ if (copy_from_user(&log, argp, sizeof(log)))
+ goto out;
+ r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+ break;
+ }
+#endif
#ifdef CONFIG_KVM_MMIO
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
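
For illustration only (not part of this commit), userspace would drive the new interface roughly as follows: enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT on the VM file descriptor through KVM_ENABLE_CAP, fetch the bitmap with KVM_GET_DIRTY_LOG (which, with the cap enabled, no longer clears or write-protects anything), and then re-enable dirty tracking for the handled pages with KVM_CLEAR_DIRTY_LOG. The slot id, range and error handling in this sketch are assumptions:

/* illustrative userspace sketch -- not part of this patch */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dirty_log_round(int vm_fd, void *bitmap, __u32 npages)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT,
		.args[0] = 1,			/* turn manual reprotect on */
	};
	struct kvm_dirty_log get = {
		.slot = 0,			/* assumed slot id */
		.dirty_bitmap = bitmap,
	};
	struct kvm_clear_dirty_log clear = {
		.slot = 0,
		.first_page = 0,		/* must be a multiple of 64 */
		.num_pages = npages,		/* must be a multiple of 64 */
		.dirty_bitmap = bitmap,
	};

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;
	/* snapshot the bitmap; no write protection happens here now */
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &get) < 0)
		return -1;
	/* ... migrate/handle the pages marked dirty in bitmap ... */
	/* re-enable dirty tracking only for the pages set in bitmap */
	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}

Splitting the snapshot from the reprotect step lets a live-migration loop clear dirty state in smaller chunks, so write faults are taken closer to the time the pages are actually resent instead of all at once after KVM_GET_DIRTY_LOG.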