commit     509c594ca2dc8828b7b5d7b33192384741567cdf
parent     537a17b3149300987456e8949ccb991e604047d6
tree       6918e6b1bd3839ba5755a91ce3535b7acdee9344 /arch
author     Sean Christopherson <seanjc@google.com>    2021-12-06 20:54:12 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>        2021-12-08 04:24:20 -0500
KVM: arm64: Use "new" memslot instead of userspace memory region
Get the slot ID, hva, etc. from the "new" memslot instead of the
userspace memory region when preparing/committing a memory region. This
will allow a future commit to drop @mem from the prepare/commit hooks
once all architectures convert to using "new".
Opportunistically defer computing the hva begin+end until after the DELETE
case has been filtered out, in anticipation of a future commit passing NULL
for @new when deleting a memslot (see the sketch after the diff).
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <c019d00c2531520c52e0b52dfda1be5aa898103c.1638817639.git.maciej.szmigiero@oracle.com>
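
For context on why the conversion is safe: every field the arm64 hooks read
from @mem is also carried by, or derivable from, the memslot that generic KVM
code builds from the userspace region. The following minimal, self-contained C
sketch illustrates that correspondence; struct user_region, struct memslot,
and fill_slot() are pared-down stand-ins named here for illustration (the real
types are struct kvm_userspace_memory_region and struct kvm_memory_slot), and
PAGE_SHIFT is assumed to be 12.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4KiB pages */

/* Pared-down stand-in for struct kvm_userspace_memory_region (@mem). */
struct user_region {
        uint32_t slot;
        uint32_t flags;
        uint64_t guest_phys_addr;
        uint64_t memory_size;    /* bytes */
        uint64_t userspace_addr; /* hva */
};

/* Pared-down stand-in for struct kvm_memory_slot (@new). */
struct memslot {
        uint64_t base_gfn;
        uint64_t npages;         /* pages, not bytes */
        uint64_t userspace_addr;
        uint32_t flags;
        int32_t id;
};

/* Loosely mirrors how generic KVM code derives the memslot from @mem. */
static void fill_slot(struct memslot *new, const struct user_region *mem)
{
        new->base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        new->npages = mem->memory_size >> PAGE_SHIFT;
        new->userspace_addr = mem->userspace_addr;
        new->flags = mem->flags;
        new->id = (int32_t)mem->slot;
}

int main(void)
{
        struct user_region mem = {
                .slot = 3,
                .flags = 0,
                .guest_phys_addr = 0x40000000,
                .memory_size = 1ULL << 20, /* 1 MiB */
                .userspace_addr = 0x7f0000000000ULL,
        };
        struct memslot new;

        fill_slot(&new, &mem);

        /* Every read the patch converts has an equivalent on @new. */
        assert(new.id == (int32_t)mem.slot);
        assert(new.flags == mem.flags);
        assert(new.userspace_addr == mem.userspace_addr);
        assert((new.npages << PAGE_SHIFT) == mem.memory_size);

        printf("hva [%#llx, %#llx), %llu pages\n",
               (unsigned long long)new.userspace_addr,
               (unsigned long long)(new.userspace_addr +
                                    (new.npages << PAGE_SHIFT)),
               (unsigned long long)new.npages);
        return 0;
}

The last assertion only round-trips because KVM requires memory_size to be
page-aligned; that equivalence is exactly the mem->memory_size to
new->npages << PAGE_SHIFT substitution in the third hunk of the diff below.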
Diffstat (limited to 'arch')
 arch/arm64/kvm/mmu.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5d474360bf6c..dd95350ea15d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1473,14 +1473,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
          * allocated dirty_bitmap[], dirty pages will be tracked while the
          * memory slot is write protected.
          */
-        if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+        if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                 /*
                  * If we're with initial-all-set, we don't need to write
                  * protect any pages because they're all reported as dirty.
                  * Huge pages and normal pages will be write protect gradually.
                  */
                 if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
-                        kvm_mmu_wp_memory_region(kvm, mem->slot);
+                        kvm_mmu_wp_memory_region(kvm, new->id);
                 }
         }
 }
@@ -1491,8 +1491,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_memory_slot *new,
                                    enum kvm_mr_change change)
 {
-        hva_t hva = mem->userspace_addr;
-        hva_t reg_end = hva + mem->memory_size;
+        hva_t hva, reg_end;
         int ret = 0;
 
         if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1506,6 +1505,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
                 return -EFAULT;
 
+        hva = new->userspace_addr;
+        reg_end = hva + (new->npages << PAGE_SHIFT);
+
         mmap_read_lock(current->mm);
         /*
          * A memory region could potentially cover multiple VMAs, and any holes
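
As for the deferred hva computation, the point is ordering: after this patch
the DELETE case returns before @new is dereferenced, so a future NULL @new on
delete is harmless. A minimal sketch of that control flow, with hypothetical
stand-ins (prepare_region(), enum mr_change, and the trimmed struct memslot
here are not the kernel's types):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4KiB pages */

/* Hypothetical stand-in; the kernel's enum kvm_mr_change is analogous. */
enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY };

struct memslot {
        uint64_t npages;
        uint64_t userspace_addr;
};

/*
 * Control flow after this patch: hva/reg_end are computed only after the
 * DELETE case has returned, so @new is never dereferenced on delete.
 */
static int prepare_region(const struct memslot *new, enum mr_change change)
{
        uint64_t hva, reg_end;

        if (change != MR_CREATE && change != MR_MOVE &&
            change != MR_FLAGS_ONLY)
                return 0; /* DELETE: @new is not touched */

        hva = new->userspace_addr;
        reg_end = hva + (new->npages << PAGE_SHIFT);

        printf("validating hva range [%#llx, %#llx)\n",
               (unsigned long long)hva, (unsigned long long)reg_end);
        return 0;
}

int main(void)
{
        struct memslot slot = {
                .npages = 256,
                .userspace_addr = 0x7f0000000000ULL,
        };

        prepare_region(&slot, MR_CREATE);
        prepare_region(NULL, MR_DELETE); /* safe: filtered out up front */
        return 0;
}

Had hva/reg_end been initialized from @new in the declarations (a naive
conversion), the prepare_region(NULL, MR_DELETE) call would dereference a NULL
pointer before the early return could filter it out.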