author    Christoffer Dall <christoffer.dall@arm.com>    2018-12-11 15:26:31 +0100
committer Marc Zyngier <marc.zyngier@arm.com>            2019-02-19 21:05:35 +0000
commit    e329fb75d519e3dc3eb11b22d5bb846516be3521
tree      ff5077b7233df0346b04fb23e5408edde1cca00e    /virt/kvm/arm/mmu.c
parent    32f139551954512bfdf9d558341af453bb8b12b4
KVM: arm/arm64: Factor out VMID into struct kvm_vmid
In preparation for nested virtualization, where we are going to have more
than a single VMID per VM, let's factor out the VMID data into a separate
VMID data structure and change the VMID allocator to operate on this new
structure instead of using a struct kvm. This also means that update_vttbr
now becomes update_vmid, and that the vttbr itself is generated on the fly
based on the stage 2 page table base address and the vmid.

We cache the physical address of the pgd when allocating the pgd to avoid
doing the calculation on every entry to the guest and to avoid calling into
potentially non-hyp-mapped code from hyp/EL2.

If we wanted to merge the VMID allocator with the arm64 ASID allocator at
some point in the future, it should actually become easier to do that after
this patch.

Note that to avoid mapping the kvm_vmid_bits variable into hyp, we simply
forego the masking of the vmid value in kvm_get_vttbr and rely on
update_vmid to always assign a valid vmid value (within the supported
range).

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
[maz: minor cleanups]
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
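[Editor's note] The "separate VMID data structure" referred to above is not
visible in this file's diff (it lives in the arm/arm64 KVM headers). As a
minimal sketch of what the factored-out state could look like, with field
names assumed from the description above rather than taken from the hunks
below:

	/* Sketch only: per-VM VMID state factored out of struct kvm_arch. */
	struct kvm_vmid {
		u64	vmid_gen;	/* allocator generation this VMID belongs to */
		u32	vmid;		/* the VMID value programmed into VTTBR */
	};

	struct kvm_arch {
		struct kvm_vmid	vmid;		/* replaces the old per-kvm vmid/vmid_gen */
		pgd_t		*pgd;		/* stage 2 pgd (kernel VA) */
		phys_addr_t	pgd_phys;	/* cached PA of pgd, set in the hunk below */
		/* ... */
	};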
Diffstat (limited to 'virt/kvm/arm/mmu.c')
-rw-r--r--  virt/kvm/arm/mmu.c  7
1 file changed, 7 insertions, 0 deletions
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..f8dda452ea24 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -908,6 +908,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
+	phys_addr_t pgd_phys;
 	pgd_t *pgd;
 
 	if (kvm->arch.pgd != NULL) {
@@ -920,7 +921,12 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (!pgd)
 		return -ENOMEM;
 
+	pgd_phys = virt_to_phys(pgd);
+	if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
+		return -EINVAL;
+
 	kvm->arch.pgd = pgd;
+	kvm->arch.pgd_phys = pgd_phys;
 	return 0;
 }
 
@@ -1008,6 +1014,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
 		pgd = READ_ONCE(kvm->arch.pgd);
 		kvm->arch.pgd = NULL;
+		kvm->arch.pgd_phys = 0;
 	}
 	spin_unlock(&kvm->mmu_lock);
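[Editor's note] The cached kvm->arch.pgd_phys set above is what lets the
vttbr be generated on the fly at guest entry without calling virt_to_phys()
from hyp/EL2. A minimal sketch of such a helper, assuming the kvm_vmid
layout sketched earlier and a VTTBR_VMID_SHIFT constant (the actual helper
is defined in the arch headers, not in this file):

	static inline u64 kvm_get_vttbr(struct kvm *kvm)
	{
		struct kvm_vmid *vmid = &kvm->arch.vmid;
		/* Already validated against kvm_vttbr_baddr_mask() at allocation time. */
		u64 baddr = kvm->arch.pgd_phys;

		/*
		 * No masking of vmid->vmid here: update_vmid() is relied upon to
		 * only assign values within the supported range, so kvm_vmid_bits
		 * never needs to be mapped into hyp.
		 */
		return baddr | ((u64)vmid->vmid << VTTBR_VMID_SHIFT);
	}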