author    Christoffer Dall <christoffer.dall@arm.com>  2019-01-04 21:09:05 +0100
committer Marc Zyngier <maz@kernel.org>                2020-07-07 09:28:37 +0100
commit    a0e50aa3f4a8a5c9fb791b8f32d624e2a1a735bd
tree      6df05ab2372edd3bdd87f49e692cc0cac333f726
parent    ae4bffb5552712986a6ad4009b38b6560d952c4e
KVM: arm64: Factor out stage 2 page table data from struct kvm
As we are about to reuse our stage 2 page table manipulation code for
shadow stage 2 page tables in the context of nested virtualization, we
are going to manage multiple stage 2 page tables for a single VM.

This requires some pretty invasive changes to our data structures: the
vmid and pgd pointers move into a separate structure, and pretty much
all of our mmu code changes to operate on this structure instead.

The new structure is called struct kvm_s2_mmu.

There is no intended functional change by this patch alone.

Reviewed-by: James Morse <james.morse@arm.com>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
[Designed data structure layout in collaboration]
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Co-developed-by: Marc Zyngier <maz@kernel.org>
[maz: Moved the last_vcpu_ran down to the S2 MMU structure as well]
Signed-off-by: Marc Zyngier <maz@kernel.org>
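For orientation, here is a condensed sketch of the new structure as this patch adds it to arch/arm64/include/asm/kvm_host.h. The fields follow the commit message (vmid, the stage 2 pgd, and the relocated last_vcpu_ran); the comments and exact layout are our paraphrase, not the verbatim source:

	struct kvm_s2_mmu {
		struct kvm_vmid	vmid;		/* VMID for this stage 2 context */

		/* stage 2 entry level table */
		pgd_t		*pgd;
		phys_addr_t	pgd_phys;

		/* the last vcpu id that ran on each physical CPU */
		int __percpu	*last_vcpu_ran;

		struct kvm	*kvm;		/* back-pointer to the owning VM */
	};

struct kvm_arch embeds one canonical struct kvm_s2_mmu, and each vcpu gains a struct kvm_s2_mmu *hw_mmu pointer naming the stage 2 context the hardware should use; the hunks below switch the nVHE code over to that pointer.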
Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe')
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c   2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/tlb.c     33
2 files changed, 17 insertions, 18 deletions
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index a1dcf59bd45e..37321b2157d9 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -194,7 +194,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg32_restore_state(vcpu);
__sysreg_restore_state_nvhe(guest_ctxt);
- __activate_vm(kern_hyp_va(vcpu->kvm));
+ __activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));
__activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);
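The only change here is the argument to __activate_vm(): instead of the hyp VA of struct kvm, the run loop now passes the vcpu's hardware MMU pointer. The helper itself lives in the shared hyp switch code; at this point in the series it is, roughly (a sketch from memory, not the verbatim source):

	static __always_inline void __activate_vm(struct kvm_s2_mmu *mmu)
	{
		__load_guest_stage2(mmu);
	}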
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index d4475f8340c4..11dbe031f0ba 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -12,7 +12,8 @@ struct tlb_inv_context {
u64 tcr;
};
-static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
+ struct tlb_inv_context *cxt)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
@@ -30,12 +31,10 @@ static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
isb();
}
- /* __load_guest_stage2() includes an ISB for the workaround. */
- __load_guest_stage2(kvm);
- asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
+ __load_guest_stage2(mmu);
}
-static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
write_sysreg(0, vttbr_el2);
@@ -47,15 +46,15 @@ static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
}
}
-void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
{
struct tlb_inv_context cxt;
dsb(ishst);
/* Switch to requested VMID */
- kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest(kvm, &cxt);
+ mmu = kern_hyp_va(mmu);
+ __tlb_switch_to_guest(mmu, &cxt);
/*
* We could do so much better if we had the VA as well.
@@ -98,39 +97,39 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (icache_is_vpipt())
__flush_icache_all();
- __tlb_switch_to_host(kvm, &cxt);
+ __tlb_switch_to_host(&cxt);
}
-void __kvm_tlb_flush_vmid(struct kvm *kvm)
+void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
dsb(ishst);
/* Switch to requested VMID */
- kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest(kvm, &cxt);
+ mmu = kern_hyp_va(mmu);
+ __tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host(kvm, &cxt);
+ __tlb_switch_to_host(&cxt);
}
-void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
- struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(kvm, &cxt);
+ mmu = kern_hyp_va(mmu);
+ __tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalle1);
dsb(nsh);
isb();
- __tlb_switch_to_host(kvm, &cxt);
+ __tlb_switch_to_host(&cxt);
}
void __kvm_flush_vm_context(void)
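On the host side, callers of these hypercalls now pass a pointer to a struct kvm_s2_mmu (typically &kvm->arch.mmu, the VM's canonical stage 2 context, or vcpu->arch.hw_mmu), and as the hunks above show, the first thing the nVHE code does is convert that kernel VA with kern_hyp_va() before dereferencing it at EL2. A hypothetical caller in the style of arch/arm64/kvm/mmu.c after this patch (a sketch under that assumption):

	/*
	 * EL1 host side: invalidate the stage 2 TLB entries for one IPA.
	 * kvm_call_hyp() issues the HVC that lands in the nVHE handler above,
	 * which then does the kern_hyp_va(mmu) conversion itself.
	 */
	static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
	{
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa);
	}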