author		Alexandru Elisei <alexandru.elisei@arm.com>	2020-09-10 14:33:51 +0100
committer	Marc Zyngier <maz@kernel.org>	2020-09-18 16:10:15 +0100
commit		523b3999e5f620cb5ccce6a7ca2780a4cab579a2 (patch)
tree		25e80cd9568c37a242acd956261fa8a08935fb13 /arch/arm64/kvm/mmu.c
parent		c9b69a0cf0b4336fe7d2e35c46273debc68910fd (diff)
KVM: arm64: Try PMD block mappings if PUD mappings are not supported
When userspace uses hugetlbfs for the VM memory, user_mem_abort() tries
to use the same block size to map the faulting IPA in stage 2. If stage 2
cannot use the same block mapping because the block size doesn't fit in
the memslot or the memslot is not properly aligned, user_mem_abort() will
fall back to a page mapping, regardless of the block size. We can do
better for PUD-backed hugetlbfs by checking if a PMD block mapping is
supported before deciding to use a page.

vma_pagesize is an unsigned long; use 1UL instead of 1ULL when assigning
its value.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200910133351.118191-1-alexandru.elisei@arm.com
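[Editor's note] The fallback order the patch introduces (PUD block -> PMD block -> page)
can be sketched outside the kernel as follows. This is a minimal illustration, not the
kernel code: supports_stage2_block() is a hypothetical stand-in for
fault_supports_stage2_huge_mapping(), and the shift values assume 4K pages.

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12	/* 4K pages, illustrative */
	#define PMD_SHIFT	21	/* 2M block */
	#define PUD_SHIFT	30	/* 1G block */

	/* Hypothetical stand-in for fault_supports_stage2_huge_mapping(). */
	static bool supports_stage2_block(unsigned long size)
	{
		/* Pretend only blocks up to 2M fit the memslot. */
		return size <= (1UL << PMD_SHIFT);
	}

	/* Step down one block size at a time instead of jumping straight to a page. */
	static unsigned long pick_vma_shift(unsigned long vma_shift, bool *force_pte)
	{
		if (vma_shift == PUD_SHIFT &&
		    !supports_stage2_block(1UL << PUD_SHIFT))
			vma_shift = PMD_SHIFT;

		if (vma_shift == PMD_SHIFT &&
		    !supports_stage2_block(1UL << PMD_SHIFT)) {
			*force_pte = true;
			vma_shift = PAGE_SHIFT;
		}

		return vma_shift;
	}

	int main(void)
	{
		bool force_pte = false;
		unsigned long shift = pick_vma_shift(PUD_SHIFT, &force_pte);

		/* Prints 21: a PUD-backed VMA falls back to a PMD block mapping. */
		printf("vma_shift = %lu, force_pte = %d\n", shift, force_pte);
		return 0;
	}

Before the patch, the equivalent of pick_vma_shift() would have returned PAGE_SHIFT as
soon as the 1G block failed the check, even though a 2M block would have fit.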
Diffstat (limited to 'arch/arm64/kvm/mmu.c')
-rw-r--r--	arch/arm64/kvm/mmu.c	19
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 21b70abf65a7..852497bd6d1e 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -779,16 +779,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else
 		vma_shift = PAGE_SHIFT;
 
-	vma_pagesize = 1ULL << vma_shift;
 	if (logging_active ||
-	    (vma->vm_flags & VM_PFNMAP) ||
-	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+	    (vma->vm_flags & VM_PFNMAP)) {
 		force_pte = true;
-		vma_pagesize = PAGE_SIZE;
+		vma_shift = PAGE_SHIFT;
+	}
+
+	if (vma_shift == PUD_SHIFT &&
+	    !fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+		vma_shift = PMD_SHIFT;
+
+	if (vma_shift == PMD_SHIFT &&
+	    !fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+		force_pte = true;
+		vma_shift = PAGE_SHIFT;
 	}
 
+	vma_pagesize = 1UL << vma_shift;
 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
-		fault_ipa &= huge_page_mask(hstate_vma(vma));
+		fault_ipa &= ~(vma_pagesize - 1);
 
 	gfn = fault_ipa >> PAGE_SHIFT;
 	mmap_read_unlock(current->mm);
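[Editor's note] The last hunk changes how the faulting IPA is aligned: after the
fallback, vma_pagesize can be smaller than the hugetlbfs page size (a PUD-backed VMA
may end up with a PMD mapping), so the mask must be derived from the chosen stage 2
block size rather than from huge_page_mask(). A minimal standalone check of the new
expression, with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long vma_pagesize = 1UL << 21;	/* PMD_SIZE with 4K pages */
		unsigned long fault_ipa = 0x40212345UL;	/* arbitrary faulting IPA */

		/* Same expression as the patch: round down to the block boundary. */
		fault_ipa &= ~(vma_pagesize - 1);

		printf("aligned IPA = 0x%lx\n", fault_ipa);	/* prints 0x40200000 */
		return 0;
	}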