author    Marc Zyngier <marc.zyngier@arm.com>    2014-02-18 14:29:03 +0000
committer Marc Zyngier <marc.zyngier@arm.com>    2014-03-03 01:15:22 +0000
commit    a3c8bd31af260a17d626514f636849ee1cd1f63e (patch)
tree      55b4d57d442e32271bab31615b28e089e616e12e
parent    4d44923b17bff283c002ed961373848284aaff1b (diff)
ARM: KVM: introduce kvm_p*d_addr_end
The use of p*d_addr_end with stage-2 translation is slightly dodgy, as the IPA is 40 bits, while all the p*d_addr_end helpers take an unsigned long (arm64 is fine with that, as unsigned long is 64 bits there).

The fix is to introduce 64bit clean versions of the same helpers, and use them in the stage-2 page table code.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
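
To make the truncation concrete, here is a minimal user-space sketch (not kernel code) of the problem the patch addresses. The PGDIR_SHIFT value and the helper names below are illustrative assumptions; the point is only that computing the boundary in an unsigned long (32 bits on 32-bit ARM) drops the upper bits of a 40-bit IPA, while holding it in a u64 keeps them.

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT 30                      /* illustrative: 1GB per first-level entry */
    #define PGDIR_SIZE  (1ULL << PGDIR_SHIFT)
    #define PGDIR_MASK  (~(PGDIR_SIZE - 1))

    /* Boundary held in a 32-bit type, as the generic helper effectively does on 32-bit ARM. */
    static uint64_t addr_end_ulong32(uint64_t addr, uint64_t end)
    {
            uint32_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;  /* bits above 31 are lost */
            return ((uint64_t)boundary - 1 < end - 1) ? boundary : end;
    }

    /* Boundary held in a u64, as the new kvm_p*d_addr_end helpers do. */
    static uint64_t addr_end_u64(uint64_t addr, uint64_t end)
    {
            uint64_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
            return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
            uint64_t addr = 0x1000000000ULL;   /* an IPA above 4GB */
            uint64_t end  = 0x1080000000ULL;

            /* Prints 0x40000000: below addr, so a table walk using it would go backwards. */
            printf("32-bit boundary: %#llx\n", (unsigned long long)addr_end_ulong32(addr, end));
            /* Prints 0x1040000000: the expected next boundary. */
            printf("64-bit boundary: %#llx\n", (unsigned long long)addr_end_u64(addr, end));
            return 0;
    }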
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h   | 13
-rw-r--r--  arch/arm/kvm/mmu.c               | 10
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h |  4
3 files changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 6d0f3d3023b7..891afe78311a 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -114,6 +114,19 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= L_PMD_S2_RDWR;
}
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end) \
+({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#define kvm_pud_addr_end(addr,end) (end)
+
+#define kvm_pmd_addr_end(addr, end) \
+({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
struct kvm;
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index fc71a8df0e13..c1c08b240f35 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -145,7 +145,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
pgd = pgdp + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
- addr = pud_addr_end(addr, end);
+ addr = kvm_pud_addr_end(addr, end);
continue;
}
@@ -155,13 +155,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
* move on.
*/
clear_pud_entry(kvm, pud, addr);
- addr = pud_addr_end(addr, end);
+ addr = kvm_pud_addr_end(addr, end);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
- addr = pmd_addr_end(addr, end);
+ addr = kvm_pmd_addr_end(addr, end);
continue;
}
@@ -176,10 +176,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
*/
if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr);
- next = pmd_addr_end(addr, end);
+ next = kvm_pmd_addr_end(addr, end);
if (page_empty(pmd) && !page_empty(pud)) {
clear_pud_entry(kvm, pud, addr);
- next = pud_addr_end(addr, end);
+ next = kvm_pud_addr_end(addr, end);
}
}
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6eaf69b5e42c..00c0cc8b8045 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -121,6 +121,10 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= PMD_S2_RDWR;
}
+#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
+#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
+#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
+
struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))