author     Quentin Perret <qperret@google.com>      2021-03-19 10:01:40 +0000
committer  Marc Zyngier <maz@kernel.org>            2021-03-19 12:01:22 +0000
commit     bc224df155c466178128a2950af16cba37b6f218 (patch)
tree       431ee687c306589253a0edf8b915aaf8c6ef4e67 /arch/arm64/kvm/hyp
parent     2fcb3a59401d2d12b5337b62c799eeb22cf40a2c (diff)
KVM: arm64: Introduce KVM_PGTABLE_S2_NOFWB stage 2 flag
In order to further configure stage 2 page-tables, pass flags to the init
function using a new enum. The first of these flags allows FWB to be disabled
even if the hardware supports it, as we will need to do so for the host stage 2.

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-33-qperret@google.com
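The enum and the renamed init function live in the headers (arch/arm64/include/asm/kvm_pgtable.h), which are outside the arch/arm64/kvm/hyp diffstat filter below. A minimal sketch of those declarations, reconstructed from the calls visible in this diff — not a verbatim copy of the patch:

```c
/*
 * Sketch of the header-side additions, inferred from the pgtable.c hunks
 * below. Exact formatting/values in the real patch may differ.
 */

/* Stage 2 configuration flags; NOFWB forces the non-FWB memattr encoding. */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB	= BIT(0),	/* BIT() from <linux/bits.h> */
};

int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
				  struct kvm_pgtable_mm_ops *mm_ops,
				  enum kvm_pgtable_stage2_flags flags);

/* Existing callers keep the old name and pass no flags by default. */
#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
	kvm_pgtable_stage2_init_flags(pgt, arch, mm_ops, 0)
```

With this in place, stage2_has_fwb() in the diff below reports FWB only when the CPU has ARM64_HAS_STAGE2_FWB *and* the caller did not pass KVM_PGTABLE_S2_NOFWB, and KVM_S2_MEMATTR() picks the matching memattr encoding per page-table instance.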
Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c | 56 ++++++++++++++++++----------
 1 file changed, 31 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index dc6ef2cfe3eb..b22b4860630c 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -508,11 +508,22 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
return vtcr;
}
-static int stage2_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
+static bool stage2_has_fwb(struct kvm_pgtable *pgt)
+{
+ if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ return false;
+
+ return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
+}
+
+#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
+
+static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
+ kvm_pte_t *ptep)
{
bool device = prot & KVM_PGTABLE_PROT_DEVICE;
- kvm_pte_t attr = device ? PAGE_S2_MEMATTR(DEVICE_nGnRE) :
- PAGE_S2_MEMATTR(NORMAL);
+ kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
+ KVM_S2_MEMATTR(pgt, NORMAL);
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
if (!(prot & KVM_PGTABLE_PROT_X))
@@ -749,7 +760,7 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
.arg = &map_data,
};
- ret = stage2_set_prot_attr(prot, &map_data.attr);
+ ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
if (ret)
return ret;
@@ -784,18 +795,10 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
return ret;
}
-static void stage2_flush_dcache(void *addr, u64 size)
-{
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
- return;
-
- __flush_dcache_area(addr, size);
-}
-
-static bool stage2_pte_cacheable(kvm_pte_t pte)
+static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
- return memattr == PAGE_S2_MEMATTR(NORMAL);
+ return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}
static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
@@ -821,8 +824,8 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
if (mm_ops->page_count(childp) != 1)
return 0;
- } else if (stage2_pte_cacheable(pte)) {
- need_flush = true;
+ } else if (stage2_pte_cacheable(pgt, pte)) {
+ need_flush = !stage2_has_fwb(pgt);
}
/*
@@ -833,7 +836,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
stage2_put_pte(ptep, mmu, addr, level, mm_ops);
if (need_flush) {
- stage2_flush_dcache(kvm_pte_follow(pte, mm_ops),
+ __flush_dcache_area(kvm_pte_follow(pte, mm_ops),
kvm_granule_size(level));
}
@@ -979,13 +982,14 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
+ struct kvm_pgtable *pgt = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
kvm_pte_t pte = *ptep;
- if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pte))
+ if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
return 0;
- stage2_flush_dcache(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+ __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
return 0;
}
@@ -994,17 +998,18 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
struct kvm_pgtable_walker walker = {
.cb = stage2_flush_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
- .arg = pgt->mm_ops,
+ .arg = pgt,
};
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ if (stage2_has_fwb(pgt))
return 0;
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
-int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
- struct kvm_pgtable_mm_ops *mm_ops)
+int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+ struct kvm_pgtable_mm_ops *mm_ops,
+ enum kvm_pgtable_stage2_flags flags)
{
size_t pgd_sz;
u64 vtcr = arch->vtcr;
@@ -1021,6 +1026,7 @@ int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
pgt->start_level = start_level;
pgt->mm_ops = mm_ops;
pgt->mmu = &arch->mmu;
+ pgt->flags = flags;
/* Ensure zeroed PGD pages are visible to the hardware walker */
dsb(ishst);
@@ -1101,7 +1107,7 @@ int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
u32 level;
int ret;
- ret = stage2_set_prot_attr(prot, &attr);
+ ret = stage2_set_prot_attr(pgt, prot, &attr);
if (ret)
return ret;
attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;
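For context, a hedged sketch of how a caller could use the new flag; the commit message says the host stage 2 will need FWB disabled, but the host_s2 names and setup function here are assumptions for illustration, not part of this patch:

```c
/*
 * Illustrative only: opting out of FWB with the new flag. The host_s2
 * identifiers are hypothetical; the real host stage 2 wiring lands in a
 * later patch of the series.
 */
static struct kvm_pgtable host_s2_pgt;

static int host_stage2_setup(struct kvm_arch *arch,
			     struct kvm_pgtable_mm_ops *mm_ops)
{
	/*
	 * Force the non-FWB memattr encoding even on FWB-capable CPUs, so
	 * stage2_has_fwb() returns false for this page-table and unmap/flush
	 * paths keep doing explicit dcache maintenance.
	 */
	return kvm_pgtable_stage2_init_flags(&host_s2_pgt, arch, mm_ops,
					     KVM_PGTABLE_S2_NOFWB);
}
```

Guest page-tables are unaffected: they continue to use kvm_pgtable_stage2_init(), which passes an empty flags value, so FWB behaviour there is still governed purely by the CPU capability.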