author    | Marc Zyngier <marc.zyngier@arm.com>            | 2017-10-23 17:11:22 +0100
committer | Christoffer Dall <christoffer.dall@linaro.org> | 2018-01-08 15:20:46 +0100
commit    | 17ab9d57debaa53d665651e425a0efc4a893c039 (patch)
tree      | 7550e58a5d2997ed4a54108eb3e53f721a8b1472
parent    | 7a3796d2ef5bb948f709467eef1bf96edbfc67a0 (diff)
download  | linux-17ab9d57debaa53d665651e425a0efc4a893c039.tar.bz2
KVM: arm/arm64: Drop vcpu parameter from guest cache maintenance operations
The vcpu parameter isn't used for anything, and gets in the way of
further cleanups. Let's get rid of it.
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
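For quick orientation before the diff: the sketch below is a standalone, userspace-compilable illustration (not kernel code) of the simplified helper signatures after this change. The typedef and the printf bodies are stand-ins added purely for illustration; only the function names and parameter lists mirror the diff that follows.

/* Standalone sketch: illustrative only, not the kernel implementation. */
#include <stdio.h>

typedef unsigned long kvm_pfn_t;	/* stand-in for the kernel typedef */

/* After this commit the helpers take only the pfn and the size. */
static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	printf("clean dcache to PoC: pfn=%lu, size=%lu\n", pfn, size);
}

static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	printf("invalidate icache: pfn=%lu, size=%lu\n", pfn, size);
}

int main(void)
{
	/* A caller such as the stage-2 fault handler no longer threads a vcpu through. */
	clean_dcache_guest_page(42, 4096);
	invalidate_icache_guest_page(42, 4096);
	return 0;
}

The real maintenance is done by virtual address in __clean_dcache_guest_page() and __invalidate_icache_guest_page(); see the diff below for the actual bodies.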
-rw-r--r-- | arch/arm/include/asm/kvm_mmu.h   |  7
-rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h |  7
-rw-r--r-- | virt/kvm/arm/mmu.c               | 18
3 files changed, 12 insertions, 20 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index aab64fe52146..bc70a1f0f42d 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -150,9 +150,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
 }
 
-static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
-					     kvm_pfn_t pfn,
-					     unsigned long size)
+static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 {
 	/*
 	 * Clean the dcache to the Point of Coherency.
@@ -177,8 +175,7 @@ static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
 	}
 }
 
-static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
-						  kvm_pfn_t pfn,
+static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
 						  unsigned long size)
 {
 	u32 iclsz;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 126abefffe7f..06f1f9794679 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -252,17 +252,14 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
-					     kvm_pfn_t pfn,
-					     unsigned long size)
+static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 {
 	void *va = page_address(pfn_to_page(pfn));
 
 	kvm_flush_dcache_to_poc(va, size);
 }
 
-static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
-						  kvm_pfn_t pfn,
+static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
 						  unsigned long size)
 {
 	if (icache_is_aliasing()) {
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b83b5a8442bb..a1ea43fa75cf 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1276,16 +1276,14 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void clean_dcache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
-				    unsigned long size)
+static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 {
-	__clean_dcache_guest_page(vcpu, pfn, size);
+	__clean_dcache_guest_page(pfn, size);
 }
 
-static void invalidate_icache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
-					 unsigned long size)
+static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
 {
-	__invalidate_icache_guest_page(vcpu, pfn, size);
+	__invalidate_icache_guest_page(pfn, size);
 }
 
 static void kvm_send_hwpoison_signal(unsigned long address,
@@ -1421,11 +1419,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		}
 
 		if (fault_status != FSC_PERM)
-			clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
+			clean_dcache_guest_page(pfn, PMD_SIZE);
 
 		if (exec_fault) {
 			new_pmd = kvm_s2pmd_mkexec(new_pmd);
-			invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
+			invalidate_icache_guest_page(pfn, PMD_SIZE);
 		} else if (fault_status == FSC_PERM) {
 			/* Preserve execute if XN was already cleared */
 			if (stage2_is_exec(kvm, fault_ipa))
@@ -1443,11 +1441,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		}
 
 		if (fault_status != FSC_PERM)
-			clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
+			clean_dcache_guest_page(pfn, PAGE_SIZE);
 
 		if (exec_fault) {
 			new_pte = kvm_s2pte_mkexec(new_pte);
-			invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
+			invalidate_icache_guest_page(pfn, PAGE_SIZE);
 		} else if (fault_status == FSC_PERM) {
 			/* Preserve execute if XN was already cleared */
 			if (stage2_is_exec(kvm, fault_ipa))