summaryrefslogtreecommitdiffstats
path: root/arch/arm64/include/asm/tlb.h
diff options
context:
space:
mode:
author: Will Deacon <will.deacon@arm.com> 2018-08-23 19:26:21 +0100
committer: Catalin Marinas <catalin.marinas@arm.com> 2018-09-11 16:49:11 +0100
commit: 67a902ac598dca056366a7342f401aa6f605072f (patch)
tree: c748c800db53cd3a9ee3c61d8bba481be1b54b02 /arch/arm64/include/asm/tlb.h
parent: d8289d3a5854a2a0ae144bff106a78738fe63050 (diff)
download: linux-67a902ac598dca056366a7342f401aa6f605072f.tar.bz2
arm64: tlbflush: Allow stride to be specified for __flush_tlb_range()
When we are unmapping intermediate page-table entries or huge pages, we
don't need to issue a TLBI instruction for every PAGE_SIZE chunk in the
VA range being unmapped.

Allow the invalidation stride to be passed to __flush_tlb_range(), and
adjust our "just nuke the ASID" heuristic to take this into account.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/include/asm/tlb.h')
-rw-r--r--  arch/arm64/include/asm/tlb.h  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a3233167be60..1e1f68ce28f4 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -53,7 +53,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
* the __(pte|pmd|pud)_free_tlb() functions, so last level
* TLBI is sufficient here.
*/
- __flush_tlb_range(&vma, tlb->start, tlb->end, true);
+ __flush_tlb_range(&vma, tlb->start, tlb->end, PAGE_SIZE, true);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,