author     Linus Torvalds <torvalds@linux-foundation.org>  2018-08-23 14:55:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-23 14:55:01 -0700
commit     706a1ea65e6faaf853427a0e931f59d604dd45e3 (patch)
tree       2c0ca7652ac5398c05f389bdd7d5b5377d43fee7 /include
parent     d40acad1f1979194ecda83f77468751244b4b098 (diff)
parent     48a8b97cfd804a965fbbe7be2d56a7984ef6bdb1 (diff)
Merge branch 'tlb-fixes'
Merge fixes for missing TLB shootdowns.

This fixes a couple of cases that involved us possibly freeing page
table structures before the required TLB shootdown had been done.

There are a few cleanup patches to make the code easier to follow, and
to avoid some of the more problematic cases entirely when not necessary.

To make this easier for backports, it undoes the recent lazy TLB
patches, because the cleanups and fixes are more important, and Rik is
ok with re-doing them later when things have calmed down.

The missing TLB flush was only delayed, and the wrong ordering only
happened under memory pressure (and in theory under a couple of other
fairly theoretical situations), so this may have been all very unlikely
to have hit people in practice.

But getting the TLB shootdown wrong is _so_ hard to debug and see that I
consider this a critical fix. Many thanks to Jann Horn for having
debugged this.

* tlb-fixes:
  x86/mm: Only use tlb_remove_table() for paravirt
  mm: mmu_notifier fix for tlb_end_vma
  mm/tlb, x86/mm: Support invalidating TLB caches for RCU_TABLE_FREE
  mm/tlb: Remove tlb_remove_table() non-concurrent condition
  mm: move tlb_table_flush to tlb_flush_mmu_free
  x86/mm/tlb: Revert the recent lazy TLB patches
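To make the invariant concrete: a translation cached in a remote CPU's TLB can
keep reaching a page-table page after it has been unlinked, so the shootdown
must complete before that page is handed back to the allocator. The sketch
below mirrors the flush-then-free ordering that mm/memory.c's tlb_flush_mmu()
enforces in this era; example_flush_then_free() is an illustrative name, not a
real kernel function:

/*
 * Illustrative sketch (not kernel code) of the flush-before-free
 * ordering this series restores.
 */
static void example_flush_then_free(struct mmu_gather *tlb)
{
	/*
	 * Step 1: shoot down stale translations.  Until this completes,
	 * remote CPUs may still walk the page tables queued in the batch.
	 */
	tlb_flush_mmu_tlbonly(tlb);

	/*
	 * Step 2: only now may the batched pages go back to the
	 * allocator.  Freeing them before step 1 is the bug class
	 * ("freeing page table structures before the required TLB
	 * shootdown") that this merge fixes.
	 */
	tlb_flush_mmu_free(tlb);
}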
Diffstat (limited to 'include')
-rw-r--r--   include/asm-generic/tlb.h   27
1 file changed, 13 insertions, 14 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e811ef7b8350..b3353e21f3b3 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,6 +15,7 @@
 #ifndef _ASM_GENERIC__TLB_H
 #define _ASM_GENERIC__TLB_H
 
+#include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -138,6 +139,16 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	if (!tlb->end)
+		return;
+
+	tlb_flush(tlb);
+	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+	__tlb_reset_range(tlb);
+}
+
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
@@ -186,10 +197,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define __tlb_end_vma(tlb, vma)					\
 	do {							\
-		if (!tlb->fullmm && tlb->end) {			\
-			tlb_flush(tlb);				\
-			__tlb_reset_range(tlb);			\
-		}						\
+		if (!tlb->fullmm)				\
+			tlb_flush_mmu_tlbonly(tlb);		\
 	} while (0)
 
 #ifndef tlb_end_vma
@@ -303,14 +312,4 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
-/*
- * Used to flush the TLB when page tables are removed, when lazy
- * TLB mode may cause a CPU to retain intermediate translations
- * pointing to about-to-be-freed page table memory.
- */
-#ifndef HAVE_TLB_FLUSH_REMOVE_TABLES
-#define tlb_flush_remove_tables(mm) do {} while (0)
-#define tlb_flush_remove_tables_local(mm) do {} while (0)
-#endif
-
 #endif /* _ASM_GENERIC__TLB_H */
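For context on where the new helper sits in the API: most code never calls
tlb_flush_mmu_tlbonly() directly; it runs as part of the mmu_gather teardown
path. Below is a rough, illustrative sketch of a 4.18-era caller.
example_unmap() is hypothetical, while tlb_gather_mmu() and tlb_finish_mmu()
are the real interfaces of that era, written from memory rather than taken
from this diff:

/*
 * Illustrative sketch (not from this patch): the mmu_gather lifecycle
 * around an unmap.  tlb_finish_mmu() flushes the TLB before the pages
 * batched during teardown are freed, which is exactly the ordering the
 * tlb-fixes series is about.
 */
#include <asm/tlb.h>

static void example_unmap(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
	/* ... unmap_page_range()-style teardown batches pages here ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush, then free the batch */
}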