commit 58f2c391cc0560231d7636c39d31b1b26c9396b7
tree 67956eea736a8763f67be8a52bcaf0d3fe4374f4
parent c2e5df616e1ae6c2a074cb241ebb65a318ebaf7c
parent 33d930e59a98fa10a0db9f56c7fa2f21a4aef9b9
author Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2017-10-16 09:22:22 +0200
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2017-10-16 09:22:22 +0200
Merge 4.14-rc5 into char-misc-next
We need the fixes in here to resolve merge issues and for testing.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/x86/include/asm/tlbflush.h')
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4893abf7f74f..d362161d3291 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -83,6 +83,13 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 #endif
 
 /*
+ * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
+ * to init_mm when we switch to a kernel thread (e.g. the idle thread).  If
+ * it's false, then we immediately switch CR3 when entering a kernel thread.
+ */
+DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
+/*
  * 6 because 6 should be plenty and struct tlb_state will fit in
  * two cache lines.
  */
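For reference, a key declared with DECLARE_STATIC_KEY_TRUE() is consulted with
static_branch_likely()/static_branch_unlikely(). The following is a minimal
sketch, not the committed code: the helper name enter_lazy_tlb_sketch() and its
body are illustrative assumptions about how a kernel-thread entry path could
use the key together with the new is_lazy field.

/*
 * Illustrative sketch only: enter_lazy_tlb_sketch() is a hypothetical
 * helper, not part of this merge.  Assumes the usual kernel context
 * (<linux/jump_label.h>, <asm/tlbflush.h>, <asm/mmu_context.h>).
 */
static void enter_lazy_tlb_sketch(struct mm_struct *mm, struct task_struct *tsk)
{
	if (static_branch_likely(&tlb_use_lazy_mode)) {
		/* Lazy mode: skip the CR3 switch, just mark this CPU lazy. */
		this_cpu_write(cpu_tlbstate.is_lazy, true);
	} else {
		/* Eager mode: load init_mm's page tables immediately. */
		switch_mm(NULL, &init_mm, NULL);
	}
}

With the key true (the default), a CPU entering the idle thread keeps the old
mm's page tables loaded and only records that it is lazy; with the key false,
it pays for the CR3 write up front and never needs a later shootdown.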
@@ -105,6 +112,23 @@ struct tlb_state {
 	u16 next_asid;
 
 	/*
+	 * We can be in one of several states:
+	 *
+	 *  - Actively using an mm.  Our CPU's bit will be set in
+	 *    mm_cpumask(loaded_mm) and is_lazy == false.
+	 *
+	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
+	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+	 *
+	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
+	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
+	 *    We're heuristically guessing that the CR3 load we
+	 *    skipped more than makes up for the overhead added by
+	 *    lazy mode.
+	 */
+	bool is_lazy;
+
+	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.
 	 */
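The three states enumerated in that comment are fully determined by two fields
of struct tlb_state: loaded_mm and the new is_lazy flag. As a reading aid, here
is a hedged sketch; the enum and the classify_tlb_state() helper are
hypothetical, and only loaded_mm, is_lazy, init_mm, and the per-CPU
cpu_tlbstate come from the kernel source.

/* Hypothetical reading aid; this enum and helper are not in the kernel. */
enum tlb_cpu_state { TLB_ACTIVE_MM, TLB_KERNEL_MM, TLB_LAZY_MM };

static enum tlb_cpu_state classify_tlb_state(void)
{
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	if (mm == &init_mm)
		return TLB_KERNEL_MM;	/* not using a real mm */
	if (this_cpu_read(cpu_tlbstate.is_lazy))
		return TLB_LAZY_MM;	/* real mm loaded, CR3 switch deferred */
	return TLB_ACTIVE_MM;		/* actively using loaded_mm */
}

The lazy case is the interesting one: the CPU still has a real mm's page
tables loaded and its bit set in mm_cpumask(loaded_mm), so it continues to
receive (and must honor) TLB flush IPIs for that mm, in exchange for having
skipped a CR3 write on the way into the kernel thread.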