author     Dave Hansen <dave.hansen@linux.intel.com>   2018-04-06 13:55:15 -0700
committer  Ingo Molnar <mingo@kernel.org>              2018-04-12 09:05:59 +0200
commit     0f561fce4d6979a50415616896512f87a6d1d5c8
tree       c65424a590f9a405e617cf042275443b53349e75   /arch/x86/mm/pti.c
parent     639d6aafe437a7464399d2a77d006049053df06f
x86/pti: Enable global pages for shared areas
The entry/exit text and cpu_entry_area are mapped into both userspace and the kernel, but they are not marked _PAGE_GLOBAL. This creates unnecessary TLB misses.

Add the _PAGE_GLOBAL flag for these areas.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180406205515.2977EE7D@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
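For readers less familiar with the x86 paging bits: _PAGE_GLOBAL (bit 8 of a PTE/PMD) tells the CPU that the translation may stay in the TLB across CR3 switches when CR4.PGE is enabled, which is exactly what you want for mappings that are identical in the kernel and user page tables. The stand-alone C sketch below only models the flag handling in this patch; the bit positions match the x86 architectural layout, but mark_global() and the surrounding scaffolding are illustrative, not kernel API.

/* Illustration only: models the flag handling from the patch in plain C.
 * Bit positions follow the x86 PTE/PMD format; the helper is hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT  (1ULL << 0)   /* mapping is valid */
#define _PAGE_RW       (1ULL << 1)   /* writable */
#define _PAGE_GLOBAL   (1ULL << 8)   /* TLB entry may survive CR3 writes (needs CR4.PGE) */

/* Mirrors the patch: only mark an entry global if it is present and the
 * CPU supports PGE, matching the WARN_ON() and boot_cpu_has() checks. */
static uint64_t mark_global(uint64_t pmdval, bool cpu_has_pge)
{
	if (!(pmdval & _PAGE_PRESENT))
		return pmdval;          /* never decorate a non-present entry */
	if (cpu_has_pge)
		pmdval |= _PAGE_GLOBAL;
	return pmdval;
}

int main(void)
{
	uint64_t entry_text = _PAGE_PRESENT;   /* a present, read-only mapping */

	printf("before: %#llx\n", (unsigned long long)entry_text);
	printf("after:  %#llx\n", (unsigned long long)mark_global(entry_text, true));
	return 0;
}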
Diffstat (limited to 'arch/x86/mm/pti.c')
-rw-r--r--  arch/x86/mm/pti.c  23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 631507f0c198..8082f8b0c10e 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -300,6 +300,27 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
return;
/*
+ * Only clone present PMDs. This ensures that _PAGE_GLOBAL
+ * only gets set on present PMDs. This should only be
+ * called on well-known addresses anyway, so a non-
+ * present PMD would be a surprise.
+ */
+ if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
+ return;
+
+ /*
+ * Setting 'target_pmd' below creates a mapping in both
+ * the user and kernel page tables. It is effectively
+ * global, so set it as global in both copies. Note:
+ * the X86_FEATURE_PGE check is not _required_ because
+ * the CPU ignores _PAGE_GLOBAL when PGE is not
+ * supported. The check keeps consistency with
+ * code that only sets this bit when supported.
+ */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
+
+ /*
* Copy the PMD. That is, the kernelmode and usermode
* tables will share the last-level page tables of this
* address range
@@ -348,7 +369,7 @@ static void __init pti_clone_entry_text(void)
{
pti_clone_pmds((unsigned long) __entry_text_start,
(unsigned long) __irqentry_text_end,
- _PAGE_RW | _PAGE_GLOBAL);
+ _PAGE_RW);
}
/*
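The one-line change to pti_clone_entry_text() is what lets the new global bit survive in the user copy: pti_clone_pmds() strips whatever bits are passed in its 'clear' argument from the PMD it installs in the user page tables, so keeping _PAGE_GLOBAL in that mask would have removed the bit the first hunk just set. Below is a rough stand-alone model of that interaction, with a hypothetical clone_pmd() standing in for the per-entry copy; it is a sketch of the idea, not the kernel code path.

/* Rough model of the clone step; clone_pmd() is a made-up stand-in for
 * what pti_clone_pmds() does per entry, not a kernel function. */
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_RW       (1ULL << 1)
#define _PAGE_GLOBAL   (1ULL << 8)

static uint64_t clone_pmd(uint64_t kernel_pmd, uint64_t clear)
{
	return kernel_pmd & ~clear;   /* user copy = kernel entry minus 'clear' bits */
}

int main(void)
{
	/* Kernel-side PMD after the first hunk has set _PAGE_GLOBAL. */
	uint64_t pmd = _PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL;

	/* Old call site: _PAGE_GLOBAL in the clear mask strips the bit again. */
	printf("old clone: %#llx\n",
	       (unsigned long long)clone_pmd(pmd, _PAGE_RW | _PAGE_GLOBAL));

	/* New call site: only _PAGE_RW is cleared, so the user copy stays global. */
	printf("new clone: %#llx\n",
	       (unsigned long long)clone_pmd(pmd, _PAGE_RW));
	return 0;
}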