author	Thomas Gleixner <tglx@linutronix.de>	2020-04-21 11:20:36 +0200
committer	Borislav Petkov <bp@suse.de>	2020-04-26 11:10:25 +0200
commit	29def599b38bb8a10f48f83821dd990615300b04 (patch)
tree	6135cb7b51ad2a42d09e9cf6d993bab864032b31 /arch
parent	58430c5dba7bfe1d132b3c07f0d7a596852ef55c (diff)
download	linux-29def599b38bb8a10f48f83821dd990615300b04.tar.bz2
x86/tlb: Move flush_tlb_others() out of line
cpu_tlbstate is exported because various TLB-related functions need access to it, but cpu_tlbstate is sensitive information which should only be accessed by well-contained kernel functions and not be directly exposed to modules.

As a last step, move __flush_tlb_others() out of line and hide the native function. The latter can be static when CONFIG_PARAVIRT is disabled.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092559.641957686@linutronix.de
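For reference, a minimal, self-contained sketch of the pattern this patch applies. This is not kernel code: the cpumask/flush_tlb_info types and the pv_ops machinery are replaced by trivial stand-ins, and it assumes STATIC_NOPV expands to nothing on paravirt builds and to "static" otherwise, as the macro name and the commit message ("can be static when CONFIG_PARAVIRT is disabled") suggest. The point is that flush_tlb_others() is the only out-of-line entry point other code sees; with CONFIG_PARAVIRT it dispatches through an indirect (paravirt) call, without it the native helper collapses to a static function called directly.

	/* Illustrative stand-ins for the real kernel types. */
	struct cpumask { unsigned long bits[4]; };
	struct flush_tlb_info { unsigned long start, end; };

	#ifdef CONFIG_PARAVIRT
	/*
	 * Paravirt build: the native function must stay visible so the
	 * paravirt ops table can point at it, and __flush_tlb_others()
	 * becomes an indirect call (a stand-in for
	 * PVOP_VCALL2(mmu.flush_tlb_others, ...); in reality the pointer is
	 * filled in by the hypervisor setup code).
	 */
	# define STATIC_NOPV
	static void (*pv_flush_tlb_others)(const struct cpumask *cpumask,
					   const struct flush_tlb_info *info);
	# define __flush_tlb_others(msk, info)	pv_flush_tlb_others(msk, info)
	#else
	/*
	 * Bare-metal build: no indirection is needed, so the native helper
	 * can be static and the wrapper calls it directly.
	 */
	# define STATIC_NOPV			static
	# define __flush_tlb_others(msk, info)	native_flush_tlb_others(msk, info)
	#endif

	STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
						 const struct flush_tlb_info *info)
	{
		/* ... send IPIs to the CPUs in @cpumask for @info's range ... */
	}

	/* The sole out-of-line entry point; cpu_tlbstate stays private to tlb.c. */
	void flush_tlb_others(const struct cpumask *cpumask,
			      const struct flush_tlb_info *info)
	{
		__flush_tlb_others(cpumask, info);
	}

Because callers now go through the exported out-of-line wrapper, modules no longer need the flush_tlb_others() macro from tlbflush.h and the native function no longer has to be globally visible, which is exactly what the hunks below remove.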
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/paravirt.h	6
-rw-r--r--	arch/x86/include/asm/tlbflush.h	10
-rw-r--r--	arch/x86/mm/tlb.c	11
3 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dcd6517a694a..5ca5d297df75 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -50,6 +50,8 @@ static inline void slow_down_io(void)
void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info);
static inline void __flush_tlb_local(void)
{
@@ -66,8 +68,8 @@ static inline void __flush_tlb_one_user(unsigned long addr)
PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}
-static inline void flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+static inline void __flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
{
PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index bbb94f05e1f3..d064ae8a0f2a 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -140,10 +140,14 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
+struct flush_tlb_info;
+
void flush_tlb_local(void);
void flush_tlb_global(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
+void flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
@@ -418,9 +422,6 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
-void native_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info);
-
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
/*
@@ -442,9 +443,6 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, info) \
- native_flush_tlb_others(mask, info)
-
#define paravirt_tlb_remove_table(tlb, page) \
tlb_remove_page(tlb, (void *)(page))
#endif
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ad217ed2a74f..209799dabc70 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -25,6 +25,7 @@
# define __flush_tlb_local native_flush_tlb_local
# define __flush_tlb_global native_flush_tlb_global
# define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr)
+# define __flush_tlb_others(msk, info) native_flush_tlb_others(msk, info)
#endif
/*
@@ -715,8 +716,8 @@ static bool tlb_is_not_lazy(int cpu, void *data)
return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}
-void native_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (info->end == TLB_FLUSH_ALL)
@@ -766,6 +767,12 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(void *)info, 1, cpumask);
}
+void flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ __flush_tlb_others(cpumask, info);
+}
+
/*
* See Documentation/x86/tlb.rst for details. We choose 33
* because it is large enough to cover the vast majority (at