author     Frederic Weisbecker <frederic@kernel.org>  2019-12-27 17:36:11 +0100
committer  Ingo Molnar <mingo@kernel.org>             2020-01-07 08:11:23 +0100
commit     ee6352b2c47a24234398e06381edd93a8e965976 (patch)
tree       a2f241fb372f40afce253afce341d233fa02067d /arch/x86/mm
parent     c79f46a282390e0f5b306007bf7b11a46d529538 (diff)
x86/context-tracking: Remove exception_enter/exit() from do_page_fault()
do_page_fault(), like other exceptions, is already covered by user_enter()
and user_exit() when the exception triggers in userspace.

As explained in:

  8c84014f3bbb11 ("x86/entry: Remove exception_enter() from most trap handlers")

exception_enter/exit() only remained to handle a possible page fault taken
from kernel mode while context tracking is still in CONTEXT_USER mode, i.e.
on kernel entry before we manage to call user_exit(). The only known
offender was do_fast_syscall_32() fetching the EBP register from where the
vDSO stashed it.

Meanwhile this got fixed in:

  9999c8c01f34c9 ("x86/entry: Call enter_from_user_mode() with IRQs off")

which moved enter_from_user_mode() before the call to get_user().

So we can safely remove exception_enter/exit() from do_page_fault() now.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Link: https://lkml.kernel.org/r/20191227163612.10039-2-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
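For readers less familiar with context tracking, the state machine the changelog
relies on can be modeled in a few lines of plain C. The sketch below is a toy
user-space model, not kernel code: the names (CONTEXT_USER, user_enter/user_exit,
exception_enter/exception_exit) mirror the kernel's context-tracking API, but the
bodies are simplified assumptions made only to show why the save/restore wrapper
existed and why it is now redundant.

/*
 * Toy model of context tracking, for illustration only -- these are
 * simplified stand-ins, not the kernel implementations.
 */
#include <stdio.h>

enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };

static enum ctx_state context = CONTEXT_KERNEL;

/* Entry/exit code flips the tracked state at the user/kernel boundary. */
static void user_enter(void) { context = CONTEXT_USER; }
static void user_exit(void)  { context = CONTEXT_KERNEL; }

/* What the removed wrapper did: force CONTEXT_KERNEL, remembering the old state. */
static enum ctx_state exception_enter(void)
{
	enum ctx_state prev_state = context;

	context = CONTEXT_KERNEL;
	return prev_state;
}

static void exception_exit(enum ctx_state prev_state)
{
	context = prev_state;
}

int main(void)
{
	/* Task returns to userspace; tracked state becomes CONTEXT_USER. */
	user_enter();

	/*
	 * Old world: a page fault taken in kernel mode before user_exit()
	 * ran (e.g. do_fast_syscall_32() touching the vDSO-stashed EBP)
	 * would see a stale CONTEXT_USER, so the handler wrapped itself:
	 */
	enum ctx_state prev = exception_enter();
	printf("fault handled in state %d (saved %d)\n", context, prev);
	exception_exit(prev);

	/*
	 * New world: enter_from_user_mode() -> user_exit() runs before any
	 * user-memory access on the entry path, so the fault handler only
	 * ever runs with the state already set to CONTEXT_KERNEL.
	 */
	user_exit();
	printf("state on kernel entry is now always %d\n", context);
	return 0;
}

The key point matches the changelog: once enter_from_user_mode() runs before the
get_user() on the entry path, a kernel-mode page fault can no longer observe a
stale CONTEXT_USER, so do_page_fault() no longer needs to repair the state itself.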
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c  |  39
1 file changed, 12 insertions(+), 27 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 304d31d8cbbc..2b4ab2862eda 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1486,27 +1486,6 @@ good_area:
 }
 NOKPROBE_SYMBOL(do_user_addr_fault);
 
-/*
- * Explicitly marked noinline such that the function tracer sees this as the
- * page_fault entry point.
- */
-static noinline void
-__do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
-		unsigned long address)
-{
-	prefetchw(&current->mm->mmap_sem);
-
-	if (unlikely(kmmio_fault(regs, address)))
-		return;
-
-	/* Was the fault on kernel-controlled part of the address space? */
-	if (unlikely(fault_in_kernel_space(address)))
-		do_kern_addr_fault(regs, hw_error_code, address);
-	else
-		do_user_addr_fault(regs, hw_error_code, address);
-}
-NOKPROBE_SYMBOL(__do_page_fault);
-
 static __always_inline void
 trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
			 unsigned long address)
@@ -1521,13 +1500,19 @@ trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
 }
 
 dotraplinkage void
-do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
+		unsigned long address)
 {
-	enum ctx_state prev_state;
+	prefetchw(&current->mm->mmap_sem);
+	trace_page_fault_entries(regs, hw_error_code, address);
 
-	prev_state = exception_enter();
-	trace_page_fault_entries(regs, error_code, address);
-	__do_page_fault(regs, error_code, address);
-	exception_exit(prev_state);
+	if (unlikely(kmmio_fault(regs, address)))
+		return;
+
+	/* Was the fault on kernel-controlled part of the address space? */
+	if (unlikely(fault_in_kernel_space(address)))
+		do_kern_addr_fault(regs, hw_error_code, address);
+	else
+		do_user_addr_fault(regs, hw_error_code, address);
 }
 NOKPROBE_SYMBOL(do_page_fault);