Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/interrupt.h  28
-rw-r--r--  arch/powerpc/kernel/interrupt.c       16
2 files changed, 1 insertion(+), 43 deletions(-)
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 104a77c00a31..a2f551938e64 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -21,9 +21,6 @@ static inline void nap_adjust_return(struct pt_regs *regs)
}
struct interrupt_state {
-#ifdef CONFIG_PPC_BOOK3E_64
- enum ctx_state ctx_state;
-#endif
};
static inline void booke_restore_dbcr0(void)
@@ -56,9 +53,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_hardirqs_off();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
if (user_mode(regs)) {
CT_WARN_ON(ct_state() != CONTEXT_USER);
user_exit_irqoff();
@@ -75,12 +70,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
}
#endif
-#ifdef CONFIG_PPC_BOOK3E_64
- state->ctx_state = exception_enter();
- if (user_mode(regs))
- account_cpu_user_entry();
-#endif
-
booke_restore_dbcr0();
}
@@ -100,25 +89,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
*/
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
-#ifdef CONFIG_PPC_BOOK3E_64
- exception_exit(state->ctx_state);
-#endif
-
if (user_mode(regs))
kuep_unlock();
- /*
- * Book3S exits to user via interrupt_exit_user_prepare(), which does
- * context tracking, which is a cleaner way to handle PREEMPT=y
- * and avoid context entry/exit in e.g., preempt_schedule_irq()),
- * which is likely to be where the core code wants to end up.
- *
- * The above comment explains why we can't do the
- *
- * if (user_mode(regs))
- * user_exit_irqoff();
- *
- * sequence here.
- */
}
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 381a618b5b5b..1b0e1792ac37 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -235,10 +235,6 @@ static notrace void booke_load_dbcr0(void)
#endif
}
-/* temporary hack for context tracking, removed in later patch */
-#include <linux/sched/debug.h>
-asmlinkage __visible void __sched schedule_user(void);
-
/*
* This should be called after a syscall returns, with r3 the return value
* from the syscall. If this function returns non-zero, the system call
@@ -296,11 +292,7 @@ again:
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
if (ti_flags & _TIF_NEED_RESCHED) {
-#ifdef CONFIG_PPC_BOOK3E_64
- schedule_user();
-#else
schedule();
-#endif
} else {
/*
* SIGPENDING must restore signal handler function
@@ -375,9 +367,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
BUG_ON(!(regs->msr & MSR_PR));
BUG_ON(!FULL_REGS(regs));
BUG_ON(arch_irq_disabled_regs(regs));
-#ifdef CONFIG_PPC_BOOK3S_64
CT_WARN_ON(ct_state() == CONTEXT_USER);
-#endif
/*
* We don't need to restore AMR on the way back to userspace for KUAP.
@@ -392,11 +382,7 @@ again:
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable(); /* returning to user: may enable */
if (ti_flags & _TIF_NEED_RESCHED) {
-#ifdef CONFIG_PPC_BOOK3E_64
- schedule_user();
-#else
schedule();
-#endif
} else {
if (ti_flags & _TIF_SIGPENDING)
ret |= _TIF_RESTOREALL;
@@ -464,7 +450,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (IS_ENABLED(CONFIG_BOOKS) && TRAP(regs) != 0x700)
+ if (TRAP(regs) != 0x700)
CT_WARN_ON(ct_state() == CONTEXT_USER);
kuap = kuap_get_and_assert_locked();
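
Note on the pattern this patch consolidates: the diff drops the Book3E-only exception_enter()/exception_exit() and schedule_user() workaround, so all 64-bit platforms now do context tracking the same way, with user_exit_irqoff() on interrupt entry from user mode and the return-to-user path switching back, guarded by CT_WARN_ON(ct_state() ...) assertions. The stand-alone sketch below is only an illustration of that single-entry/single-exit tracking idea under hypothetical names (toy_ctx_state, fake_interrupt, and so on); it is not the powerpc implementation and does not use kernel APIs.

/*
 * Minimal stand-alone model of interrupt entry/exit context tracking.
 * Hypothetical names; illustrates the pattern only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum toy_ctx_state { TOY_CTX_KERNEL, TOY_CTX_USER };

static enum toy_ctx_state cur_state = TOY_CTX_KERNEL;

/* Interrupt taken while running user code: mark kernel context once. */
static void toy_user_exit(void)
{
	assert(cur_state == TOY_CTX_USER);	/* entry code must not do this twice */
	cur_state = TOY_CTX_KERNEL;
}

/* About to return to user mode from the interrupt exit path. */
static void toy_user_enter(void)
{
	assert(cur_state == TOY_CTX_KERNEL);
	cur_state = TOY_CTX_USER;
}

/* Model of one interrupt; from_user mirrors user_mode(regs). */
static void fake_interrupt(bool from_user)
{
	if (from_user)
		toy_user_exit();	/* role of interrupt_enter_prepare() */

	/* ... handler body always runs in kernel context ... */
	assert(cur_state == TOY_CTX_KERNEL);

	if (from_user)
		toy_user_enter();	/* role of interrupt_exit_user_prepare() */
}

int main(void)
{
	cur_state = TOY_CTX_USER;	/* pretend we start in user mode */
	fake_interrupt(true);		/* user -> kernel -> user */
	assert(cur_state == TOY_CTX_USER);

	cur_state = TOY_CTX_KERNEL;
	fake_interrupt(false);		/* kernel interrupt: no tracking calls */
	assert(cur_state == TOY_CTX_KERNEL);

	puts("context tracking sketch ok");
	return 0;
}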