Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/asm-offsets.c               |  2
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S              | 12
-rw-r--r--  arch/arm64/kernel/entry.S                     | 14
-rw-r--r--  arch/arm64/kernel/perf_event.c                | 41
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c            |  4
-rw-r--r--  arch/arm64/kernel/probes/kprobes_trampoline.S |  6
-rw-r--r--  arch/arm64/kernel/signal.c                    |  7
-rw-r--r--  arch/arm64/kernel/syscall.c                   | 10
8 files changed, 23 insertions(+), 73 deletions(-)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index f42fd9e33981..301784463587 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,7 +75,7 @@ int main(void)
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
- DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
#ifdef CONFIG_COMPAT
DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
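
For context on what this rename touches: PT_REGS_SIZE is not a symbol the assembler sees directly. asm-offsets.c is compiled to assembly, and each DEFINE() emits a marker that kbuild scrapes into the generated include/generated/asm-offsets.h, which the .S files below include. A minimal sketch of that mechanism, assuming the stock DEFINE() from include/linux/kbuild.h:

    #include <linux/kbuild.h>   /* DEFINE(sym, val) emits an "->sym val" marker */
    #include <asm/ptrace.h>     /* struct pt_regs */

    int main(void)
    {
    	/* Scraped into asm-offsets.h as: #define PT_REGS_SIZE <value> */
    	DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
    	return 0;
    }
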
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index a338f40e64d3..b3e4f9a088b1 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -35,7 +35,7 @@
*/
.macro ftrace_regs_entry, allregs=0
/* Make room for pt_regs, plus a callee frame */
- sub sp, sp, #(S_FRAME_SIZE + 16)
+ sub sp, sp, #(PT_REGS_SIZE + 16)
/* Save function arguments (and x9 for simplicity) */
stp x0, x1, [sp, #S_X0]
@@ -61,15 +61,15 @@
.endif
/* Save the callsite's SP and LR */
- add x10, sp, #(S_FRAME_SIZE + 16)
+ add x10, sp, #(PT_REGS_SIZE + 16)
stp x9, x10, [sp, #S_LR]
/* Save the PC after the ftrace callsite */
str x30, [sp, #S_PC]
/* Create a frame record for the callsite above pt_regs */
- stp x29, x9, [sp, #S_FRAME_SIZE]
- add x29, sp, #S_FRAME_SIZE
+ stp x29, x9, [sp, #PT_REGS_SIZE]
+ add x29, sp, #PT_REGS_SIZE
/* Create our frame record within pt_regs. */
stp x29, x30, [sp, #S_STACKFRAME]
@@ -120,7 +120,7 @@ ftrace_common_return:
ldr x9, [sp, #S_PC]
/* Restore the callsite's SP */
- add sp, sp, #S_FRAME_SIZE + 16
+ add sp, sp, #PT_REGS_SIZE + 16
ret x9
SYM_CODE_END(ftrace_common)
@@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
ldr x0, [sp, #S_PC]
sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
add x1, sp, #S_LR // parent_ip (callsite's LR)
- ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
+ ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
bl prepare_ftrace_return
b ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
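
All of the S_FRAME_SIZE uses in this file address the same stack layout, so the rename is mechanical. Reading the offsets back out of the macro, the frame it builds looks roughly like this (a sketch reconstructed from the code above, not an authoritative diagram):

    /*
     *   sp + PT_REGS_SIZE + 16 : callsite's original SP
     *   sp + PT_REGS_SIZE      : frame record {x29, x9} for the callsite
     *   sp + S_STACKFRAME      : frame record inside pt_regs
     *   sp + S_X0 .. S_PC      : saved registers (struct pt_regs)
     *   sp                     : base of the struct pt_regs allocation
     */
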
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a8c3e7aaca74..c9bae73f2621 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -75,7 +75,7 @@ alternative_else_nop_endif
.endif
#endif
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
/*
* Test whether the SP has overflowed, without corrupting a GPR.
@@ -96,7 +96,7 @@ alternative_else_nop_endif
* userspace, and can clobber EL0 registers to free up GPRs.
*/
- /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+ /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
msr tpidr_el0, x0
/* Recover the original x0 value and stash it in tpidrro_el0 */
@@ -253,7 +253,7 @@ alternative_else_nop_endif
scs_load tsk, x20
.else
- add x21, sp, #S_FRAME_SIZE
+ add x21, sp, #PT_REGS_SIZE
get_current_task tsk
.endif /* \el == 0 */
mrs x22, elr_el1
@@ -377,7 +377,7 @@ alternative_else_nop_endif
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
- add sp, sp, #S_FRAME_SIZE // restore sp
+ add sp, sp, #PT_REGS_SIZE // restore sp
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
@@ -580,12 +580,12 @@ __bad_stack:
/*
* Store the original GPRs to the new stack. The original SP (minus
- * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+ * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
*/
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
kernel_entry 1
mrs x0, tpidr_el0
- add x0, x0, #S_FRAME_SIZE
+ add x0, x0, #PT_REGS_SIZE
str x0, [sp, #S_SP]
/* Stash the regs for handle_bad_stack */
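
The stash/recover pair works only because both ends agree on the adjustment: kernel_ventry writes SP to tpidr_el0 after its `sub sp, sp, #PT_REGS_SIZE`, so __bad_stack adds the same constant back before recording the pre-exception SP. In C terms, a sketch of what the two instructions above compute (read_sysreg() is the helper from asm/sysreg.h):

    #include <asm/sysreg.h>	/* read_sysreg() */

    unsigned long orig_sp = read_sysreg(tpidr_el0) + PT_REGS_SIZE;
    regs->sp = orig_sp;		/* the "str x0, [sp, #S_SP]" store above */
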
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 38bb07eff872..3605f77ad4df 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -23,8 +23,6 @@
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
-#include <linux/nmi.h>
-#include <linux/cpufreq.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -1250,21 +1248,10 @@ static struct platform_driver armv8_pmu_driver = {
static int __init armv8_pmu_driver_init(void)
{
- int ret;
-
if (acpi_disabled)
- ret = platform_driver_register(&armv8_pmu_driver);
+ return platform_driver_register(&armv8_pmu_driver);
else
- ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
-
- /*
- * Try to re-initialize lockup detector after PMU init in
- * case PMU events are triggered via NMIs.
- */
- if (ret == 0 && arm_pmu_irq_is_nmi())
- lockup_detector_init();
-
- return ret;
+ return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
@@ -1322,27 +1309,3 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
-/*
- * Safe maximum CPU frequency in case a particular platform doesn't implement
- * cpufreq driver. Although, architecture doesn't put any restrictions on
- * maximum frequency but 5 GHz seems to be safe maximum given the available
- * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
- * hand, we can't make it much higher as it would lead to a large hard-lockup
- * detection timeout on parts which are running slower (eg. 1GHz on
- * Developerbox) and doesn't possess a cpufreq driver.
- */
-#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz
-u64 hw_nmi_get_sample_period(int watchdog_thresh)
-{
- unsigned int cpu = smp_processor_id();
- unsigned long max_cpu_freq;
-
- max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
- if (!max_cpu_freq)
- max_cpu_freq = SAFE_MAX_CPU_FREQ;
-
- return (u64)max_cpu_freq * watchdog_thresh;
-}
-#endif
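
The hunk deleted here removes the arm64 wiring for the perf-based hard-lockup detector: hw_nmi_get_sample_period() sized the watchdog period in PMU cycle events as the maximum CPU frequency times the threshold in seconds. A worked instance of the removed arithmetic, using the fallback from its own comment:

    /* No cpufreq driver -> 5 GHz fallback; default watchdog_thresh is 10 s: */
    u64 period = 5000000000UL * 10;   /* 50,000,000,000 cycle events */
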
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 89c64ada8732..66aac2881ba8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -352,8 +352,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
unsigned long addr = instruction_pointer(regs);
struct kprobe *cur = kprobe_running();
- if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
- && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+ if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+ ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
kprobes_restore_local_irqflag(kcb, regs);
post_kprobe_handler(cur, kcb, regs);
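
The fix turns an equality test for one state into a mask test for two. That is valid because the kprobe status values are distinct bits, per include/linux/kprobes.h:

    #define KPROBE_HIT_ACTIVE	0x00000001
    #define KPROBE_HIT_SS	0x00000002
    #define KPROBE_REENTER	0x00000004
    #define KPROBE_HIT_SSDONE	0x00000008

so `status & (KPROBE_HIT_SS | KPROBE_REENTER)` is nonzero exactly for the single-step and re-entrant states; the old code missed the re-entrant one.
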
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 890ca72c5a51..288a84e253cc 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -25,7 +25,7 @@
stp x24, x25, [sp, #S_X24]
stp x26, x27, [sp, #S_X26]
stp x28, x29, [sp, #S_X28]
- add x0, sp, #S_FRAME_SIZE
+ add x0, sp, #PT_REGS_SIZE
stp lr, x0, [sp, #S_LR]
/*
* Construct a useful saved PSTATE
@@ -62,7 +62,7 @@
.endm
SYM_CODE_START(kretprobe_trampoline)
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
save_all_base_regs
@@ -76,7 +76,7 @@ SYM_CODE_START(kretprobe_trampoline)
restore_all_base_regs
- add sp, sp, #S_FRAME_SIZE
+ add sp, sp, #PT_REGS_SIZE
ret
SYM_CODE_END(kretprobe_trampoline)
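
As in entry.S, the trampoline reserves a full struct pt_regs, hands it to the C handler, and unwinds by the same constant. The overall control flow, sketched from the surrounding file:

    /*
     * kretprobe_trampoline (sketch):
     *   sub  sp, sp, #PT_REGS_SIZE     // reserve a struct pt_regs
     *   save_all_base_regs             // populate it
     *   mov  x0, sp
     *   bl   trampoline_probe_handler  // returns the real return address
     *   mov  lr, x0
     *   restore_all_base_regs
     *   add  sp, sp, #PT_REGS_SIZE
     *   ret                            // back to the probed function's caller
     */
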
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index f71d6ce4673f..6237486ff6bb 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
- /*
- * The assembly code enters us with IRQs off, but it hasn't
- * informed the tracing code of that for efficiency reasons.
- * Update the trace code with the current status.
- */
- trace_hardirqs_off();
-
do {
if (thread_flags & _TIF_NEED_RESCHED) {
/* Unmask Debug and SError for the next task */
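
do_notify_resume() is still entered with IRQs masked; what changed is that the exception-entry C code now performs this bookkeeping once, before any handler runs, so the open-coded call here was redundant. A simplified sketch of the entry-common.c helper from this series (the exact body varies by kernel version):

    asmlinkage void noinstr enter_from_user_mode(void)
    {
    	lockdep_hardirqs_off(CALLER_ADDR0);
    	user_exit_irqoff();
    	trace_hardirqs_off_finish();
    }
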
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index f61e9d8cc55a..c2877c332f2d 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -9,6 +9,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
+#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -165,15 +166,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
flags = current_thread_info()->flags;
- if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
- /*
- * We're off to userspace, where interrupts are
- * always enabled after we restore the flags from
- * the SPSR.
- */
- trace_hardirqs_on();
+ if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
- }
local_daif_restore(DAIF_PROCCTX);
}