author     Will Deacon <will@kernel.org>   2022-12-06 11:07:39 +0000
committer  Will Deacon <will@kernel.org>   2022-12-06 11:07:39 +0000
commit     a4aebff7ef608880149851e0679e3ab455c0d42f (patch)
tree       6c81284c196878118b925085cd55b4818e601373
parent     1a916ed79bc0e94c98001e36403b5c014a222fb4 (diff)
parent     cfce092dae95ed81391e49f273353a96cc6dec64 (diff)
download   linux-a4aebff7ef608880149851e0679e3ab455c0d42f.tar.bz2
Merge branch 'for-next/ftrace' into for-next/core
* for-next/ftrace:
  ftrace: arm64: remove static ftrace
  ftrace: arm64: move from REGS to ARGS
  ftrace: abstract DYNAMIC_FTRACE_WITH_ARGS accesses
  ftrace: rename ftrace_instruction_pointer_set() -> ftrace_regs_set_instruction_pointer()
  ftrace: pass fregs to arch_ftrace_set_direct_caller()
-rw-r--r--  arch/arm64/Kconfig                  19
-rw-r--r--  arch/arm64/Makefile                  2
-rw-r--r--  arch/arm64/include/asm/ftrace.h     72
-rw-r--r--  arch/arm64/kernel/asm-offsets.c     13
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S   156
-rw-r--r--  arch/arm64/kernel/ftrace.c          87
-rw-r--r--  arch/arm64/kernel/module.c           3
-rw-r--r--  arch/powerpc/include/asm/ftrace.h   24
-rw-r--r--  arch/s390/include/asm/ftrace.h      29
-rw-r--r--  arch/x86/include/asm/ftrace.h       49
-rw-r--r--  include/linux/ftrace.h              47
-rw-r--r--  kernel/livepatch/patch.c             2
-rw-r--r--  kernel/trace/Kconfig                 6
-rw-r--r--  kernel/trace/ftrace.c                3
14 files changed, 310 insertions, 202 deletions
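
For orientation before the diff: the series converges on the ftrace_regs accessor API visible in the include/linux/ftrace.h hunk further down. Below is a minimal, hypothetical sketch (not part of this merge) of a tracer callback written against those accessors; my_trace_func and my_ops are made-up names.

/* Hypothetical callback using the accessors this series renames/abstracts. */
#include <linux/ftrace.h>
#include <linux/printk.h>

static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op,
                                  struct ftrace_regs *fregs)
{
        /*
         * Always true when HAVE_DYNAMIC_FTRACE_WITH_ARGS is selected;
         * otherwise true only if a full pt_regs was captured.
         */
        if (!ftrace_regs_has_args(fregs))
                return;

        pr_debug("traced %ps from %ps, arg0=0x%lx\n",
                 (void *)ftrace_regs_get_instruction_pointer(fregs),
                 (void *)parent_ip,
                 ftrace_regs_get_argument(fregs, 0));
}

static struct ftrace_ops my_ops = {
        .func = my_trace_func,
};
/* register_ftrace_function(&my_ops) would then attach the callback. */
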
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0cb1443cc26f..957ec4d02428 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -118,6 +118,7 @@ config ARM64
select CPU_PM if (SUSPEND || CPU_IDLE)
select CRC32
select DCACHE_WORD_ACCESS
+ select DYNAMIC_FTRACE if FUNCTION_TRACER
select DMA_DIRECT_REMAP
select EDAC_SUPPORT
select FRAME_POINTER
@@ -182,8 +183,10 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
+ if $(cc-option,-fpatchable-function-entry=2)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
- if DYNAMIC_FTRACE_WITH_REGS
+ if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
@@ -234,16 +237,16 @@ config ARM64
help
ARM 64-bit (AArch64) Linux support.
-config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1507
depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
- select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS
-config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=2)
- select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS
config 64BIT
def_bool y
@@ -1836,7 +1839,7 @@ config ARM64_PTR_AUTH_KERNEL
# which is only understood by binutils starting with version 2.33.1.
depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
- depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
help
If the compiler supports the -mbranch-protection or
-msign-return-address flag (e.g. GCC 7 or later), then this option
@@ -1846,7 +1849,7 @@ config ARM64_PTR_AUTH_KERNEL
disabled with minimal loss of protection.
This feature works with FUNCTION_GRAPH_TRACER option only if
- DYNAMIC_FTRACE_WITH_REGS is enabled.
+ DYNAMIC_FTRACE_WITH_ARGS is enabled.
config CC_HAS_BRANCH_PROT_PAC_RET
# GCC 9 or later, clang 8 or later
@@ -1944,7 +1947,7 @@ config ARM64_BTI_KERNEL
depends on !CC_IS_GCC
# https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
- depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
help
Build the kernel with Branch Target Identification annotations
and enable enforcement of this for kernel code. When this option
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 4d272ad1df1f..d62bd221828f 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -139,7 +139,7 @@ endif
CHECKFLAGS += -D__aarch64__
-ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
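
The hunk above switches the ftrace compile flags from mcount-style instrumentation to -fpatchable-function-entry=2 whenever DYNAMIC_FTRACE_WITH_ARGS is enabled. Purely as an illustration (not part of the patch), the per-function attribute below has the same effect for a single function: the compiler reserves two NOPs at the entry point, which ftrace_init_nop()/ftrace_make_call() later patch into the MOV X9, LR / BL ftrace_caller sequence described in entry-ftrace.S. The function name here is made up.

__attribute__((patchable_function_entry(2)))
void example_traced_function(void)
{
        /* Two NOPs precede the regular prologue of this function. */
}
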
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 329dbbd4d50b..5664729800ae 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -23,7 +23,7 @@
*/
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR ((unsigned long)_mcount)
@@ -33,8 +33,7 @@
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
#define FTRACE_PLT_IDX 0
-#define FTRACE_REGS_PLT_IDX 1
-#define NR_FTRACE_PLTS 2
+#define NR_FTRACE_PLTS 1
/*
* Currently, gcc tends to save the link register after the local variables
@@ -69,7 +68,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
* Adjust addr to point at the BL in the callsite.
* See ftrace_init_nop() for the callsite sequence.
*/
- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
return addr + AARCH64_INSN_SIZE;
/*
* addr is the address of the mcount call instruction.
@@ -78,10 +77,71 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
struct ftrace_ops;
-struct ftrace_regs;
+
+#define arch_ftrace_get_regs(regs) NULL
+
+struct ftrace_regs {
+ /* x0 - x8 */
+ unsigned long regs[9];
+ unsigned long __unused;
+
+ unsigned long fp;
+ unsigned long lr;
+
+ unsigned long sp;
+ unsigned long pc;
+};
+
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+{
+ return fregs->pc;
+}
+
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+ unsigned long pc)
+{
+ fregs->pc = pc;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
+{
+ return fregs->sp;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
+{
+ if (n < 8)
+ return fregs->regs[n];
+ return 0;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
+{
+ return fregs->regs[0];
+}
+
+static __always_inline void
+ftrace_regs_set_return_value(struct ftrace_regs *fregs,
+ unsigned long ret)
+{
+ fregs->regs[0] = ret;
+}
+
+static __always_inline void
+ftrace_override_function_with_return(struct ftrace_regs *fregs)
+{
+ fregs->pc = fregs->lr;
+}
+
+int ftrace_regs_query_register_offset(const char *name);
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
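
A hedged sketch of how the accessors defined above compose; the callback below is hypothetical and not part of this series, and assumes fregs carries the argument state (as DYNAMIC_FTRACE_WITH_ARGS guarantees here). On arm64 it only touches the ftrace_regs fields saved by ftrace_caller: regs[0] for the return value, and pc = lr to return straight to the caller.

#include <linux/errno.h>
#include <linux/ftrace.h>

static void notrace skip_with_einval(unsigned long ip, unsigned long parent_ip,
                                     struct ftrace_ops *op,
                                     struct ftrace_regs *fregs)
{
        /* Make the traced function appear to return -EINVAL immediately. */
        ftrace_regs_set_return_value(fregs, (unsigned long)-EINVAL);
        ftrace_override_function_with_return(fregs);
}
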
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 1197e7679882..2234624536d9 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -82,6 +82,19 @@ int main(void)
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+ DEFINE(FREGS_X0, offsetof(struct ftrace_regs, regs[0]));
+ DEFINE(FREGS_X2, offsetof(struct ftrace_regs, regs[2]));
+ DEFINE(FREGS_X4, offsetof(struct ftrace_regs, regs[4]));
+ DEFINE(FREGS_X6, offsetof(struct ftrace_regs, regs[6]));
+ DEFINE(FREGS_X8, offsetof(struct ftrace_regs, regs[8]));
+ DEFINE(FREGS_FP, offsetof(struct ftrace_regs, fp));
+ DEFINE(FREGS_LR, offsetof(struct ftrace_regs, lr));
+ DEFINE(FREGS_SP, offsetof(struct ftrace_regs, sp));
+ DEFINE(FREGS_PC, offsetof(struct ftrace_regs, pc));
+ DEFINE(FREGS_SIZE, sizeof(struct ftrace_regs));
+ BLANK();
+#endif
#ifdef CONFIG_COMPAT
DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0));
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 795344ab4ec4..30cc2a9d1757 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -13,83 +13,58 @@
#include <asm/ftrace.h>
#include <asm/insn.h>
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
* Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
* the regular function prologue. For an enabled callsite, ftrace_init_nop() and
* ftrace_make_call() have patched those NOPs to:
*
* MOV X9, LR
- * BL <entry>
- *
- * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
+ * BL ftrace_caller
*
* Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
* live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
* clobber.
*
- * We save the callsite's context into a pt_regs before invoking any ftrace
- * callbacks. So that we can get a sensible backtrace, we create a stack record
- * for the callsite and the ftrace entry assembly. This is not sufficient for
- * reliable stacktrace: until we create the callsite stack record, its caller
- * is missing from the LR and existing chain of frame records.
+ * We save the callsite's context into a struct ftrace_regs before invoking any
+ * ftrace callbacks. So that we can get a sensible backtrace, we create frame
+ * records for the callsite and the ftrace entry assembly. This is not
+ * sufficient for reliable stacktrace: until we create the callsite stack
+ * record, its caller is missing from the LR and existing chain of frame
+ * records.
*/
- .macro ftrace_regs_entry, allregs=0
- /* Make room for pt_regs, plus a callee frame */
- sub sp, sp, #(PT_REGS_SIZE + 16)
-
- /* Save function arguments (and x9 for simplicity) */
- stp x0, x1, [sp, #S_X0]
- stp x2, x3, [sp, #S_X2]
- stp x4, x5, [sp, #S_X4]
- stp x6, x7, [sp, #S_X6]
- stp x8, x9, [sp, #S_X8]
-
- /* Optionally save the callee-saved registers, always save the FP */
- .if \allregs == 1
- stp x10, x11, [sp, #S_X10]
- stp x12, x13, [sp, #S_X12]
- stp x14, x15, [sp, #S_X14]
- stp x16, x17, [sp, #S_X16]
- stp x18, x19, [sp, #S_X18]
- stp x20, x21, [sp, #S_X20]
- stp x22, x23, [sp, #S_X22]
- stp x24, x25, [sp, #S_X24]
- stp x26, x27, [sp, #S_X26]
- stp x28, x29, [sp, #S_X28]
- .else
- str x29, [sp, #S_FP]
- .endif
-
- /* Save the callsite's SP and LR */
- add x10, sp, #(PT_REGS_SIZE + 16)
- stp x9, x10, [sp, #S_LR]
+SYM_CODE_START(ftrace_caller)
+ bti c
- /* Save the PC after the ftrace callsite */
- str x30, [sp, #S_PC]
+ /* Save original SP */
+ mov x10, sp
- /* Create a frame record for the callsite above pt_regs */
- stp x29, x9, [sp, #PT_REGS_SIZE]
- add x29, sp, #PT_REGS_SIZE
+ /* Make room for ftrace regs, plus two frame records */
+ sub sp, sp, #(FREGS_SIZE + 32)
- /* Create our frame record within pt_regs. */
- stp x29, x30, [sp, #S_STACKFRAME]
- add x29, sp, #S_STACKFRAME
- .endm
+ /* Save function arguments */
+ stp x0, x1, [sp, #FREGS_X0]
+ stp x2, x3, [sp, #FREGS_X2]
+ stp x4, x5, [sp, #FREGS_X4]
+ stp x6, x7, [sp, #FREGS_X6]
+ str x8, [sp, #FREGS_X8]
-SYM_CODE_START(ftrace_regs_caller)
- bti c
- ftrace_regs_entry 1
- b ftrace_common
-SYM_CODE_END(ftrace_regs_caller)
+ /* Save the callsite's FP, LR, SP */
+ str x29, [sp, #FREGS_FP]
+ str x9, [sp, #FREGS_LR]
+ str x10, [sp, #FREGS_SP]
-SYM_CODE_START(ftrace_caller)
- bti c
- ftrace_regs_entry 0
- b ftrace_common
-SYM_CODE_END(ftrace_caller)
+ /* Save the PC after the ftrace callsite */
+ str x30, [sp, #FREGS_PC]
+
+ /* Create a frame record for the callsite above the ftrace regs */
+ stp x29, x9, [sp, #FREGS_SIZE + 16]
+ add x29, sp, #FREGS_SIZE + 16
+
+ /* Create our frame record above the ftrace regs */
+ stp x29, x30, [sp, #FREGS_SIZE]
+ add x29, sp, #FREGS_SIZE
-SYM_CODE_START(ftrace_common)
sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
mov x1, x9 // parent_ip (callsite's LR)
ldr_l x2, function_trace_op // op
@@ -104,24 +79,24 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
* to restore x0-x8, x29, and x30.
*/
/* Restore function arguments */
- ldp x0, x1, [sp]
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- ldr x8, [sp, #S_X8]
+ ldp x0, x1, [sp, #FREGS_X0]
+ ldp x2, x3, [sp, #FREGS_X2]
+ ldp x4, x5, [sp, #FREGS_X4]
+ ldp x6, x7, [sp, #FREGS_X6]
+ ldr x8, [sp, #FREGS_X8]
/* Restore the callsite's FP, LR, PC */
- ldr x29, [sp, #S_FP]
- ldr x30, [sp, #S_LR]
- ldr x9, [sp, #S_PC]
+ ldr x29, [sp, #FREGS_FP]
+ ldr x30, [sp, #FREGS_LR]
+ ldr x9, [sp, #FREGS_PC]
/* Restore the callsite's SP */
- add sp, sp, #PT_REGS_SIZE + 16
+ add sp, sp, #FREGS_SIZE + 32
ret x9
-SYM_CODE_END(ftrace_common)
+SYM_CODE_END(ftrace_caller)
-#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
/*
* Gcc with -pg will put the following code in the beginning of each function:
@@ -195,44 +170,6 @@ SYM_CODE_END(ftrace_common)
add \reg, \reg, #8
.endm
-#ifndef CONFIG_DYNAMIC_FTRACE
-/*
- * void _mcount(unsigned long return_address)
- * @return_address: return address to instrumented function
- *
- * This function makes calls, if enabled, to:
- * - tracer function to probe instrumented function's entry,
- * - ftrace_graph_caller to set up an exit hook
- */
-SYM_FUNC_START(_mcount)
- mcount_enter
-
- ldr_l x2, ftrace_trace_function
- adr x0, ftrace_stub
- cmp x0, x2 // if (ftrace_trace_function
- b.eq skip_ftrace_call // != ftrace_stub) {
-
- mcount_get_pc x0 // function's pc
- mcount_get_lr x1 // function's lr (= parent's pc)
- blr x2 // (*ftrace_trace_function)(pc, lr);
-
-skip_ftrace_call: // }
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- ldr_l x2, ftrace_graph_return
- cmp x0, x2 // if ((ftrace_graph_return
- b.ne ftrace_graph_caller // != ftrace_stub)
-
- ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry
- adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
- cmp x0, x2
- b.ne ftrace_graph_caller // ftrace_graph_caller();
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
- mcount_exit
-SYM_FUNC_END(_mcount)
-EXPORT_SYMBOL(_mcount)
-NOKPROBE(_mcount)
-
-#else /* CONFIG_DYNAMIC_FTRACE */
/*
* _mcount() is used to build the kernel with -pg option, but all the branch
* instructions to _mcount() are replaced to NOP initially at kernel start up,
@@ -272,7 +209,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
mcount_exit
SYM_FUNC_END(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -293,7 +229,7 @@ SYM_FUNC_START(ftrace_graph_caller)
mcount_exit
SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
SYM_TYPED_FUNC_START(ftrace_stub)
ret
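
As a reader's aid (not part of the patch), the stack layout built by the new ftrace_caller above, with offsets kept symbolic:

/*
 *   callsite's original SP -> +---------------------------+
 *                             | callsite frame record     |  { callsite x29, x9 = callsite LR }
 *   sp + FREGS_SIZE + 16   -> +---------------------------+
 *                             | trampoline frame record   |  { &callsite record, x30 = PC after the callsite BL }
 *   sp + FREGS_SIZE        -> +---------------------------+
 *                             | struct ftrace_regs        |  x0-x8, fp, lr, sp, pc
 *   sp                     -> +---------------------------+
 */
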
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 8745175f4a75..b30b955a8921 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -17,7 +17,49 @@
#include <asm/insn.h>
#include <asm/patching.h>
-#ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+struct fregs_offset {
+ const char *name;
+ int offset;
+};
+
+#define FREGS_OFFSET(n, field) \
+{ \
+ .name = n, \
+ .offset = offsetof(struct ftrace_regs, field), \
+}
+
+static const struct fregs_offset fregs_offsets[] = {
+ FREGS_OFFSET("x0", regs[0]),
+ FREGS_OFFSET("x1", regs[1]),
+ FREGS_OFFSET("x2", regs[2]),
+ FREGS_OFFSET("x3", regs[3]),
+ FREGS_OFFSET("x4", regs[4]),
+ FREGS_OFFSET("x5", regs[5]),
+ FREGS_OFFSET("x6", regs[6]),
+ FREGS_OFFSET("x7", regs[7]),
+ FREGS_OFFSET("x8", regs[8]),
+
+ FREGS_OFFSET("x29", fp),
+ FREGS_OFFSET("x30", lr),
+ FREGS_OFFSET("lr", lr),
+
+ FREGS_OFFSET("sp", sp),
+ FREGS_OFFSET("pc", pc),
+};
+
+int ftrace_regs_query_register_offset(const char *name)
+{
+ for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
+ const struct fregs_offset *roff = &fregs_offsets[i];
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ }
+
+ return -EINVAL;
+}
+#endif
+
/*
* Replace a single instruction, which may be a branch or NOP.
* If @validate == true, a replaced instruction is checked against 'old'.
@@ -70,9 +112,6 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
if (addr == FTRACE_ADDR)
return &plt[FTRACE_PLT_IDX];
- if (addr == FTRACE_REGS_ADDR &&
- IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
- return &plt[FTRACE_REGS_PLT_IDX];
#endif
return NULL;
}
@@ -154,25 +193,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_modify_code(pc, old, new, true);
}
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
- unsigned long addr)
-{
- unsigned long pc = rec->ip;
- u32 old, new;
-
- if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
- return -EINVAL;
- if (!ftrace_find_callable_addr(rec, NULL, &addr))
- return -EINVAL;
-
- old = aarch64_insn_gen_branch_imm(pc, old_addr,
- AARCH64_INSN_BRANCH_LINK);
- new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
-
- return ftrace_modify_code(pc, old, new, true);
-}
-
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
* The compiler has inserted two NOPs before the regular function prologue.
* All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
@@ -228,7 +249,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
*
* Note: 'mod' is only set at module load time.
*/
- if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
return aarch64_insn_patch_text_nosync((void *)pc, new);
}
@@ -246,7 +267,6 @@ void arch_ftrace_update_code(int command)
command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
}
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -277,21 +297,11 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
}
}
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- /*
- * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
- * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
- * in which we can safely modify the LR.
- */
- struct pt_regs *regs = arch_ftrace_get_regs(fregs);
- unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);
-
- prepare_ftrace_return(ip, parent, frame_pointer(regs));
+ prepare_ftrace_return(ip, &fregs->lr, fregs->fp);
}
#else
/*
@@ -323,6 +333,5 @@ int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fa7b3228944b..5af4975caeb5 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -499,9 +499,6 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
- __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);
-
mod->arch.ftrace_trampolines = plts;
#endif
return 0;
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 3cee7115441b..259b9dd5fe1c 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -37,12 +37,32 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
return fregs->regs.msr ? &fregs->regs : NULL;
}
-static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
- unsigned long ip)
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+ unsigned long ip)
{
regs_set_return_ip(&fregs->regs, ip);
}
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
+{
+ return instruction_pointer(&fregs->regs);
+}
+
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+
struct ftrace_ops;
#define ftrace_graph_func ftrace_graph_func
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 6f80ec9c04be..e5c5cb1207e2 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -54,12 +54,33 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
return NULL;
}
-static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
- unsigned long ip)
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+{
+ return fregs->regs.psw.addr;
+}
+
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+ unsigned long ip)
{
fregs->regs.psw.addr = ip;
}
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* When an ftrace registered caller is tracing a function that is
* also set by a register_ftrace_direct() call, it needs to be
@@ -67,10 +88,12 @@ static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *f
* place the direct caller in the ORIG_GPR2 part of pt_regs. This
* tells the ftrace_caller that there's a direct caller.
*/
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
+ struct pt_regs *regs = &fregs->regs;
regs->orig_gpr2 = addr;
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
/*
* Even though the system call numbers are identical for s390/s390x a
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 908d99b127d3..5061ac98ffa1 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -34,19 +34,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
-/*
- * When a ftrace registered caller is tracing a function that is
- * also set by a register_ftrace_direct() call, it needs to be
- * differentiated in the ftrace_caller trampoline. To do this, we
- * place the direct caller in the ORIG_AX part of pt_regs. This
- * tells the ftrace_caller that there's a direct caller.
- */
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
-{
- /* Emulate a call */
- regs->orig_ax = addr;
-}
-
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
struct ftrace_regs {
struct pt_regs regs;
@@ -61,9 +48,25 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
return &fregs->regs;
}
-#define ftrace_instruction_pointer_set(fregs, _ip) \
+#define ftrace_regs_set_instruction_pointer(fregs, _ip) \
do { (fregs)->regs.ip = (_ip); } while (0)
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ ((fregs)->regs.ip)
+
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+
struct ftrace_ops;
#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
@@ -72,6 +75,24 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/*
+ * When a ftrace registered caller is tracing a function that is
+ * also set by a register_ftrace_direct() call, it needs to be
+ * differentiated in the ftrace_caller trampoline. To do this, we
+ * place the direct caller in the ORIG_AX part of pt_regs. This
+ * tells the ftrace_caller that there's a direct caller.
+ */
+static inline void
+__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+ /* Emulate a call */
+ regs->orig_ax = addr;
+}
+#define arch_ftrace_set_direct_caller(fregs, addr) \
+ __arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 62557d4bffc2..99f1146614c0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -37,9 +37,10 @@ extern void ftrace_boot_snapshot(void);
static inline void ftrace_boot_snapshot(void) { }
#endif
-#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops;
struct ftrace_regs;
+
+#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
@@ -110,12 +111,11 @@ struct ftrace_regs {
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
/*
- * ftrace_instruction_pointer_set() is to be defined by the architecture
- * if to allow setting of the instruction pointer from the ftrace_regs
- * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
- * live kernel patching.
+ * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
+ * if to allow setting of the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
*/
-#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
+#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
@@ -126,6 +126,35 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
return arch_ftrace_get_regs(fregs);
}
+/*
+ * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
+ * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
+ */
+static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
+{
+ if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
+ return true;
+
+ return ftrace_get_regs(fregs) != NULL;
+}
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(ftrace_get_regs(fregs), n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(ftrace_get_regs(fregs))
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(ftrace_get_regs(fregs), ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(ftrace_get_regs(fregs))
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+#endif
+
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
@@ -427,9 +456,7 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi
{
return -ENODEV;
}
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* This must be implemented by the architecture.
* It is the way the ftrace direct_ops helper, when called
@@ -443,9 +470,9 @@ static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsi
* the return from the trampoline jump to the direct caller
* instead of going back to the function it just traced.
*/
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
unsigned long addr) { }
-#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#ifdef CONFIG_STACK_TRACER
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 4c4f5a776d80..4152c71507e2 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -118,7 +118,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
if (func->nop)
goto unlock;
- ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func);
+ ftrace_regs_set_instruction_pointer(fregs, (unsigned long)func->new_func);
unlock:
ftrace_test_recursion_unlock(bit);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e9e95c790b8e..2c6611c13f99 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -46,10 +46,10 @@ config HAVE_DYNAMIC_FTRACE_WITH_ARGS
bool
help
If this is set, then arguments and stack can be found from
- the pt_regs passed into the function callback regs parameter
+ the ftrace_regs passed into the function callback regs parameter
by default, even without setting the REGS flag in the ftrace_ops.
- This allows for use of regs_get_kernel_argument() and
- kernel_stack_pointer().
+ This allows for use of ftrace_regs_get_argument() and
+ ftrace_regs_get_stack_pointer().
config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7dc023641bf1..9eb42120ac44 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2487,14 +2487,13 @@ ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
static void call_direct_funcs(unsigned long ip, unsigned long pip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
- struct pt_regs *regs = ftrace_get_regs(fregs);
unsigned long addr;
addr = ftrace_find_rec_direct(ip);
if (!addr)
return;
- arch_ftrace_set_direct_caller(regs, addr);
+ arch_ftrace_set_direct_caller(fregs, addr);
}
struct ftrace_ops direct_ops = {