author		Linus Torvalds <torvalds@linux-foundation.org>	2019-11-27 11:42:01 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-27 11:42:01 -0800
commit		95f1fa9e3418d50ce099e67280b5497b9c93843b (patch)
tree		b8617e471a9b9993ac1bad48e2d954335f8a6232 /include
parent		477093b3e144aa0ece07a5fd2a84013d037e2776 (diff)
parent		16c0f03f629a89e6a1249497202b2c154ff46206 (diff)
download	linux-95f1fa9e3418d50ce099e67280b5497b9c93843b.tar.bz2
Merge tag 'trace-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"New tracing features:
- New PERMANENT flag to ftrace_ops when attaching a callback to a
function.
Setting /proc/sys/kernel/ftrace_enabled to zero disables all
callbacks attached to ftrace, which has a detrimental impact on
live kernel patching, as it disables everything that was patched.
If a ftrace_ops is registered with the PERMANENT flag set, it
prevents ftrace_enabled from being disabled, and if ftrace_enabled
is already disabled, it prevents a ftrace_ops with the PERMANENT
flag set from being registered (see the module sketch after this
list).
- New register_ftrace_direct().
This function was added because eBPF would like to register its own
trampolines to be called by the ftrace nop locations directly,
without going through the ftrace trampoline. It allows eBPF
trampolines to live alongside ftrace, perf, kprobes and live
patching. It also utilizes the ftrace enabled_functions file, which
keeps track of functions that have been modified in the kernel, to
allow for security auditing (a caller sketch follows this list).
- Allow for kernel internal use of ftrace instances.
Subsystems in the kernel can now create and destroy their own
tracing instances, which allows them to have their own tracing
buffer and to record events without worrying about other users
writing over their data (see the example after this list).
- New seq_buf_hex_dump() that lets users emit a hex dump of a buffer
into their seq_buf output.
- Notifications are now sent for tracing_max_latency, so user space
can know when one of the latency tracers hits a new max latency
(a user-space sketch follows this list).
- Wider use of the generic compare operations (the new cmp_func_t and
friends) by bsearch() and sort() (see the comparator sketch after
the diff).
- More synthetic event fields may be defined (32 up from 16)
- Use of xarray for architectures with sparse system calls, for the
system call trace events.
This along with small clean ups and fixes"
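As a concrete illustration of the PERMANENT flag, here is a minimal module sketch; the callback, counter, and module names are invented for illustration and are not part of this merge:

#include <linux/ftrace.h>
#include <linux/module.h>

static unsigned long hits;

/* Illustrative callback: count every hit on the traced functions. */
static void notrace perm_callback(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops, struct pt_regs *regs)
{
	hits++;	/* cannot be silently shut off via ftrace_enabled */
}

static struct ftrace_ops perm_ops = {
	.func	= perm_callback,
	/* PERMANENT: while this ops is registered, ftrace_enabled cannot
	 * be cleared; if it is already 0, registration fails instead. */
	.flags	= FTRACE_OPS_FL_PERMANENT,
};

static int __init perm_init(void)
{
	return register_ftrace_function(&perm_ops);
}

static void __exit perm_exit(void)
{
	unregister_ftrace_function(&perm_ops);
}

module_init(perm_init);
module_exit(perm_exit);
MODULE_LICENSE("GPL");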
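The direct-call API is exercised by the sample modules this merge adds under samples/ftrace/; a registration sketch follows. The trampoline itself must be architecture-specific assembly that preserves the argument registers, so my_tramp below is only a stand-in name:

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sched.h>

/* Assumed to be an asm trampoline that saves and restores the argument
 * registers around whatever call it makes (see the samples/ftrace/
 * ftrace-direct*.c modules from this merge for real examples). */
extern void my_tramp(void);

static int __init direct_init(void)
{
	/* Hook the fentry/mcount nop of wake_up_process() so it jumps
	 * straight to my_tramp, bypassing the shared ftrace trampoline. */
	return register_ftrace_direct((unsigned long)wake_up_process,
				      (unsigned long)my_tramp);
}

static void __exit direct_exit(void)
{
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}

module_init(direct_init);
module_exit(direct_exit);
MODULE_LICENSE("GPL");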
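For the kernel-internal instances, the prototypes added to include/linux/trace.h (see the diff below) suggest usage along the following lines; the instance name and message are made up:

#include <linux/kernel.h>
#include <linux/trace.h>

static struct trace_array *tr;

static int my_subsys_trace_init(void)
{
	/* Create (or look up) a private instance and take a reference;
	 * it appears under tracefs as instances/my_subsys. */
	tr = trace_array_get_by_name("my_subsys");
	if (!tr)
		return -ENOMEM;

	/* Needed before the first trace_array_printk() call. */
	trace_printk_init_buffers();

	/* Record into this instance's own ring buffer. */
	trace_array_printk(tr, _THIS_IP_, "my_subsys ready: %d\n", 42);
	return 0;
}

static void my_subsys_trace_exit(void)
{
	trace_array_put(tr);		/* drop our reference */
	trace_array_destroy(tr);	/* then remove the instance */
}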
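The tracing_max_latency notification is delivered through fsnotify, so user space should be able to block on inotify instead of polling the file; a user-space sketch, assuming tracefs is mounted at /sys/kernel/tracing:

/* Wake up whenever a latency tracer records a new maximum. */
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init();

	if (fd < 0)
		return 1;
	/* IN_MODIFY fires when the kernel updates the max latency. */
	inotify_add_watch(fd, "/sys/kernel/tracing/tracing_max_latency",
			  IN_MODIFY);
	while (read(fd, buf, sizeof(buf)) > 0)
		printf("new max latency recorded\n");
	return 0;
}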
* tag 'trace-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (51 commits)
tracing: Enable syscall optimization for MIPS
tracing: Use xarray for syscall trace events
tracing: Sample module to demonstrate kernel access to Ftrace instances.
tracing: Adding new functions for kernel access to Ftrace instances
tracing: Fix Kconfig indentation
ring-buffer: Fix typos in function ring_buffer_producer
ftrace: Use BIT() macro
ftrace: Return ENOTSUPP when DYNAMIC_FTRACE_WITH_DIRECT_CALLS is not configured
ftrace: Rename ftrace_graph_stub to ftrace_stub_graph
ftrace: Add a helper function to modify_ftrace_direct() to allow arch optimization
ftrace: Add helper find_direct_entry() to consolidate code
ftrace: Add another check for match in register_ftrace_direct()
ftrace: Fix accounting bug with direct->count in register_ftrace_direct()
ftrace/selftests: Fix spelling mistake "wakeing" -> "waking"
tracing: Increase SYNTH_FIELDS_MAX for synthetic_events
ftrace/samples: Add a sample module that implements modify_ftrace_direct()
ftrace: Add modify_ftrace_direct()
tracing: Add missing "inline" in stub function of latency_fsnotify()
tracing: Remove stray tab in TRACE_EVAL_MAP_FILE's help text
tracing: Use seq_buf_hex_dump() to dump buffers
...
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/vmlinux.lds.h	|  13
-rw-r--r--	include/linux/bsearch.h			|   2
-rw-r--r--	include/linux/ftrace.h			| 112
-rw-r--r--	include/linux/seq_buf.h			|   3
-rw-r--r--	include/linux/sort.h			|   8
-rw-r--r--	include/linux/trace.h			|   8
-rw-r--r--	include/linux/trace_events.h		|   8
-rw-r--r--	include/linux/trace_seq.h		|   4
-rw-r--r--	include/linux/types.h			|   5
-rw-r--r--	include/trace/trace_events.h		|   6
10 files changed, 141 insertions, 28 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 63cedc3c0c77..e00f41aa8ec4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -141,14 +141,23 @@
  * compiler option used. A given kernel image will only use one, AKA
  * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
  * dependencies for FTRACE_CALLSITE_SECTION's definition.
+ *
+ * Need to also make ftrace_stub_graph point to ftrace_stub
+ * so that the same stub location may have different protocols
+ * and not mess up with C verifiers.
  */
 #define MCOUNT_REC()	. = ALIGN(8);				\
 			__start_mcount_loc = .;			\
 			KEEP(*(__mcount_loc))			\
 			KEEP(*(__patchable_function_entries))	\
-			__stop_mcount_loc = .;
+			__stop_mcount_loc = .;			\
+			ftrace_stub_graph = ftrace_stub;
 #else
-#define MCOUNT_REC()
+# ifdef CONFIG_FUNCTION_TRACER
+# define MCOUNT_REC()	ftrace_stub_graph = ftrace_stub;
+# else
+# define MCOUNT_REC()
+# endif
 #endif
 
 #ifdef CONFIG_TRACE_BRANCH_PROFILING
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 62b1eb348858..8ed53d7524ea 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -5,6 +5,6 @@
 #include <linux/types.h>
 
 void *bsearch(const void *key, const void *base, size_t num, size_t size,
-	      int (*cmp)(const void *key, const void *elt));
+	      cmp_func_t cmp);
 
 #endif /* _LINUX_BSEARCH_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9141f2263286..7247d35c3d16 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -51,6 +51,7 @@ static inline void early_trace_init(void) { }
 
 struct module;
 struct ftrace_hash;
+struct ftrace_direct_func;
 
 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
 	defined(CONFIG_DYNAMIC_FTRACE)
@@ -142,24 +143,30 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  * RCU     - Set when the ops can only be called when RCU is watching.
  * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
+ * PERMANENT - Set when the ops is permanent and should not be affected by
+ *             ftrace_enabled.
+ * DIRECT - Used by the direct ftrace_ops helper for direct functions
+ *            (internal ftrace only, should not be used by others)
  */
 enum {
-	FTRACE_OPS_FL_ENABLED			= 1 << 0,
-	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
-	FTRACE_OPS_FL_SAVE_REGS			= 1 << 2,
-	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 3,
-	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 4,
-	FTRACE_OPS_FL_STUB			= 1 << 5,
-	FTRACE_OPS_FL_INITIALIZED		= 1 << 6,
-	FTRACE_OPS_FL_DELETED			= 1 << 7,
-	FTRACE_OPS_FL_ADDING			= 1 << 8,
-	FTRACE_OPS_FL_REMOVING			= 1 << 9,
-	FTRACE_OPS_FL_MODIFYING			= 1 << 10,
-	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 11,
-	FTRACE_OPS_FL_IPMODIFY			= 1 << 12,
-	FTRACE_OPS_FL_PID			= 1 << 13,
-	FTRACE_OPS_FL_RCU			= 1 << 14,
-	FTRACE_OPS_FL_TRACE_ARRAY		= 1 << 15,
+	FTRACE_OPS_FL_ENABLED			= BIT(0),
+	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
+	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
+	FTRACE_OPS_FL_RECURSION_SAFE		= BIT(4),
+	FTRACE_OPS_FL_STUB			= BIT(5),
+	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
+	FTRACE_OPS_FL_DELETED			= BIT(7),
+	FTRACE_OPS_FL_ADDING			= BIT(8),
+	FTRACE_OPS_FL_REMOVING			= BIT(9),
+	FTRACE_OPS_FL_MODIFYING			= BIT(10),
+	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
+	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
+	FTRACE_OPS_FL_PID			= BIT(13),
+	FTRACE_OPS_FL_RCU			= BIT(14),
+	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
+	FTRACE_OPS_FL_PERMANENT			= BIT(16),
+	FTRACE_OPS_FL_DIRECT			= BIT(17),
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -239,6 +246,70 @@ static inline void ftrace_free_init_mem(void) { }
 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
+struct ftrace_func_entry {
+	struct hlist_node hlist;
+	unsigned long ip;
+	unsigned long direct; /* for direct lookup only */
+};
+
+struct dyn_ftrace;
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+extern int ftrace_direct_func_count;
+int register_ftrace_direct(unsigned long ip, unsigned long addr);
+int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
+int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
+struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
+int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+				struct dyn_ftrace *rec,
+				unsigned long old_addr,
+				unsigned long new_addr);
+#else
+# define ftrace_direct_func_count 0
+static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+	return -ENOTSUPP;
+}
+static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+	return -ENOTSUPP;
+}
+static inline int modify_ftrace_direct(unsigned long ip,
+					unsigned long old_addr, unsigned long new_addr)
+{
+	return -ENOTSUPP;
+}
+static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+{
+	return NULL;
+}
+static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+					      struct dyn_ftrace *rec,
+					      unsigned long old_addr,
+					      unsigned long new_addr)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/*
+ * This must be implemented by the architecture.
+ * It is the way the ftrace direct_ops helper, when called
+ * via ftrace (because there's other callbacks besides the
+ * direct call), can inform the architecture's trampoline that this
+ * routine has a direct caller, and what the caller is.
+ *
+ * For example, in x86, it returns the direct caller
+ * callback function via the regs->orig_ax parameter.
+ * Then in the ftrace trampoline, if this is set, it makes
+ * the return from the trampoline jump to the direct caller
+ * instead of going back to the function it just traced.
+ */
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+						 unsigned long addr) { }
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
 #ifdef CONFIG_STACK_TRACER
 
 extern int stack_tracer_enabled;
@@ -291,8 +362,6 @@ static inline void stack_tracer_enable(void) { }
 int ftrace_arch_code_modify_prepare(void);
 int ftrace_arch_code_modify_post_process(void);
 
-struct dyn_ftrace;
-
 enum ftrace_bug_type {
 	FTRACE_BUG_UNKNOWN,
 	FTRACE_BUG_INIT,
@@ -330,6 +399,7 @@ bool is_ftrace_trampoline(unsigned long addr);
  *  REGS_EN - the function is set up to save regs.
  *  IPMODIFY - the record allows for the IP address to be changed.
  *  DISABLED - the record is not ready to be touched yet
+ *  DIRECT   - there is a direct function to call
  *
  * When a new ftrace_ops is registered and wants a function to save
  * pt_regs, the rec->flag REGS is set.  When the function has been
@@ -345,10 +415,12 @@ enum {
 	FTRACE_FL_TRAMP_EN	= (1UL << 27),
 	FTRACE_FL_IPMODIFY	= (1UL << 26),
 	FTRACE_FL_DISABLED	= (1UL << 25),
+	FTRACE_FL_DIRECT	= (1UL << 24),
+	FTRACE_FL_DIRECT_EN	= (1UL << 23),
 };
 
-#define FTRACE_REF_MAX_SHIFT	25
-#define FTRACE_FL_BITS		7
+#define FTRACE_REF_MAX_SHIFT	23
+#define FTRACE_FL_BITS		9
 #define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
 #define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
 #define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index aa5deb041c25..fb0205d87d3c 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -125,6 +125,9 @@ extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
 extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
 			      unsigned int len);
 extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
+extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
+			    int prefix_type, int rowsize, int groupsize,
+			    const void *buf, size_t len, bool ascii);
 
 #ifdef CONFIG_BINARY_PRINTF
 extern int
diff --git a/include/linux/sort.h b/include/linux/sort.h
index 61b96d0ebc44..b5898725fe9d 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -5,12 +5,12 @@
 #include <linux/types.h>
 
 void sort_r(void *base, size_t num, size_t size,
-	    int (*cmp)(const void *, const void *, const void *),
-	    void (*swap)(void *, void *, int),
+	    cmp_r_func_t cmp_func,
+	    swap_func_t swap_func,
 	    const void *priv);
 
 void sort(void *base, size_t num, size_t size,
-	  int (*cmp)(const void *, const void *),
-	  void (*swap)(void *, void *, int));
+	  cmp_func_t cmp_func,
+	  swap_func_t swap_func);
 
 #endif
diff --git a/include/linux/trace.h b/include/linux/trace.h
index b95ffb2188ab..7fd86d3c691f 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -24,6 +24,14 @@ struct trace_export {
 int register_ftrace_export(struct trace_export *export);
 int unregister_ftrace_export(struct trace_export *export);
 
+struct trace_array;
+
+void trace_printk_init_buffers(void);
+int trace_array_printk(struct trace_array *tr, unsigned long ip,
+		       const char *fmt, ...);
+void trace_array_put(struct trace_array *tr);
+struct trace_array *trace_array_get_by_name(const char *name);
+int trace_array_destroy(struct trace_array *tr);
 #endif	/* CONFIG_TRACING */
 
 #endif	/* _LINUX_TRACE_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 30a8cdcfd4a4..4c6e15605766 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -45,6 +45,11 @@ const char *trace_print_array_seq(struct trace_seq *p,
 				   const void *buf, int count,
 				   size_t el_size);
 
+const char *
+trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
+			 int prefix_type, int rowsize, int groupsize,
+			 const void *buf, size_t len, bool ascii);
+
 struct trace_iterator;
 struct trace_event;
 
@@ -550,7 +555,8 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
 
 int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
 int trace_set_clr_event(const char *system, const char *event, int set);
-
+int trace_array_set_clr_event(struct trace_array *tr, const char *system,
+		const char *event, bool enable);
 /*
  * The double __builtin_constant_p is because gcc will give us an error
  * if we try to allocate the static variable to fmt if it is not a
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6609b39a7232..6c30508fca19 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -92,6 +92,10 @@ extern int trace_seq_path(struct trace_seq *s, const struct path *path);
 extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
 			      int nmaskbits);
 
+extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
+			      int prefix_type, int rowsize, int groupsize,
+			      const void *buf, size_t len, bool ascii);
+
 #else /* CONFIG_TRACING */
 static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 {
diff --git a/include/linux/types.h b/include/linux/types.h
index 05030f608be3..85c0e7b18153 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -225,5 +225,10 @@ struct callback_head {
 typedef void (*rcu_callback_t)(struct rcu_head *head);
 typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
 
+typedef void (*swap_func_t)(void *a, void *b, int size);
+
+typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
+typedef int (*cmp_func_t)(const void *a, const void *b);
+
 #endif /*  __ASSEMBLY__ */
 #endif /* _LINUX_TYPES_H */
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 4ecdfe2e3580..7089760d4c7a 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -340,6 +340,12 @@ TRACE_MAKE_SYSTEM_STR();
 		trace_print_array_seq(p, array, count, el_size);	\
 	})
 
+#undef __print_hex_dump
+#define __print_hex_dump(prefix_str, prefix_type,			\
+			 rowsize, groupsize, buf, len, ascii)		\
+	trace_print_hex_dump_seq(p, prefix_str, prefix_type,		\
+				 rowsize, groupsize, buf, len, ascii)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace enum print_line_t					\
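The final hunk above wires __print_hex_dump() into the TRACE_EVENT machinery. A hypothetical event using it might look like this; the event name and fields are invented for illustration:

TRACE_EVENT(my_pkt_dump,

	TP_PROTO(const void *buf, size_t len),

	TP_ARGS(buf, len),

	TP_STRUCT__entry(
		__dynamic_array(unsigned char, data, len)
		__field(size_t, len)
	),

	TP_fast_assign(
		memcpy(__get_dynamic_array(data), buf, len);
		__entry->len = len;
	),

	/* Rendered via trace_print_hex_dump_seq(), which reaches the
	 * new seq_buf_hex_dump() when the event is printed. */
	TP_printk("len=%zu\n%s", __entry->len,
		  __print_hex_dump("", DUMP_PREFIX_OFFSET, 16, 1,
				   __get_dynamic_array(data),
				   __entry->len, true))
);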
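The types.h hunk introduces the typedefs that the bsearch.h and sort.h changes consume; a caller-side sketch of the new typed comparator (the values and function names are arbitrary):

#include <linux/kernel.h>
#include <linux/sort.h>
#include <linux/types.h>

/* Matches the new cmp_func_t: int (*)(const void *, const void *). */
static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void sort_example(void)
{
	int vals[] = { 3, 1, 2 };

	/* A NULL swap_func_t makes sort() fall back to its generic swap. */
	sort(vals, ARRAY_SIZE(vals), sizeof(int), cmp_int, NULL);
}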