commit 1dd7dcb6eaa677b034e7ef63df8320277507ae70
tree 3f1592b634d7bdde94e00570925be2dade8433d4
parent b6da0076bab5a12afb19312ffee41c95490af2a0
parent 3558a5ac50dbb2419cc649d5e154af161d661037
author Linus Torvalds <torvalds@linux-foundation.org> 2014-12-10 19:58:13 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2014-12-10 19:58:13 -0800
Merge tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "There were a lot of cleanups and minor fixes. One of those cleanups
  was to the trace_seq code. It also removed the return values from the
  trace_seq_*() functions and uses trace_seq_has_overflowed() to see if
  the buffer filled up or not. This is similar to work being done to
  the seq_file code as well in another tree.

  Some of the other goodies include:

   - Added some "!" (NOT) logic to the tracing filter.

   - Fixed the frame pointer logic in the x86_64 mcount trampolines.

   - Added the logic for dynamic trampolines on !CONFIG_PREEMPT
     systems. That is, the ftrace trampoline can be dynamically
     allocated and be called directly by functions that only have a
     single hook to them."

* tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (55 commits)
  tracing: Truncated output is better than nothing
  tracing: Add additional marks to signal very large time deltas
  Documentation: describe trace_buf_size parameter more accurately
  tracing: Allow NOT to filter AND and OR clauses
  tracing: Add NOT to filtering logic
  ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter
  ftrace/x86: Get rid of ftrace_caller_setup
  ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
  ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
  ftrace/x86: Simplify save_mcount_regs on getting RIP
  ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
  ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
  ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
  ftrace/x86: Have static tracing also use ftrace_caller_setup
  ftrace/x86: Have static function tracing always test for function graph
  kprobes: Add IPMODIFY flag to kprobe_ftrace_ops
  ftrace, kprobes: Support IPMODIFY flag to find IP modify conflict
  kprobes/ftrace: Recover original IP if pre_handler doesn't change it
  tracing/trivial: Fix typos and make an int into a bool
  tracing: Deletion of an unnecessary check before iput()
  ...
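As a concrete illustration of the trace_seq conversion described above, here is a minimal sketch of the new calling convention (a hypothetical caller, not code from this merge; only the trace_seq_*() functions, trace_seq_has_overflowed(), and the TRACE_TYPE_* values come from the kernel headers changed below):

    #include <linux/ftrace_event.h>
    #include <linux/trace_seq.h>

    /*
     * Before this merge, every writer returned an int that had to be
     * checked:
     *
     *	if (!trace_seq_printf(s, "cpu=%d\n", iter->cpu))
     *		return TRACE_TYPE_PARTIAL_LINE;
     *	return TRACE_TYPE_HANDLED;
     *
     * After it, the writers return void and overflow is tested once.
     */
    static enum print_line_t my_print_line(struct trace_iterator *iter)
    {
    	struct trace_seq *s = &iter->seq;

    	trace_seq_puts(s, "my_event: ");
    	trace_seq_printf(s, "cpu=%d\n", iter->cpu);

    	return trace_seq_has_overflowed(s) ?
    	       TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
    }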
Diffstat (limited to 'include')
 include/linux/ftrace.h       | 36
 include/linux/ftrace_event.h | 11
 include/linux/trace_seq.h    | 49
 include/trace/ftrace.h       |  8
 4 files changed, 74 insertions(+), 30 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 662697babd48..ed501953f0b2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -61,6 +61,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
+ * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
+ * IPMODIFY are attribute flags: they can be set only before
+ * registering the ftrace_ops, and cannot be modified while registered.
+ * Changing those attribute flags after registering the ftrace_ops will
+ * cause unexpected results.
*
* ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically
@@ -94,6 +99,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ADDING - The ops is in the process of being added.
* REMOVING - The ops is in the process of being removed.
* MODIFYING - The ops is in the process of changing its filter functions.
+ * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
+ * The arch specific code sets this flag when it allocates a
+ * trampoline. This lets the arch know that it can update the
+ * trampoline in case the callback function changes.
+ * The ftrace_ops trampoline can be set by the ftrace users, and
+ * in such cases the arch must not modify it. Only the arch ftrace
+ * core code should set this flag.
+ * IPMODIFY - The ops can modify the IP register. This can only be set with
+ * SAVE_REGS. If another ops with this flag set is already registered
+ * for any of the functions that this ops will be registered for, then
+ * this ops will fail to register or set_filter_ip.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -108,6 +124,8 @@ enum {
FTRACE_OPS_FL_ADDING = 1 << 9,
FTRACE_OPS_FL_REMOVING = 1 << 10,
FTRACE_OPS_FL_MODIFYING = 1 << 11,
+ FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
+ FTRACE_OPS_FL_IPMODIFY = 1 << 13,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -142,6 +160,7 @@ struct ftrace_ops {
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;
unsigned long trampoline;
+ unsigned long trampoline_size;
#endif
};
@@ -255,7 +274,9 @@ struct ftrace_func_command {
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);
-void ftrace_bug(int err, unsigned long ip);
+struct dyn_ftrace;
+
+void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file;
@@ -287,6 +308,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void);
+bool is_ftrace_trampoline(unsigned long addr);
+
/*
* The dyn_ftrace record's flags field is split into two parts.
* the first part which is '0-FTRACE_REF_MAX' is a counter of
@@ -297,6 +320,7 @@ extern int ftrace_nr_registered_ops(void);
* ENABLED - the function is being traced
* REGS - the record wants the function to save regs
* REGS_EN - the function is set up to save regs.
+ * IPMODIFY - the record allows for the IP address to be changed.
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
@@ -310,10 +334,11 @@ enum {
FTRACE_FL_REGS_EN = (1UL << 29),
FTRACE_FL_TRAMP = (1UL << 28),
FTRACE_FL_TRAMP_EN = (1UL << 27),
+ FTRACE_FL_IPMODIFY = (1UL << 26),
};
-#define FTRACE_REF_MAX_SHIFT 27
-#define FTRACE_FL_BITS 5
+#define FTRACE_REF_MAX_SHIFT 26
+#define FTRACE_FL_BITS 6
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
@@ -586,6 +611,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user
size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
+
+static inline bool is_ftrace_trampoline(unsigned long addr)
+{
+ return false;
+}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
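The IPMODIFY comment above describes a registration-time conflict check. A hedged sketch of what a user of the new flag might look like (the handler, replacement function, and attach helper are invented; FTRACE_OPS_FL_SAVE_REGS, FTRACE_OPS_FL_IPMODIFY, ftrace_set_filter_ip(), and register_ftrace_function() are existing kernel API, and IPMODIFY is only valid together with SAVE_REGS):

    #include <linux/ftrace.h>
    #include <linux/ptrace.h>

    void my_replacement_func(void);	/* hypothetical redirect target */

    /* With SAVE_REGS, the callback gets pt_regs and may rewrite the IP. */
    static void my_ipmodify_handler(unsigned long ip, unsigned long parent_ip,
    				struct ftrace_ops *op, struct pt_regs *regs)
    {
    	/* x86: redirect execution, as the kprobes ftrace path does. */
    	regs->ip = (unsigned long)my_replacement_func;
    }

    static struct ftrace_ops my_ops = {
    	.func	= my_ipmodify_handler,
    	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
    };

    static int my_attach(unsigned long target_ip)
    {
    	int ret;

    	/* Fails if another IPMODIFY ops already covers target_ip. */
    	ret = ftrace_set_filter_ip(&my_ops, target_ip, 0, 0);
    	if (ret)
    		return ret;
    	return register_ftrace_function(&my_ops);
    }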
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 28672e87e910..0bebb5c348b8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -138,6 +138,17 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
+/*
+ * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
+ * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
+ * simplifies those functions and keeps them in sync.
+ */
+static inline enum print_line_t trace_handle_return(struct trace_seq *s)
+{
+ return trace_seq_has_overflowed(s) ?
+ TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
+}
+
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
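trace_handle_return() above is aimed at event output callbacks such as the macro-generated one changed at the end of this patch. A rough hand-written equivalent, with the event name and output format invented, could look like:

    /* Hypothetical .trace callback for a struct trace_event. */
    static enum print_line_t
    my_event_output(struct trace_iterator *iter, int flags,
    		struct trace_event *event)
    {
    	trace_seq_printf(&iter->seq, "my_event: pid=%d\n", iter->ent->pid);

    	/* Maps overflow to PARTIAL_LINE, success to HANDLED. */
    	return trace_handle_return(&iter->seq);
    }

    static struct trace_event_functions my_event_funcs = {
    	.trace	= my_event_output,
    };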
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index ea6c9dea79e3..db8a73224f1a 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -40,45 +40,54 @@ trace_seq_buffer_ptr(struct trace_seq *s)
return s->buffer + s->len;
}
+/**
+ * trace_seq_has_overflowed - return true if the trace_seq took too much
+ * @s: trace sequence descriptor
+ *
+ * Returns true if too much data was added to the trace_seq and it is
+ * now full and will not take any more.
+ */
+static inline bool trace_seq_has_overflowed(struct trace_seq *s)
+{
+ return s->full || s->len > PAGE_SIZE - 1;
+}
+
/*
* Currently only defined when tracing is enabled.
*/
#ifdef CONFIG_TRACING
extern __printf(2, 3)
-int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
+void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern __printf(2, 0)
-int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
-extern int
+void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
+extern void
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
int cnt);
-extern int trace_seq_puts(struct trace_seq *s, const char *str);
-extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
-extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
-extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+extern void trace_seq_puts(struct trace_seq *s, const char *str);
+extern void trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
+extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len);
extern int trace_seq_path(struct trace_seq *s, const struct path *path);
-extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits);
#else /* CONFIG_TRACING */
-static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
- return 0;
}
-static inline int
+static inline void
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
- return 0;
}
-static inline int
+static inline void
trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits)
{
- return 0;
}
static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
@@ -90,23 +99,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
{
return 0;
}
-static inline int trace_seq_puts(struct trace_seq *s, const char *str)
+static inline void trace_seq_puts(struct trace_seq *s, const char *str)
{
- return 0;
}
-static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
+static inline void trace_seq_putc(struct trace_seq *s, unsigned char c)
{
- return 0;
}
-static inline int
+static inline void
trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
{
- return 0;
}
-static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len)
{
- return 0;
}
static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
{
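Note the asymmetry this file keeps: the writers (trace_seq_printf() and the put functions) become void, while the read-side helpers trace_print_seq(), trace_seq_to_user(), and trace_seq_path() retain int returns, since consumers still need a copy status. A small sketch under those assumptions (the dump function itself is invented):

    #include <linux/seq_file.h>
    #include <linux/trace_seq.h>

    static int my_dump(struct seq_file *m, struct trace_seq *s)
    {
    	trace_seq_puts(s, "summary");	/* void: no per-call check */
    	trace_seq_putc(s, '\n');

    	if (trace_seq_has_overflowed(s))
    		return -1;		/* the line was truncated */

    	return trace_print_seq(m, s);	/* int: copy status still matters */
    }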
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 26b4f2e13275..139b5067345b 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -277,14 +277,12 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
field = (typeof(field))iter->ent; \
\
ret = ftrace_raw_output_prep(iter, trace_event); \
- if (ret) \
+ if (ret != TRACE_TYPE_HANDLED) \
return ret; \
\
- ret = trace_seq_printf(s, print); \
- if (!ret) \
- return TRACE_TYPE_PARTIAL_LINE; \
+ trace_seq_printf(s, print); \
\
- return TRACE_TYPE_HANDLED; \
+ return trace_handle_return(s); \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
.trace = ftrace_raw_output_##call, \