Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	110
1 file changed, 93 insertions(+), 17 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6b11e4e2150c..8d2b98812625 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -386,16 +386,22 @@ trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
* Returns false if @task should be traced.
*/
bool
-trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
+trace_ignore_this_task(struct trace_pid_list *filtered_pids,
+ struct trace_pid_list *filtered_no_pids,
+ struct task_struct *task)
{
/*
- * Return false, because if filtered_pids does not exist,
- * all pids are good to trace.
+ * If filtered_no_pids is not empty, and the task's pid is listed
+ * in filtered_no_pids, then return true.
+ * Otherwise, if filtered_pids is empty, that means we can
+ * trace all tasks. If it has content, then only trace pids
+ * within filtered_pids.
*/
- if (!filtered_pids)
- return false;
- return !trace_find_filtered_pid(filtered_pids, task->pid);
+ return (filtered_pids &&
+ !trace_find_filtered_pid(filtered_pids, task->pid)) ||
+ (filtered_no_pids &&
+ trace_find_filtered_pid(filtered_no_pids, task->pid));
}
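
The new return expression combines an allow list and a deny list. A minimal userspace sketch of the same decision, with pid_list_contains() as a hypothetical stand-in for trace_find_filtered_pid() (a NULL list pointer means "no filter set", as in the kernel check):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct trace_pid_list. */
struct pid_list { const int *pids; int count; };

static bool pid_list_contains(const struct pid_list *list, int pid)
{
        for (int i = 0; i < list->count; i++)
                if (list->pids[i] == pid)
                        return true;
        return false;
}

/* Same decision as trace_ignore_this_task(): ignore a task when an
 * allow list exists and the pid is absent from it, or when a deny
 * list exists and the pid is present in it. */
static bool ignore_task(const struct pid_list *allow,
                        const struct pid_list *deny, int pid)
{
        return (allow && !pid_list_contains(allow, pid)) ||
               (deny && pid_list_contains(deny, pid));
}

int main(void)
{
        const int allowed[] = { 100, 200 };
        const int denied[] = { 200 };
        const struct pid_list allow = { allowed, 2 };
        const struct pid_list deny = { denied, 1 };

        printf("%d\n", ignore_task(&allow, &deny, 100)); /* 0: traced */
        printf("%d\n", ignore_task(&allow, &deny, 200)); /* 1: denied */
        printf("%d\n", ignore_task(NULL, &deny, 300));   /* 0: traced */
        return 0;
}

A pid present on both lists is ignored: either clause being true suffices, so the deny list wins.
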
/**
@@ -3378,7 +3384,7 @@ static void trace_iterator_increment(struct trace_iterator *iter)
iter->idx++;
if (buf_iter)
- ring_buffer_read(buf_iter, NULL);
+ ring_buffer_iter_advance(buf_iter);
}
static struct trace_entry *
@@ -3388,11 +3394,15 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
- if (buf_iter)
+ if (buf_iter) {
event = ring_buffer_iter_peek(buf_iter, ts);
- else
+ if (lost_events)
+ *lost_events = ring_buffer_iter_dropped(buf_iter) ?
+ (unsigned long)-1 : 0;
+ } else {
event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
lost_events);
+ }
if (event) {
iter->ent_size = ring_buffer_event_length(event);
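
ring_buffer_iter_dropped() only reports whether events were lost, not how many, so the iterator path encodes "unknown count" as (unsigned long)-1; print_trace_line() (further down) decodes it. A self-contained sketch of the convention, with report_lost() as an illustrative name:

#include <stdio.h>

/* Sentinel for "events were dropped but the count is unknown",
 * matching the (unsigned long)-1 stored above. */
#define LOST_COUNT_UNKNOWN ((unsigned long)-1)

static void report_lost(int cpu, unsigned long lost)
{
        if (!lost)
                return;
        if (lost == LOST_COUNT_UNKNOWN)
                printf("CPU:%d [LOST EVENTS]\n", cpu);
        else
                printf("CPU:%d [LOST %lu EVENTS]\n", cpu, lost);
}

int main(void)
{
        report_lost(0, 0);                  /* nothing printed */
        report_lost(1, 42);                 /* exact count known */
        report_lost(2, LOST_COUNT_UNKNOWN); /* iterator path */
        return 0;
}
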
@@ -3462,11 +3472,51 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
return next;
}
+#define STATIC_TEMP_BUF_SIZE 128
+static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
+
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts)
{
- return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
+ /* __find_next_entry will reset ent_size */
+ int ent_size = iter->ent_size;
+ struct trace_entry *entry;
+
+ /*
+ * If called from ftrace_dump(), then the iter->temp buffer
+ * will be the static_temp_buf and not created from kmalloc.
+ * If the entry size is greater than the buffer, we cannot
+ * save it. Just return NULL in that case. This is only
+ * used to add markers when two consecutive events' time
+ * stamps have a large delta. See trace_print_lat_context().
+ */
+ if (iter->temp == static_temp_buf &&
+ STATIC_TEMP_BUF_SIZE < ent_size)
+ return NULL;
+
+ /*
+ * __find_next_entry() may call peek_next_entry(), which may call
+ * ring_buffer_peek(), which can make the contents of iter->ent
+ * undefined. Need to copy iter->ent now.
+ */
+ if (iter->ent && iter->ent != iter->temp) {
+ if ((!iter->temp || iter->temp_size < iter->ent_size) &&
+ !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
+ kfree(iter->temp);
+ iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
+ if (!iter->temp)
+ return NULL;
+ }
+ memcpy(iter->temp, iter->ent, iter->ent_size);
+ iter->temp_size = iter->ent_size;
+ iter->ent = iter->temp;
+ }
+ entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
+ /* Put back the original ent_size */
+ iter->ent_size = ent_size;
+
+ return entry;
}
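
The heart of the change: iter->ent may point into the ring buffer, and __find_next_entry() can invalidate it, so a stable copy is made into an owned scratch buffer that grows on demand. A hedged userspace sketch of that pattern (names illustrative; the kernel version additionally refuses to grow the static buffer used by ftrace_dump()):

#include <stdlib.h>
#include <string.h>

/* Owned scratch buffer; initialize to { NULL, 0 }. */
struct scratch {
        void *buf;
        size_t size;
};

/* Copy (data, len) into the scratch buffer, growing it on demand.
 * Returns the stable copy, or NULL if allocation fails. */
static void *scratch_save(struct scratch *s, const void *data, size_t len)
{
        if (!s->buf || s->size < len) {
                /* The old contents need not survive, so free+malloc
                 * suffices (mirroring the kfree()/kmalloc() above). */
                free(s->buf);
                s->buf = malloc(len);
                s->size = s->buf ? len : 0;
                if (!s->buf)
                        return NULL;
        }
        memcpy(s->buf, data, len);
        return s->buf;
}

After the copy, iter->ent is redirected to the stable buffer, so whatever peek_next_entry() does to the ring buffer no longer matters.
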
/* Find the next real entry, and increment the iterator to the next entry */
@@ -3538,7 +3588,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
if (ts >= iter->array_buffer->time_start)
break;
entries++;
- ring_buffer_read(buf_iter, NULL);
+ ring_buffer_iter_advance(buf_iter);
}
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
@@ -3981,8 +4031,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
enum print_line_t ret;
if (iter->lost_events) {
- trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
- iter->cpu, iter->lost_events);
+ if (iter->lost_events == (unsigned long)-1)
+ trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
+ iter->cpu);
+ else
+ trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+ iter->cpu, iter->lost_events);
if (trace_seq_has_overflowed(&iter->seq))
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -4198,6 +4252,18 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
goto release;
/*
+ * trace_find_next_entry() may need to save off iter->ent.
+ * It will place it into the iter->temp buffer. As most
+ * events are less than 128 bytes, allocate a buffer of that size.
+ * If one is greater, then trace_find_next_entry() will
+ * allocate a new buffer to adjust for the bigger iter->ent.
+ * It's not critical if it fails to get allocated here.
+ */
+ iter->temp = kmalloc(128, GFP_KERNEL);
+ if (iter->temp)
+ iter->temp_size = 128;
+
+ /*
* We make a copy of the current tracer to avoid concurrent
* changes on it while we are reading.
*/
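
The 128-byte kmalloc() above is a best-effort preallocation: failure is tolerated because trace_find_next_entry() can allocate later, on first use. Continuing the scratch-buffer sketch with hypothetical init/free helpers:

#include <stdlib.h>

struct scratch {            /* as in the earlier sketch */
        void *buf;
        size_t size;
};

/* Best-effort preallocation: a failed malloc() here is fine, since
 * scratch_save() can allocate later, on first use. */
static void scratch_init(struct scratch *s, size_t hint)
{
        s->buf = malloc(hint);
        s->size = s->buf ? hint : 0;
}

static void scratch_free(struct scratch *s)
{
        free(s->buf);       /* free(NULL) is a no-op, like kfree() */
        s->buf = NULL;
        s->size = 0;
}
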
@@ -4237,8 +4303,11 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
- /* stop the trace while dumping if we are not opening "snapshot" */
- if (!iter->snapshot)
+ /*
+ * If pause-on-trace is enabled, then stop the trace while
+ * dumping, unless this is the "snapshot" file
+ */
+ if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
tracing_stop_tr(tr);
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
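
Whether reading the trace file pauses tracing is now gated on TRACE_ITER_PAUSE_ON_TRACE. Assuming the flag follows the usual trace_options naming and is exposed as a "pause-on-trace" option file (an inference, not confirmed by this hunk), toggling it from userspace might look like:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Option file name inferred from TRACE_ITER_PAUSE_ON_TRACE;
         * writing "1" restores the old stop-while-reading behavior,
         * "0" keeps tracing live while the trace file is read. */
        const char *path = "/sys/kernel/tracing/options/pause-on-trace";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}
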
@@ -4269,6 +4338,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
+ kfree(iter->temp);
kfree(iter->buffer_iter);
release:
seq_release_private(inode, file);
@@ -4334,7 +4404,7 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
- if (!iter->snapshot)
+ if (!iter->snapshot && tr->stop_count)
/* reenable tracing if it was previously enabled */
tracing_start_tr(tr);
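
Since open() may no longer have stopped the trace, release() only restarts it when there is an outstanding stop (tr->stop_count is nonzero). A small sketch of why the start/stop pairing must stay balanced (illustrative names):

#include <stdbool.h>

struct tr { int stop_count; };

static void stop_tr(struct tr *tr)
{
        tr->stop_count++;
}

/* A restart only balances an earlier stop; calling it when
 * stop_count is zero would "start" a trace that was never
 * stopped, which is why the release path checks it first. */
static void start_tr(struct tr *tr)
{
        if (tr->stop_count > 0)
                tr->stop_count--;
}

static bool tracing_is_live(const struct tr *tr)
{
        return tr->stop_count == 0;
}
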
@@ -4344,6 +4414,7 @@ static int tracing_release(struct inode *inode, struct file *file)
mutex_destroy(&iter->mutex);
free_cpumask_var(iter->started);
+ kfree(iter->temp);
kfree(iter->trace);
kfree(iter->buffer_iter);
seq_release_private(inode, file);
@@ -4964,6 +5035,8 @@ static const char readme_msg[] =
#ifdef CONFIG_FUNCTION_TRACER
" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
"\t\t (function)\n"
+ " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
+ "\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
@@ -9146,6 +9219,9 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
/* Simulate the iterator */
trace_init_global_iter(&iter);
+ /* Can not use kmalloc for iter.temp */
+ iter.temp = static_temp_buf;
+ iter.temp_size = STATIC_TEMP_BUF_SIZE;
for_each_tracing_cpu(cpu) {
atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
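
ftrace_dump() runs in oops/panic-like contexts where kmalloc() is not safe, so the iterator is pointed at a fixed static buffer, and trace_find_next_entry() bails out instead of growing it (the STATIC_TEMP_BUF_SIZE check earlier). Folding that rule into the earlier scratch_save() sketch:

#include <stdlib.h>
#include <string.h>

#define STATIC_BUF_SIZE 128
static char static_buf[STATIC_BUF_SIZE];

struct scratch {            /* as in the earlier sketches */
        void *buf;
        size_t size;
};

/* scratch_save() variant with the ftrace_dump() rule folded in:
 * when pointed at the static buffer, never allocate; if the
 * record does not fit, skip it rather than call malloc(). */
static void *scratch_save_dump_safe(struct scratch *s,
                                    const void *data, size_t len)
{
        if (s->buf == static_buf) {
                if (len > s->size)
                        return NULL;    /* too big: bail, don't alloc */
        } else if (!s->buf || s->size < len) {
                free(s->buf);
                s->buf = malloc(len);
                s->size = s->buf ? len : 0;
                if (!s->buf)
                        return NULL;
        }
        memcpy(s->buf, data, len);
        return s->buf;
}
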
@@ -9334,7 +9410,7 @@ __init static int tracer_alloc_buffers(void)
goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */
- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
/* Must be called before global_trace.buffer is allocated */
trace_printk_init_buffers();
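
__start___trace_bprintk_fmt and __stop___trace_bprintk_fmt are linker-defined section bounds declared as arrays; a compiler may fold a direct array-to-array comparison into a constant (clang flags it with -Wtautological-compare), so the fix compares their addresses explicitly. A runnable sketch with an illustrative section name, relying on the ELF linker's automatic __start_/__stop_ symbols:

#include <stdio.h>

/* On ELF targets the linker defines __start_<sec>/__stop_<sec> for
 * any section whose name is a valid C identifier; "mysec" is an
 * illustrative stand-in for the __trace_bprintk_fmt section. */
__attribute__((used, section("mysec")))
static const char marker[] = "x";

extern const char __start_mysec[];
extern const char __stop_mysec[];

int main(void)
{
        /* &arr rather than arr: a compiler may fold a comparison of
         * two distinct arrays into a constant; taking the addresses
         * keeps it an honest runtime pointer comparison. */
        if (&__stop_mysec != &__start_mysec)
                printf("section is non-empty: %td bytes\n",
                       __stop_mysec - __start_mysec);
        return 0;
}
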