diff options
| author | Andrew Vagin <avagin@openvz.org> | 2012-07-11 18:14:58 +0400 | 
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2012-07-31 17:02:05 +0200 | 
| commit | e6dab5ffab59e910ec0e3355f4a6f29f7a7be474 (patch) | |
| tree | 87acf0fb071b8d09794ac7d834cb256de030cceb /kernel | |
| parent | d07bdfd322d307789f15b427dbcc39257665356f (diff) | |
| download | linux-e6dab5ffab59e910ec0e3355f4a6f29f7a7be474.tar.bz2 | |
perf/trace: Add ability to set a target task for events
A few events are interesting not only for the current task.
For example, sched_stat_* events are interesting for the task
which is woken up. For this reason, it would be good if such
events were delivered to a target task too.
Now a target task can be set by using __perf_task().
The original idea and a draft patch belongs to Peter Zijlstra.
I need these events for profiling sleep times. sched_switch is used for
getting callchains and sched_stat_* is used for getting time periods.
These events are combined in user space, then it can be analyzed by
perf tools.
Inspired-by: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arun Sharma <asharma@fb.com>
Signed-off-by: Andrew Vagin <avagin@openvz.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1342016098-213063-1-git-send-email-avagin@openvz.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/events/callchain.c | 9 | ||||
| -rw-r--r-- | kernel/events/core.c | 30 | ||||
| -rw-r--r-- | kernel/events/internal.h | 3 | ||||
| -rw-r--r-- | kernel/trace/trace_event_perf.c | 2 | ||||
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 6 | ||||
| -rw-r--r-- | kernel/trace/trace_syscalls.c | 4 | ||||
| -rw-r--r-- | kernel/trace/trace_uprobe.c | 2 | 
7 files changed, 46 insertions, 10 deletions
| diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 6581a040f399..98d4597f43d6 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -153,7 +153,8 @@ put_callchain_entry(int rctx)  	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);  } -struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +struct perf_callchain_entry * +perf_callchain(struct perf_event *event, struct pt_regs *regs)  {  	int rctx;  	struct perf_callchain_entry *entry; @@ -178,6 +179,12 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)  	}  	if (regs) { +		/* +		 * Disallow cross-task user callchains. +		 */ +		if (event->ctx->task && event->ctx->task != current) +			goto exit_put; +  		perf_callchain_store(entry, PERF_CONTEXT_USER);  		perf_callchain_user(entry, regs);  	} diff --git a/kernel/events/core.c b/kernel/events/core.c index f1cf0edeb39a..b7935fcec7d9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4039,7 +4039,7 @@ void perf_prepare_sample(struct perf_event_header *header,  	if (sample_type & PERF_SAMPLE_CALLCHAIN) {  		int size = 1; -		data->callchain = perf_callchain(regs); +		data->callchain = perf_callchain(event, regs);  		if (data->callchain)  			size += data->callchain->nr; @@ -5209,7 +5209,8 @@ static int perf_tp_event_match(struct perf_event *event,  }  void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, -		   struct pt_regs *regs, struct hlist_head *head, int rctx) +		   struct pt_regs *regs, struct hlist_head *head, int rctx, +		   struct task_struct *task)  {  	struct perf_sample_data data;  	struct perf_event *event; @@ -5228,6 +5229,31 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,  			perf_swevent_event(event, count, &data, regs);  	} +	/* +	 * If we got specified a target task, also iterate its context and +	 * deliver this event there too. 
+	 */ +	if (task && task != current) { +		struct perf_event_context *ctx; +		struct trace_entry *entry = record; + +		rcu_read_lock(); +		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); +		if (!ctx) +			goto unlock; + +		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { +			if (event->attr.type != PERF_TYPE_TRACEPOINT) +				continue; +			if (event->attr.config != entry->type) +				continue; +			if (perf_tp_event_match(event, &data, regs)) +				perf_swevent_event(event, count, &data, regs); +		} +unlock: +		rcu_read_unlock(); +	} +  	perf_swevent_put_recursion_context(rctx);  }  EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/events/internal.h b/kernel/events/internal.h index b0b107f90afc..a096c19f2c2a 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -101,7 +101,8 @@ __output_copy(struct perf_output_handle *handle,  }  /* Callchain handling */ -extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); +extern struct perf_callchain_entry * +perf_callchain(struct perf_event *event, struct pt_regs *regs);  extern int get_callchain_buffers(void);  extern void put_callchain_buffers(void); diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index fee3752ae8f6..8a6d2ee2086c 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -281,7 +281,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)  	head = this_cpu_ptr(event_function.perf_events);  	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, -			      1, &regs, head); +			      1, &regs, head, NULL);  #undef ENTRY_SIZE  } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b31d3d5699fe..1a2117043bb1 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1002,7 +1002,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,  	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);  	head = 
this_cpu_ptr(call->perf_events); -	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); +	perf_trace_buf_submit(entry, size, rctx, +					entry->ip, 1, regs, head, NULL);  }  /* Kretprobe profile handler */ @@ -1033,7 +1034,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,  	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);  	head = this_cpu_ptr(call->perf_events); -	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); +	perf_trace_buf_submit(entry, size, rctx, +					entry->ret_ip, 1, regs, head, NULL);  }  #endif	/* CONFIG_PERF_EVENTS */ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 96fc73369099..60e4d7875672 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -532,7 +532,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)  			       (unsigned long *)&rec->args);  	head = this_cpu_ptr(sys_data->enter_event->perf_events); -	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); +	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);  }  int perf_sysenter_enable(struct ftrace_event_call *call) @@ -608,7 +608,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)  	rec->ret = syscall_get_return_value(current, regs);  	head = this_cpu_ptr(sys_data->exit_event->perf_events); -	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); +	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);  }  int perf_sysexit_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2b36ac68549e..03003cd7dd96 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)  		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);  	head = this_cpu_ptr(call->perf_events); -	
perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); +	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);   out:  	preempt_enable(); |