From 3f6da3905398826d85731247e7fbcf53400c18bd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Mar 2010 13:01:18 +0100 Subject: perf: Rework and fix the arch CPU-hotplug hooks Remove the hw_perf_event_*() hotplug hooks in favour of per PMU hotplug notifiers. This has the advantage of reducing the static weak interface as well as exposing all hotplug actions to the PMU. Use this to fix x86 hotplug usage where we did things in ONLINE which should have been done in UP_PREPARE or STARTING. Signed-off-by: Peter Zijlstra Cc: Paul Mundt Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100305154128.736225361@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 4393b9e73740..73329dedb5ad 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) void __weak hw_perf_disable(void) { barrier(); } void __weak hw_perf_enable(void) { barrier(); } -void __weak hw_perf_event_setup(int cpu) { barrier(); } -void __weak hw_perf_event_setup_online(int cpu) { barrier(); } -void __weak hw_perf_event_setup_offline(int cpu) { barrier(); } - int __weak hw_perf_group_sched_in(struct perf_event *group_leader, struct perf_cpu_context *cpuctx, @@ -5382,8 +5378,6 @@ static void __cpuinit perf_event_init_cpu(int cpu) spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; spin_unlock(&perf_resource_lock); - - hw_perf_event_setup(cpu); } #ifdef CONFIG_HOTPLUG_CPU @@ -5423,20 +5417,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) perf_event_init_cpu(cpu); break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - hw_perf_event_setup_online(cpu); - break; - case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: perf_event_exit_cpu(cpu); break; - case CPU_DEAD: - hw_perf_event_setup_offline(cpu); - break; - default: break; } -- cgit v1.2.3 From 32975a4f114be52286f9a5bf6c230dbb8c0e1903 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 19:49:19 +0100 Subject: perf: Optimize perf_disable Currently we always call hw_perf_disable(), even if its already disabled, this seems superflous, esp. since it cannot be made NMI safe (see further patches). 
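A minimal standalone sketch of the counting pattern this change introduces (plain userspace C, not the kernel code: an ordinary int stands in for the per-CPU perf_disable_count and puts() for the hw_perf_*() hooks). Only the outermost disable/enable pair touches the PMU:

/*
 * Nested perf_disable()/perf_enable() sketch: hardware is disabled on
 * the 0 -> 1 transition and re-enabled on the 1 -> 0 transition only.
 */
#include <stdio.h>

static int disable_count;		/* per-CPU in the kernel */

static void hw_disable(void) { puts("hw_perf_disable()"); }
static void hw_enable(void)  { puts("hw_perf_enable()"); }

static void perf_disable(void)
{
	if (!disable_count++)		/* outermost caller only */
		hw_disable();
}

static void perf_enable(void)
{
	if (!--disable_count)		/* last caller re-enables */
		hw_enable();
}

int main(void)
{
	perf_disable();			/* prints hw_perf_disable() */
	perf_disable();			/* nested: no hardware access */
	perf_enable();			/* still nested: nothing printed */
	perf_enable();			/* prints hw_perf_enable() */
	return 0;
}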
Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 73329dedb5ad..d8108465397d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -93,25 +93,15 @@ void __weak perf_event_print_debug(void) { } static DEFINE_PER_CPU(int, perf_disable_count); -void __perf_disable(void) -{ - __get_cpu_var(perf_disable_count)++; -} - -bool __perf_enable(void) -{ - return !--__get_cpu_var(perf_disable_count); -} - void perf_disable(void) { - __perf_disable(); - hw_perf_disable(); + if (!__get_cpu_var(perf_disable_count)++) + hw_perf_disable(); } void perf_enable(void) { - if (__perf_enable()) + if (!--__get_cpu_var(perf_disable_count)) hw_perf_enable(); } -- cgit v1.2.3 From d4944a06666054707d23e11888e480af239e5abf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Mar 2010 13:51:20 +0100 Subject: perf: Provide better condition for event rotation Try to avoid useless rotation and PMU disables. [ Could be improved by keeping a nr_runnable count to better account for the < PERF_STAT_INACTIVE counters ] Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index d8108465397d..52c69a34d697 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1524,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(event, 1); + perf_disable(); event->pmu->unthrottle(event); + perf_enable(); } if (!event->attr.freq || !event->attr.sample_freq) continue; + perf_disable(); event->pmu->read(event); now = atomic64_read(&event->count); delta = now - hwc->freq_count_stamp; @@ -1537,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) if (delta > 0) perf_adjust_period(event, TICK_NSEC, delta); + perf_enable(); } raw_spin_unlock(&ctx->lock); } @@ -1546,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ static void rotate_ctx(struct perf_event_context *ctx) { - if (!ctx->nr_events) - return; - raw_spin_lock(&ctx->lock); /* Rotate the first entry last of non-pinned groups */ @@ -1561,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr) { struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; + int rotate = 0; if (!atomic_read(&nr_events)) return; cpuctx = &__get_cpu_var(perf_cpu_context); - ctx = curr->perf_event_ctxp; + if (cpuctx->ctx.nr_events && + cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) + rotate = 1; - perf_disable(); + ctx = curr->perf_event_ctxp; + if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active) + rotate = 1; perf_ctx_adjust_freq(&cpuctx->ctx); if (ctx) perf_ctx_adjust_freq(ctx); + if (!rotate) + return; + + perf_disable(); cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_out(ctx, EVENT_FLEXIBLE); @@ -1585,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr) cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_in(curr, EVENT_FLEXIBLE); - perf_enable(); } 
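The rotation condition above boils down to: rotate only when a context has more events than the PMU could keep active at once (nr_events != nr_active); otherwise the tick can skip the whole perf_disable()/sched_out/rotate/sched_in cycle. A small standalone sketch of that test (plain C; struct ctx is a stand-in, not the kernel's struct perf_event_context):

/*
 * Toy model of the "do we need to rotate?" decision added in
 * perf_event_task_tick() above.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int nr_events;		/* events attached to the context */
	int nr_active;		/* events currently programmed on the PMU */
};

static bool needs_rotation(const struct ctx *cpuctx, const struct ctx *taskctx)
{
	if (cpuctx->nr_events && cpuctx->nr_events != cpuctx->nr_active)
		return true;
	if (taskctx && taskctx->nr_events &&
	    taskctx->nr_events != taskctx->nr_active)
		return true;
	return false;		/* everything fits: leave the PMU alone */
}

int main(void)
{
	struct ctx cpu  = { .nr_events = 4, .nr_active = 4 };
	struct ctx task = { .nr_events = 6, .nr_active = 4 };

	printf("rotate: %d\n", needs_rotation(&cpu, &task));	/* 1 */
	printf("rotate: %d\n", needs_rotation(&cpu, NULL));	/* 0 */
	return 0;
}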
-- cgit v1.2.3 From db2c4c7791cd04512093d05afc693c3511a65fd7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 2 Feb 2010 23:34:40 +0100 Subject: lockdep: Move lock events under lockdep recursion protection There are rcu locked read side areas in the path where we submit a trace event. And these rcu_read_(un)lock() trigger lock events, which create recursive events. One pair in do_perf_sw_event: __lock_acquire | |--96.11%-- lock_acquire | | | |--27.21%-- do_perf_sw_event | | perf_tp_event | | | | | |--49.62%-- ftrace_profile_lock_release | | | lock_release | | | | | | | |--33.85%-- _raw_spin_unlock Another pair in perf_output_begin/end: __lock_acquire |--23.40%-- perf_output_begin | | __perf_event_overflow | | perf_swevent_overflow | | perf_swevent_add | | perf_swevent_ctx_event | | do_perf_sw_event | | perf_tp_event | | | | | |--55.37%-- ftrace_profile_lock_acquire | | | lock_acquire | | | | | | | |--37.31%-- _raw_spin_lock The problem is not that much the trace recursion itself, as we have a recursion protection already (though it's always wasteful to recurse). But the trace events are outside the lockdep recursion protection, then each lockdep event triggers a lock trace, which will trigger two other lockdep events. Here the recursive lock trace event won't be taken because of the trace recursion, so the recursion stops there but lockdep will still analyse these new events: To sum up, for each lockdep events we have: lock_*() | trace lock_acquire | ----- rcu_read_lock() | | | lock_acquire() | | | trace_lock_acquire() (stopped) | | | lockdep analyze | ----- rcu_read_unlock() | lock_release | trace_lock_release() (stopped) | lockdep analyze And you can repeat the above two times as we have two rcu read side sections when we submit an event. This is fixed in this patch by moving the lock trace event under the lockdep recursion protection. 
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Hitoshi Mitake Cc: Li Zefan Cc: Lai Jiangshan Cc: Masami Hiramatsu Cc: Jens Axboe --- kernel/lockdep.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 0c30d0455de1..65b5f5b7c298 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, { unsigned long flags; - trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); - if (unlikely(current->lockdep_recursion)) return; @@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, check_flags(flags); current->lockdep_recursion = 1; + trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, irqs_disabled_flags(flags), nest_lock, ip, 0); current->lockdep_recursion = 0; @@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested, { unsigned long flags; - trace_lock_release(lock, nested, ip); - if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; + trace_lock_release(lock, nested, ip); __lock_release(lock, nested, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); @@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; - trace_lock_contended(lock, ip); - if (unlikely(!lock_stat)) return; @@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; + trace_lock_contended(lock, ip); __lock_contended(lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); -- cgit v1.2.3 From 5331d7b84613b8325362dde53dc2bff2fb87d351 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 4 Mar 2010 21:15:56 +0100 Subject: perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot Events that trigger overflows by interrupting a context can use get_irq_regs() or task_pt_regs() to retrieve the state when the event triggered. But this is not the case for some other class of events like trace events as tracepoints are executed in the same context than the code that triggered the event. It means we need a different api to capture the regs there, namely we need a hot snapshot to get the most important informations for perf: the instruction pointer to get the event origin, the frame pointer for the callchain, the code segment for user_mode() tests (we always use __KERNEL_CS as trace events always occur from the kernel) and the eflags for further purposes. v2: rename perf_save_regs to perf_fetch_caller_regs as per Masami's suggestion. Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. 
Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs --- arch/x86/kernel/cpu/perf_event.c | 12 ++++++++++++ arch/x86/kernel/dumpstack.h | 15 ++++++++++++++ include/linux/perf_event.h | 42 +++++++++++++++++++++++++++++++++++++++- kernel/perf_event.c | 5 +++++ 4 files changed, 73 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1d665a0b202c..c6bde7d7afdc 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1707,3 +1707,15 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } + +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ + regs->ip = ip; + /* + * perf_arch_fetch_caller_regs adds another call, we need to increment + * the skip level + */ + regs->bp = rewind_frame_pointer(skip + 1); + regs->cs = __KERNEL_CS; + local_save_flags(regs->flags); +} diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h index 4fd1420faffa..29e5f7c845b2 100644 --- a/arch/x86/kernel/dumpstack.h +++ b/arch/x86/kernel/dumpstack.h @@ -29,4 +29,19 @@ struct stack_frame { struct stack_frame *next_frame; unsigned long return_address; }; + +static inline unsigned long rewind_frame_pointer(int n) +{ + struct stack_frame *frame; + + get_bp(frame); + +#ifdef CONFIG_FRAME_POINTER + while (n--) + frame = frame->next_frame; #endif + + return (unsigned long)frame; +} + +#endif /* DUMPSTACK_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 80acbf3d5de1..70cffd052c04 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -452,6 +452,7 @@ enum perf_callchain_context { #include #include #include +#include #include #define PERF_MAX_STACK_DEPTH 255 @@ -847,6 +848,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) __perf_sw_event(event_id, nr, nmi, regs, addr); } +extern void +perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); + +/* + * Take a snapshot of the regs. Skip ip and frame pointer to + * the nth caller. 
We only need a few of the regs: + * - ip for PERF_SAMPLE_IP + * - cs for user_mode() tests + * - bp for callchains + * - eflags, for future purposes, just in case + */ +static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) +{ + unsigned long ip; + + memset(regs, 0, sizeof(*regs)); + + switch (skip) { + case 1 : + ip = CALLER_ADDR0; + break; + case 2 : + ip = CALLER_ADDR1; + break; + case 3 : + ip = CALLER_ADDR2; + break; + case 4: + ip = CALLER_ADDR3; + break; + /* No need to support further for now */ + default: + ip = 0; + } + + return perf_arch_fetch_caller_regs(regs, ip, skip); +} + extern void __perf_event_mmap(struct vm_area_struct *vma); static inline void perf_event_mmap(struct vm_area_struct *vma) @@ -880,7 +919,8 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size); +extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, + int entry_size, struct pt_regs *regs); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 52c69a34d697..359d7f690c2b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2786,6 +2786,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } +__weak +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ +} + /* * Output */ -- cgit v1.2.3 From c530665c31c0140b74ca7689e7f836177796e5bd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Mar 2010 07:16:16 +0100 Subject: perf: Take a hot regs snapshot for trace events We are taking a wrong regs snapshot when a trace event triggers. Either we use get_irq_regs(), which gives us the interrupted registers if we are in an interrupt, or we use task_pt_regs() which gives us the state before we entered the kernel, assuming we are lucky enough to be no kernel thread, in which case task_pt_regs() returns the initial set of regs when the kernel thread was started. What we want is different. We need a hot snapshot of the regs, so that we can get the instruction pointer to record in the sample, the frame pointer for the callchain, and some other things. Let's use the new perf_fetch_caller_regs() for that. Comparison with perf record -e lock: -R -a -f -g Before: perf [kernel] [k] __do_softirq | --- __do_softirq | |--55.16%-- __open | --44.84%-- __write_nocancel After: perf [kernel] [k] perf_tp_event | --- perf_tp_event | |--41.07%-- lock_acquire | | | |--39.36%-- _raw_spin_lock | | | | | |--7.81%-- hrtimer_interrupt | | | smp_apic_timer_interrupt | | | apic_timer_interrupt The old case was producing unreliable callchains. Now having right frame and instruction pointers, we have the trace we want. Also syscalls and kprobe events already have the right regs, let's use them instead of wasting a retrieval. v2: Follow the rename perf_save_regs() -> perf_fetch_caller_regs() Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. 
Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs --- include/linux/ftrace_event.h | 7 +++++-- include/trace/ftrace.h | 6 +++++- kernel/perf_event.c | 8 ++------ kernel/trace/trace_event_profile.c | 3 ++- kernel/trace/trace_kprobe.c | 5 +++-- kernel/trace/trace_syscalls.c | 4 ++-- 6 files changed, 19 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 6b7c444ab8f6..ac424f18ce63 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -187,6 +187,9 @@ do { \ #ifdef CONFIG_PERF_EVENTS struct perf_event; + +DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); + extern int ftrace_profile_enable(int event_id); extern void ftrace_profile_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, @@ -198,11 +201,11 @@ ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, static inline void ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags) + u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; - perf_tp_event(entry->type, addr, count, raw_data, size); + perf_tp_event(entry->type, addr, count, raw_data, size, regs); perf_swevent_put_recursion_context(rctx); local_irq_restore(irq_flags); } diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 0804cd594803..f31bb8b9777c 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -764,6 +764,7 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ + struct pt_regs *__regs; \ int __entry_size; \ int __data_size; \ int rctx; \ @@ -784,8 +785,11 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ \ { assign; } \ \ + __regs = &__get_cpu_var(perf_trace_regs); \ + perf_fetch_caller_regs(__regs, 2); \ + \ ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, irq_flags); \ + __count, irq_flags, __regs); \ } #undef DEFINE_EVENT diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 359d7f690c2b..45b4b6e55891 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4318,9 +4318,8 @@ static const struct pmu perf_ops_task_clock = { #ifdef CONFIG_EVENT_TRACING void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size) + int entry_size, struct pt_regs *regs) { - struct pt_regs *regs = get_irq_regs(); struct perf_sample_data data; struct perf_raw_record raw = { .size = entry_size, @@ -4330,12 +4329,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, perf_sample_data_init(&data, addr); data.raw = &raw; - if (!regs) - regs = task_pt_regs(current); - /* Trace events already protected against recursion */ do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, - &data, regs); + &data, regs); } EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index f0d693005075..e66d21e15a0f 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -2,13 +2,14 @@ * trace event based perf counter profiling * * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * + * Copyright (C) 2009-2010 Frederic Weisbecker */ #include #include #include "trace.h" +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); static char 
*perf_trace_buf; static char *perf_trace_buf_nmi; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 505c92273b1a..f7a20a8bfb31 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1240,7 +1240,7 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ @@ -1271,7 +1271,8 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + irq_flags, regs); } static int probe_profile_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index cba47d7935cc..7e6e84fb7b6c 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -467,7 +467,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysenter_enable(struct ftrace_event_call *call) @@ -542,7 +542,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysexit_enable(struct ftrace_event_call *call) -- cgit v1.2.3 From 97d5a22005f38057b4bc0d95f81cd26510268794 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 5 Mar 2010 05:35:37 +0100 Subject: perf: Drop the obsolete profile naming for trace events Drop the obsolete "profile" naming used by perf for trace events. Perf can now do more than simple events counting, so generalize the API naming. 
Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Jason Baron --- include/linux/ftrace_event.h | 16 ++-- include/linux/syscalls.h | 24 +++--- include/trace/ftrace.h | 38 ++++----- include/trace/syscall.h | 8 +- kernel/perf_event.c | 4 +- kernel/trace/Makefile | 2 +- kernel/trace/trace_event_perf.c | 165 +++++++++++++++++++++++++++++++++++++ kernel/trace/trace_event_profile.c | 165 ------------------------------------- kernel/trace/trace_events.c | 2 +- kernel/trace/trace_kprobe.c | 28 +++---- kernel/trace/trace_syscalls.c | 72 ++++++++-------- 11 files changed, 262 insertions(+), 262 deletions(-) create mode 100644 kernel/trace/trace_event_perf.c delete mode 100644 kernel/trace/trace_event_profile.c (limited to 'kernel') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index ac424f18ce63..c0f4b364c711 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -131,12 +131,12 @@ struct ftrace_event_call { void *mod; void *data; - int profile_count; - int (*profile_enable)(struct ftrace_event_call *); - void (*profile_disable)(struct ftrace_event_call *); + int perf_refcount; + int (*perf_event_enable)(struct ftrace_event_call *); + void (*perf_event_disable)(struct ftrace_event_call *); }; -#define FTRACE_MAX_PROFILE_SIZE 2048 +#define PERF_MAX_TRACE_SIZE 2048 #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ @@ -190,17 +190,17 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int ftrace_profile_enable(int event_id); -extern void ftrace_profile_disable(int event_id); +extern int perf_trace_enable(int event_id); +extern void perf_trace_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); extern void * -ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, +perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, unsigned long *irq_flags); static inline void -ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, +perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 8126f239edf0..51435bcc3460 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -101,18 +101,18 @@ struct perf_event_attr; #ifdef CONFIG_PERF_EVENTS -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ - .profile_enable = prof_sysenter_enable, \ - .profile_disable = prof_sysenter_disable, +#define TRACE_SYS_ENTER_PERF_INIT(sname) \ + .perf_event_enable = perf_sysenter_enable, \ + .perf_event_disable = perf_sysenter_disable, -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ - .profile_enable = prof_sysexit_enable, \ - .profile_disable = prof_sysexit_disable, +#define TRACE_SYS_EXIT_PERF_INIT(sname) \ + .perf_event_enable = perf_sysexit_enable, \ + .perf_event_disable = perf_sysexit_disable, #else -#define TRACE_SYS_ENTER_PROFILE(sname) -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) -#define TRACE_SYS_EXIT_PROFILE(sname) -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) +#define TRACE_SYS_ENTER_PERF(sname) +#define TRACE_SYS_ENTER_PERF_INIT(sname) +#define TRACE_SYS_EXIT_PERF(sname) +#define TRACE_SYS_EXIT_PERF_INIT(sname) #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_FTRACE_SYSCALLS @@ 
-149,7 +149,7 @@ struct perf_event_attr; .regfunc = reg_event_syscall_enter, \ .unregfunc = unreg_event_syscall_enter, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_ENTER_PROFILE_INIT(sname) \ + TRACE_SYS_ENTER_PERF_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ @@ -171,7 +171,7 @@ struct perf_event_attr; .regfunc = reg_event_syscall_exit, \ .unregfunc = unreg_event_syscall_exit, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_EXIT_PROFILE_INIT(sname) \ + TRACE_SYS_EXIT_PERF_INIT(sname) \ } #define SYSCALL_METADATA(sname, nb) \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index f31bb8b9777c..25ab56f75d65 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ \ -static void ftrace_profile_##name(proto); \ +static void perf_trace_##name(proto); \ \ static notrace int \ -ftrace_profile_enable_##name(struct ftrace_event_call *unused) \ +perf_trace_enable_##name(struct ftrace_event_call *unused) \ { \ - return register_trace_##name(ftrace_profile_##name); \ + return register_trace_##name(perf_trace_##name); \ } \ \ static notrace void \ -ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ +perf_trace_disable_##name(struct ftrace_event_call *unused) \ { \ - unregister_trace_##name(ftrace_profile_##name); \ + unregister_trace_##name(perf_trace_##name); \ } #undef DEFINE_EVENT_PRINT @@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ #ifdef CONFIG_PERF_EVENTS -#define _TRACE_PROFILE_INIT(call) \ - .profile_enable = ftrace_profile_enable_##call, \ - .profile_disable = ftrace_profile_disable_##call, +#define _TRACE_PERF_INIT(call) \ + .perf_event_enable = perf_trace_enable_##call, \ + .perf_event_disable = perf_trace_disable_##call, #else -#define _TRACE_PROFILE_INIT(call) +#define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ #undef __entry @@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##template, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #undef DEFINE_EVENT_PRINT @@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##call, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Define the insertion callback to profile events + * Define the insertion callback to perf events * * The job is very similar to ftrace_raw_event_ except that we don't * insert in the ring buffer but in a perf counter. 
* - * static void ftrace_profile_(proto) + * static void ftrace_perf_(proto) * { * struct ftrace_data_offsets_ __maybe_unused __data_offsets; * struct ftrace_event_call *event_call = &event_; @@ -757,7 +757,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ +perf_trace_templ_##call(struct ftrace_event_call *event_call, \ proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ @@ -774,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ + if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ "profile buffer not large enough")) \ return; \ - entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \ + entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ __entry_size, event_call->id, &rctx, &irq_flags); \ if (!entry) \ return; \ @@ -788,17 +788,17 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ __regs = &__get_cpu_var(perf_trace_regs); \ perf_fetch_caller_regs(__regs, 2); \ \ - ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ + perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ __count, irq_flags, __regs); \ } #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ -static notrace void ftrace_profile_##call(proto) \ +static notrace void perf_trace_##call(proto) \ { \ struct ftrace_event_call *event_call = &event_##call; \ \ - ftrace_profile_templ_##template(event_call, args); \ + perf_trace_templ_##template(event_call, args); \ } #undef DEFINE_EVENT_PRINT diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 0387100752f0..e5e5f48dbfb3 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); #endif #ifdef CONFIG_PERF_EVENTS -int prof_sysenter_enable(struct ftrace_event_call *call); -void prof_sysenter_disable(struct ftrace_event_call *call); -int prof_sysexit_enable(struct ftrace_event_call *call); -void prof_sysexit_disable(struct ftrace_event_call *call); +int perf_sysenter_enable(struct ftrace_event_call *call); +void perf_sysenter_disable(struct ftrace_event_call *call); +int perf_sysexit_enable(struct ftrace_event_call *call); +void perf_sysexit_disable(struct ftrace_event_call *call); #endif #endif /* _TRACE_SYSCALL_H */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 45b4b6e55891..c502b18594cc 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event, static void tp_perf_event_destroy(struct perf_event *event) { - ftrace_profile_disable(event->attr.config); + perf_trace_disable(event->attr.config); } static const struct pmu *tp_perf_event_init(struct perf_event *event) @@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (ftrace_profile_enable(event->attr.config)) + if (perf_trace_enable(event->attr.config)) return NULL; event->destroy = tp_perf_event_destroy; diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index d00c6fe23f54..78edc6490038 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) 
+= trace_events.o obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o ifeq ($(CONFIG_PERF_EVENTS),y) -obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o +obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o endif obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c new file mode 100644 index 000000000000..f315b12a41d8 --- /dev/null +++ b/kernel/trace/trace_event_perf.c @@ -0,0 +1,165 @@ +/* + * trace event based perf event profiling/tracing + * + * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra + * Copyright (C) 2009-2010 Frederic Weisbecker + */ + +#include +#include +#include "trace.h" + +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); + +static char *perf_trace_buf; +static char *perf_trace_buf_nmi; + +typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ; + +/* Count the events in use (per event id, not per instance) */ +static int total_ref_count; + +static int perf_trace_event_enable(struct ftrace_event_call *event) +{ + char *buf; + int ret = -ENOMEM; + + if (event->perf_refcount++ > 0) + return 0; + + if (!total_ref_count) { + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf; + + rcu_assign_pointer(perf_trace_buf, buf); + + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf_nmi; + + rcu_assign_pointer(perf_trace_buf_nmi, buf); + } + + ret = event->perf_event_enable(event); + if (!ret) { + total_ref_count++; + return 0; + } + +fail_buf_nmi: + if (!total_ref_count) { + free_percpu(perf_trace_buf_nmi); + free_percpu(perf_trace_buf); + perf_trace_buf_nmi = NULL; + perf_trace_buf = NULL; + } +fail_buf: + event->perf_refcount--; + + return ret; +} + +int perf_trace_enable(int event_id) +{ + struct ftrace_event_call *event; + int ret = -EINVAL; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id && event->perf_event_enable && + try_module_get(event->mod)) { + ret = perf_trace_event_enable(event); + break; + } + } + mutex_unlock(&event_mutex); + + return ret; +} + +static void perf_trace_event_disable(struct ftrace_event_call *event) +{ + char *buf, *nmi_buf; + + if (--event->perf_refcount > 0) + return; + + event->perf_event_disable(event); + + if (!--total_ref_count) { + buf = perf_trace_buf; + rcu_assign_pointer(perf_trace_buf, NULL); + + nmi_buf = perf_trace_buf_nmi; + rcu_assign_pointer(perf_trace_buf_nmi, NULL); + + /* + * Ensure every events in profiling have finished before + * releasing the buffers + */ + synchronize_sched(); + + free_percpu(buf); + free_percpu(nmi_buf); + } +} + +void perf_trace_disable(int event_id) +{ + struct ftrace_event_call *event; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id) { + perf_trace_event_disable(event); + module_put(event->mod); + break; + } + } + mutex_unlock(&event_mutex); +} + +__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, + int *rctxp, unsigned long *irq_flags) +{ + struct trace_entry *entry; + char *trace_buf, *raw_data; + int pc, cpu; + + pc = preempt_count(); + + /* Protect the per cpu buffer, begin the rcu read side */ + local_irq_save(*irq_flags); + + *rctxp = perf_swevent_get_recursion_context(); + if (*rctxp < 0) + goto err_recursion; + + cpu = smp_processor_id(); + + if (in_nmi()) + trace_buf = rcu_dereference(perf_trace_buf_nmi); + else + trace_buf = 
rcu_dereference(perf_trace_buf); + + if (!trace_buf) + goto err; + + raw_data = per_cpu_ptr(trace_buf, cpu); + + /* zero the dead bytes from align to not leak stack to user */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + + entry = (struct trace_entry *)raw_data; + tracing_generic_entry_update(entry, *irq_flags, pc); + entry->type = type; + + return raw_data; +err: + perf_swevent_put_recursion_context(*rctxp); +err_recursion: + local_irq_restore(*irq_flags); + return NULL; +} +EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c deleted file mode 100644 index e66d21e15a0f..000000000000 --- a/kernel/trace/trace_event_profile.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * trace event based perf counter profiling - * - * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * Copyright (C) 2009-2010 Frederic Weisbecker - */ - -#include -#include -#include "trace.h" - -DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); - -static char *perf_trace_buf; -static char *perf_trace_buf_nmi; - -typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; - -/* Count the events in use (per event id, not per instance) */ -static int total_profile_count; - -static int ftrace_profile_enable_event(struct ftrace_event_call *event) -{ - char *buf; - int ret = -ENOMEM; - - if (event->profile_count++ > 0) - return 0; - - if (!total_profile_count) { - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf; - - rcu_assign_pointer(perf_trace_buf, buf); - - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf_nmi; - - rcu_assign_pointer(perf_trace_buf_nmi, buf); - } - - ret = event->profile_enable(event); - if (!ret) { - total_profile_count++; - return 0; - } - -fail_buf_nmi: - if (!total_profile_count) { - free_percpu(perf_trace_buf_nmi); - free_percpu(perf_trace_buf); - perf_trace_buf_nmi = NULL; - perf_trace_buf = NULL; - } -fail_buf: - event->profile_count--; - - return ret; -} - -int ftrace_profile_enable(int event_id) -{ - struct ftrace_event_call *event; - int ret = -EINVAL; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id && event->profile_enable && - try_module_get(event->mod)) { - ret = ftrace_profile_enable_event(event); - break; - } - } - mutex_unlock(&event_mutex); - - return ret; -} - -static void ftrace_profile_disable_event(struct ftrace_event_call *event) -{ - char *buf, *nmi_buf; - - if (--event->profile_count > 0) - return; - - event->profile_disable(event); - - if (!--total_profile_count) { - buf = perf_trace_buf; - rcu_assign_pointer(perf_trace_buf, NULL); - - nmi_buf = perf_trace_buf_nmi; - rcu_assign_pointer(perf_trace_buf_nmi, NULL); - - /* - * Ensure every events in profiling have finished before - * releasing the buffers - */ - synchronize_sched(); - - free_percpu(buf); - free_percpu(nmi_buf); - } -} - -void ftrace_profile_disable(int event_id) -{ - struct ftrace_event_call *event; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id) { - ftrace_profile_disable_event(event); - module_put(event->mod); - break; - } - } - mutex_unlock(&event_mutex); -} - -__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, - int *rctxp, unsigned long *irq_flags) -{ - struct trace_entry *entry; - char *trace_buf, *raw_data; - int pc, cpu; - - pc = preempt_count(); - - /* Protect the per cpu buffer, begin the rcu read side */ - local_irq_save(*irq_flags); - - *rctxp = 
perf_swevent_get_recursion_context(); - if (*rctxp < 0) - goto err_recursion; - - cpu = smp_processor_id(); - - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); - - if (!trace_buf) - goto err; - - raw_data = per_cpu_ptr(trace_buf, cpu); - - /* zero the dead bytes from align to not leak stack to user */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - - entry = (struct trace_entry *)raw_data; - tracing_generic_entry_update(entry, *irq_flags, pc); - entry->type = type; - - return raw_data; -err: - perf_swevent_put_recursion_context(*rctxp); -err_recursion: - local_irq_restore(*irq_flags); - return NULL; -} -EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 3f972ad98d04..beab8bf2f310 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, trace_create_file("enable", 0644, call->dir, call, enable); - if (call->id && call->profile_enable) + if (call->id && call->perf_event_enable) trace_create_file("id", 0444, call->dir, call, id); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f7a20a8bfb31..1251e367bae9 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp) #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ -static __kprobes void kprobe_profile_func(struct kprobe *kp, +static __kprobes void kprobe_perf_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); @@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ -static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, +static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); @@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - 
ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags, regs); } -static int probe_profile_enable(struct ftrace_event_call *call) +static int probe_perf_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call) return enable_kprobe(&tp->rp.kp); } -static void probe_profile_disable(struct ftrace_event_call *call) +static void probe_perf_disable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) kprobe_trace_func(kp, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kprobe_profile_func(kp, regs); + kprobe_perf_func(kp, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) kretprobe_trace_func(ri, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kretprobe_profile_func(ri, regs); + kretprobe_perf_func(ri, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp) call->unregfunc = probe_event_disable; #ifdef CONFIG_PERF_EVENTS - call->profile_enable = probe_profile_enable; - call->profile_disable = probe_profile_disable; + call->perf_event_enable = probe_perf_enable; + call->perf_event_disable = probe_perf_disable; #endif call->data = tp; ret = trace_add_event_call(call); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 7e6e84fb7b6c..33c2a5b769dc 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls); #ifdef CONFIG_PERF_EVENTS -static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); -static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); -static int sys_prof_refcount_enter; -static int sys_prof_refcount_exit; +static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls); +static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); +static int sys_perf_refcount_enter; +static int sys_perf_refcount_exit; -static void prof_syscall_enter(struct pt_regs *regs, long id) +static void perf_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; @@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "profile buffer not large enough")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "perf buffer not large enough")) return; - rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, sys_data->enter_event->id, &rctx, &flags); if (!rec) return; @@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long 
*)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); } -int prof_sysenter_enable(struct ftrace_event_call *call) +int perf_sysenter_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_enter) - ret = register_trace_sys_enter(prof_syscall_enter); + if (!sys_perf_refcount_enter) + ret = register_trace_sys_enter(perf_syscall_enter); if (ret) { pr_info("event trace: Could not activate" "syscall entry trace point"); } else { - set_bit(num, enabled_prof_enter_syscalls); - sys_prof_refcount_enter++; + set_bit(num, enabled_perf_enter_syscalls); + sys_perf_refcount_enter++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysenter_disable(struct ftrace_event_call *call) +void perf_sysenter_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_enter--; - clear_bit(num, enabled_prof_enter_syscalls); - if (!sys_prof_refcount_enter) - unregister_trace_sys_enter(prof_syscall_enter); + sys_perf_refcount_enter--; + clear_bit(num, enabled_perf_enter_syscalls); + if (!sys_perf_refcount_enter) + unregister_trace_sys_enter(perf_syscall_enter); mutex_unlock(&syscall_trace_lock); } -static void prof_syscall_exit(struct pt_regs *regs, long ret) +static void perf_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; @@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) * Impossible, but be paranoid with the future * How to put this check outside runtime? 
*/ - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "exit event has grown above profile buffer size")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "exit event has grown above perf buffer size")) return; - rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, sys_data->exit_event->id, &rctx, &flags); if (!rec) return; @@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); } -int prof_sysexit_enable(struct ftrace_event_call *call) +int perf_sysexit_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_exit) - ret = register_trace_sys_exit(prof_syscall_exit); + if (!sys_perf_refcount_exit) + ret = register_trace_sys_exit(perf_syscall_exit); if (ret) { pr_info("event trace: Could not activate" "syscall exit trace point"); } else { - set_bit(num, enabled_prof_exit_syscalls); - sys_prof_refcount_exit++; + set_bit(num, enabled_perf_exit_syscalls); + sys_perf_refcount_exit++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysexit_disable(struct ftrace_event_call *call) +void perf_sysexit_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_exit--; - clear_bit(num, enabled_prof_exit_syscalls); - if (!sys_prof_refcount_exit) - unregister_trace_sys_exit(prof_syscall_exit); + sys_perf_refcount_exit--; + clear_bit(num, enabled_perf_exit_syscalls); + if (!sys_perf_refcount_exit) + unregister_trace_sys_exit(perf_syscall_exit); mutex_unlock(&syscall_trace_lock); } -- cgit v1.2.3 From 220b140b52ab6cc133f674a7ffec8fa792054f25 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 10 Mar 2010 20:45:52 +1100 Subject: perf_event: Fix oops triggered by cpu offline/online Anton Blanchard found that he could reliably make the kernel hit a BUG_ON in the slab allocator by taking a cpu offline and then online while a system-wide perf record session was running. The reason is that when the cpu comes up, we completely reinitialize the ctx field of the struct perf_cpu_context for the cpu. If there is a system-wide perf record session running, then there will be a struct perf_event that has a reference to the context, so its refcount will be 2. (The perf_event has been removed from the context's group_entry and event_entry lists by perf_event_exit_cpu(), but that doesn't remove the perf_event's reference to the context and doesn't decrement the context's refcount.) When the cpu comes up, perf_event_init_cpu() gets called, and it calls __perf_event_init_context() on the cpu's context. That resets the refcount to 1. Then when the perf record session finishes and the perf_event is closed, the refcount gets decremented to 0 and the context gets kfreed after an RCU grace period. Since the context wasn't kmalloced -- it's part of a per-cpu variable -- bad things happen. In fact we don't need to completely reinitialize the context when the cpu comes up. It's sufficient to initialize the context once at boot, but we need to do it for all possible cpus. 
This moves the context initialization to happen at boot time. With this, we don't trash the refcount and the context never gets kfreed, and we don't hit the BUG_ON. Reported-by: Anton Blanchard Signed-off-by: Paul Mackerras Tested-by: Anton Blanchard Acked-by: Peter Zijlstra Cc: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c502b18594cc..fb3031cf9f17 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5368,12 +5368,22 @@ int perf_event_init_task(struct task_struct *child) return ret; } +static void __init perf_event_init_all_cpus(void) +{ + int cpu; + struct perf_cpu_context *cpuctx; + + for_each_possible_cpu(cpu) { + cpuctx = &per_cpu(perf_cpu_context, cpu); + __perf_event_init_context(&cpuctx->ctx, NULL); + } +} + static void __cpuinit perf_event_init_cpu(int cpu) { struct perf_cpu_context *cpuctx; cpuctx = &per_cpu(perf_cpu_context, cpu); - __perf_event_init_context(&cpuctx->ctx, NULL); spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; @@ -5439,6 +5449,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = { void __init perf_event_init(void) { + perf_event_init_all_cpus(); perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, -- cgit v1.2.3 From 83ff56f46a8532488ee364bb93a9cb2a59490d33 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 9 Mar 2010 10:22:19 -0500 Subject: kprobes: Calculate the index correctly when freeing the out-of-line execution slot From : Ananth N Mavinakayanahalli When freeing the instruction slot, the arithmetic to calculate the index of the slot in the page needs to account for the total size of the instruction on the various architectures. Calculate the index correctly when freeing the out-of-line execution slot. Reported-by: Sachin Sant Reported-by: Heiko Carstens Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Masami Hiramatsu LKML-Reference: <4B9667AB.9050507@redhat.com> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index fa034d29cf73..0ed46f3e51e9 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, struct kprobe_insn_page *kip; list_for_each_entry(kip, &c->pages, list) { - long idx = ((long)slot - (long)kip->insns) / c->insn_size; + long idx = ((long)slot - (long)kip->insns) / + (c->insn_size * sizeof(kprobe_opcode_t)); if (idx >= 0 && idx < slots_per_page(c)) { WARN_ON(kip->slot_used[idx] != SLOT_USED); if (dirty) { -- cgit v1.2.3 From 639fe4b12f92b54c9c3b38c82cdafaa38cfd3e63 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 11 Mar 2010 15:30:35 +0800 Subject: perf: export perf_trace_regs and perf_arch_fetch_caller_regs Export perf_trace_regs and perf_arch_fetch_caller_regs since module will use these. 
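For a per-CPU variable the plain EXPORT_SYMBOL_GPL() is not enough; it needs the per-CPU flavour of the export before a module may reference it. A short sketch of the pattern the diff below applies to perf_trace_regs (the consuming module code at the bottom is hypothetical and only illustrates why the export matters):

/* core kernel side, e.g. kernel/trace/trace_event_perf.c */
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/module.h>

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

/* in a module elsewhere (hypothetical user) */
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

static void example_peek_regs(void)	/* hypothetical helper */
{
	/* without the export above, modpost reports this symbol undefined */
	struct pt_regs *regs = &__get_cpu_var(perf_trace_regs);

	(void)regs;	/* caller must keep preemption disabled across use */
}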
Signed-off-by: Xiao Guangrong [ use EXPORT_PER_CPU_SYMBOL_GPL() ] Signed-off-by: Peter Zijlstra LKML-Reference: <4B989C1B.2090407@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 1 + kernel/trace/trace_event_perf.c | 1 + 2 files changed, 2 insertions(+) (limited to 'kernel') diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5fb490c6ee5c..7645faea8e85 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1713,3 +1713,4 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski regs->cs = __KERNEL_CS; local_save_flags(regs->flags); } +EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index f315b12a41d8..0709e4f75114 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -10,6 +10,7 @@ #include "trace.h" DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); +EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); static char *perf_trace_buf; static char *perf_trace_buf_nmi; -- cgit v1.2.3 From dcd5c1662db59a6b82942f47fb6ac9dd63f6d3dd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 16 Mar 2010 01:05:02 +0100 Subject: perf: Fix unexported generic perf_arch_fetch_caller_regs perf_arch_fetch_caller_regs() is exported for the overriden x86 version, but not for the generic weak version. As a general rule, weak functions should not have their symbol exported in the same file they are defined. So let's export it on trace_event_perf.c as it is used by trace events only. This fixes: ERROR: ".perf_arch_fetch_caller_regs" [fs/xfs/xfs.ko] undefined! ERROR: ".perf_arch_fetch_caller_regs" [arch/powerpc/platforms/cell/spufs/spufs.ko] undefined! -v2: And also only build it if trace events are enabled. 
-v3: Fix changelog mistake Reported-by: Stephen Rothwell Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Xiao Guangrong Cc: Paul Mackerras LKML-Reference: <1268697902-9518-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 3 ++- kernel/perf_event.c | 2 ++ kernel/trace/trace_event_perf.c | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 7645faea8e85..60398a0d947c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1702,6 +1702,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } +#ifdef CONFIG_EVENT_TRACING void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) { regs->ip = ip; @@ -1713,4 +1714,4 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski regs->cs = __KERNEL_CS; local_save_flags(regs->flags); } -EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); +#endif diff --git a/kernel/perf_event.c b/kernel/perf_event.c index fb3031cf9f17..574ee58a3046 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2786,10 +2786,12 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } +#ifdef CONFIG_EVENT_TRACING __weak void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) { } +#endif /* * Output diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 0709e4f75114..7d79a10c3cde 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -12,6 +12,8 @@ DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); +EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); + static char *perf_trace_buf; static char *perf_trace_buf_nmi; -- cgit v1.2.3
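Pulling the last two patches together, the final placement of perf_arch_fetch_caller_regs() looks roughly like this (a condensed restatement of the diffs above, with the file split shown as comments and includes omitted, not new code):

/* kernel/perf_event.c -- generic weak no-op, only built with trace events */
#ifdef CONFIG_EVENT_TRACING
__weak
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
}
#endif

/* arch/x86/kernel/cpu/perf_event.c -- strong definition overrides the weak one */
#ifdef CONFIG_EVENT_TRACING
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	regs->ip = ip;
	/* skip + 1: this call adds one more frame to unwind past */
	regs->bp = rewind_frame_pointer(skip + 1);
	regs->cs = __KERNEL_CS;
	local_save_flags(regs->flags);
}
#endif

/*
 * kernel/trace/trace_event_perf.c -- the export lives with the caller rather
 * than with either definition, so the weak and strong versions stay
 * interchangeable at link time.
 */
EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);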