path: root/tools/perf/util/bpf_skel/off_cpu.bpf.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2022-07-02 09:28:36 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-07-02 09:28:36 -0700
commit    bb7c51268776941b7533374caabcaaed302b91e0 (patch)
tree      ee85d082f387a196b91e87da6c3f2df3fe1c33b6 /tools/perf/util/bpf_skel/off_cpu.bpf.c
parent    5411de073362300d99bb35d46d77d656760e4606 (diff)
parent    ff898552fb32d255517fb0676f9fa500664c484d (diff)
Merge tag 'perf-tools-fixes-for-v5.19-2022-07-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - BPF program info linear (BPIL) data is accessed assuming 64-bit
   alignment, resulting in undefined behavior as the data is just byte
   aligned. Fix it, found using -fsanitize=undefined.

 - Fix 'perf offcpu' build on old kernels wrt task_struct's
   state/__state field.

 - Fix perf_event_attr.sample_type setting on the 'offcpu-time' event
   synthesized by the 'perf offcpu' tool.

 - Don't bail out when synthesizing PERF_RECORD_ events for pre-existing
   threads when one goes away while parsing its procfs entries.

 - Don't sort the task scan result from /proc, it's not needed and
   introduces bugs when the main thread isn't the first one to be
   processed.

 - Fix uninitialized 'offset' variable on aarch64 in the unwind code.

 - Sync KVM headers with the kernel sources.

* tag 'perf-tools-fixes-for-v5.19-2022-07-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf synthetic-events: Ignore dead threads during event synthesis
  perf synthetic-events: Don't sort the task scan result from /proc
  perf unwind: Fix unitialized 'offset' variable on aarch64
  tools headers UAPI: Sync linux/kvm.h with the kernel sources
  perf bpf: 8 byte align bpil data
  tools kvm headers arm64: Update KVM headers from the kernel sources
  perf offcpu: Accept allowed sample types only
  perf offcpu: Fix build failure on old kernels
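A note on the first item above: in C, reading a 64-bit value through a pointer that is only byte aligned is undefined behavior, which is exactly what -fsanitize=undefined reports. The snippet below is a generic, hedged illustration of the problem and of one common remedy (copying through memcpy); it is not the actual perf change, and the helper name is made up for this example.

	#include <stdint.h>
	#include <string.h>

	/*
	 * Direct dereference is UB when 'p' is not 8-byte aligned:
	 *
	 *     uint64_t v = *(const uint64_t *)p;   // flagged by UBSan
	 *
	 * memcpy() expresses the same load without any alignment
	 * requirement, and compilers lower it to an ordinary
	 * (possibly unaligned) load where that is cheap.
	 */
	static uint64_t read_u64_unaligned(const void *p)
	{
		uint64_t v;

		memcpy(&v, p, sizeof(v));
		return v;
	}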
Diffstat (limited to 'tools/perf/util/bpf_skel/off_cpu.bpf.c')
-rw-r--r--  tools/perf/util/bpf_skel/off_cpu.bpf.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index 792ae2847080..cc6d7fd55118 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -71,6 +71,11 @@ struct {
 	__uint(max_entries, 1);
 } cgroup_filter SEC(".maps");
 
+/* new kernel task_struct definition */
+struct task_struct___new {
+	long __state;
+} __attribute__((preserve_access_index));
+
 /* old kernel task_struct definition */
 struct task_struct___old {
 	long state;
@@ -93,14 +98,17 @@ const volatile bool uses_cgroup_v1 = false;
  */
 static inline int get_task_state(struct task_struct *t)
 {
-	if (bpf_core_field_exists(t->__state))
-		return BPF_CORE_READ(t, __state);
+	/* recast pointer to capture new type for compiler */
+	struct task_struct___new *t_new = (void *)t;
 
-	/* recast pointer to capture task_struct___old type for compiler */
-	struct task_struct___old *t_old = (void *)t;
+	if (bpf_core_field_exists(t_new->__state)) {
+		return BPF_CORE_READ(t_new, __state);
+	} else {
+		/* recast pointer to capture old type for compiler */
+		struct task_struct___old *t_old = (void *)t;
 
-	/* now use old "state" name of the field */
-	return BPF_CORE_READ(t_old, state);
+		return BPF_CORE_READ(t_old, state);
+	}
 }
 
 static inline __u64 get_cgroup_id(struct task_struct *t)
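For reference, this is how get_task_state() reads with the hunk above applied, reconstructed from the diff; it is only a consolidated view, not an additional change. In the real file, bpf_core_field_exists()/BPF_CORE_READ() come from <bpf/bpf_core_read.h> and struct task_struct from vmlinux.h.

	/* new kernel task_struct definition */
	struct task_struct___new {
		long __state;
	} __attribute__((preserve_access_index));

	/* old kernel task_struct definition */
	struct task_struct___old {
		long state;
	} __attribute__((preserve_access_index));

	static inline int get_task_state(struct task_struct *t)
	{
		/* recast pointer to capture new type for compiler */
		struct task_struct___new *t_new = (void *)t;

		if (bpf_core_field_exists(t_new->__state)) {
			return BPF_CORE_READ(t_new, __state);
		} else {
			/* recast pointer to capture old type for compiler */
			struct task_struct___old *t_old = (void *)t;

			return BPF_CORE_READ(t_old, state);
		}
	}

The ___new/___old suffixes follow libbpf's CO-RE "flavor" convention: the suffix is ignored when the types are matched against the running kernel's BTF, so the same BPF object works on kernels that name the field either __state or state.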