#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H

#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
#include "symbol.h"

#define HELP_PAD "\t\t\t\t"

#define CALLCHAIN_HELP "setup and enable call-graph (stack chain/backtrace):\n\n"

#ifdef HAVE_DWARF_UNWIND_SUPPORT
# define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n"
#else
# define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n"
#endif

#define RECORD_SIZE_HELP						\
	HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \
	HELP_PAD "\t\tdefault: 8192 (bytes)\n"

#define CALLCHAIN_RECORD_HELP  CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP
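/*
 * Illustrative example (the command line below is not defined in this
 * header): with the fields above, something like
 * "perf record --call-graph dwarf,4096" selects the dwarf record_mode
 * with a 4096 byte stack dump size.
 */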

#define CALLCHAIN_REPORT_HELP						\
	HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|folded|none)\n" \
	HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \
	HELP_PAD "print_limit:\tmaximum number of call graph entry (<number>)\n" \
	HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \
	HELP_PAD "sort_key:\tcall graph sort key (function|address)\n"	\
	HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n" \
	HELP_PAD "value:\t\tcall graph value (percent|period|count)\n"

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF,
	CALLCHAIN_LBR,
	CALLCHAIN_MAX
};

enum chain_mode {
	CHAIN_NONE,
	CHAIN_FLAT,
	CHAIN_GRAPH_ABS,
	CHAIN_GRAPH_REL,
	CHAIN_FOLDED,
};

enum chain_order {
	ORDER_CALLER,
	ORDER_CALLEE
};

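/*
 * One node of the merged callchain tree: 'hit' is the weight of samples
 * whose chain terminates at this node, 'children_hit' accumulates the
 * descendants (see callchain_cumul_hits() below); count/children_count
 * track the same split for sample counts.
 */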
struct callchain_node {
	struct callchain_node	*parent;
	struct list_head	val;
	struct list_head	parent_val;
	struct rb_node		rb_node_in; /* to insert nodes in an rbtree */
	struct rb_node		rb_node;    /* to sort nodes in an output tree */
	struct rb_root		rb_root_in; /* input tree of children */
	struct rb_root		rb_root;    /* sorted output tree of children */
	unsigned int		val_nr;
	unsigned int		count;
	unsigned int		children_count;
	u64			hit;
	u64			children_hit;
};

struct callchain_root {
	u64			max_depth;
	struct callchain_node	node;
};

struct callchain_param;

typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
				 u64, struct callchain_param *);

enum chain_key {
	CCKEY_FUNCTION,
	CCKEY_ADDRESS
};

enum chain_value {
	CCVAL_PERCENT,
	CCVAL_PERIOD,
	CCVAL_COUNT,
};

struct callchain_param {
	bool			enabled;
	enum perf_call_graph_mode record_mode;
	u32			dump_size;
	enum chain_mode 	mode;
	u32			print_limit;
	double			min_percent;
	sort_chain_func_t	sort;
	enum chain_order	order;
	bool			order_set;
	enum chain_key		key;
	bool			branch_callstack;
	enum chain_value	value;
};

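/*
 * Global call-graph configuration, filled in by the parse_callchain_*()
 * helpers and perf_callchain_config() declared below.
 */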
extern struct callchain_param callchain_param;

struct callchain_list {
	u64			ip;
	struct map_symbol	ms;
	struct /* for TUI */ {
		bool		unfolded;
		bool		has_children;
	};
	char		       *srcline;
	struct list_head	list;
};

/*
 * A callchain cursor is a singly linked list that
 * lets one feed a callchain progressively.
 * It keeps persistently allocated entries to minimize
 * allocations.
 */
struct callchain_cursor_node {
	u64				ip;
	struct map			*map;
	struct symbol			*sym;
	struct callchain_cursor_node	*next;
};

struct callchain_cursor {
	u64				nr;
	struct callchain_cursor_node	*first;
	struct callchain_cursor_node	**last;
	u64				pos;
	struct callchain_cursor_node	*curr;
};

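/*
 * Per-thread scratch cursor, reset and refilled for each sample whose
 * callchain is being resolved.
 */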
extern __thread struct callchain_cursor callchain_cursor;

static inline void callchain_init(struct callchain_root *root)
{
	INIT_LIST_HEAD(&root->node.val);
	INIT_LIST_HEAD(&root->node.parent_val);

	root->node.parent = NULL;
	root->node.hit = 0;
	root->node.children_hit = 0;
	root->node.rb_root_in = RB_ROOT;
	root->max_depth = 0;
}

static inline u64 callchain_cumul_hits(struct callchain_node *node)
{
	return node->hit + node->children_hit;
}

static inline unsigned callchain_cumul_counts(struct callchain_node *node)
{
	return node->count + node->children_count;
}
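/*
 * Illustrative only: with the percent value mode, a node's printed
 * weight is essentially
 *
 *	callchain_cumul_hits(node) * 100.0 / total
 *
 * where 'total' is the cumulated period passed to the printing helpers
 * below.
 */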

int callchain_register_param(struct callchain_param *param);
int callchain_append(struct callchain_root *root,
		     struct callchain_cursor *cursor,
		     u64 period);

int callchain_merge(struct callchain_cursor *cursor,
		    struct callchain_root *dst, struct callchain_root *src);
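/*
 * Typical per-sample producer flow (sketch only, loosely following what
 * hist_entry__append_callchain() does; 'root' and 'sample' are
 * hypothetical locals):
 *
 *	struct callchain_root root;
 *
 *	callchain_init(&root);
 *	...					/- fill callchain_cursor with the sample's frames -/
 *	callchain_append(&root, &callchain_cursor, sample->period);
 */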

/*
 * Initialize a cursor before adding entries to it, but keep
 * the previously allocated entries as a cache.
 */
static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
{
	cursor->nr = 0;
	cursor->last = &cursor->first;
}

int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
			    struct map *map, struct symbol *sym);

/* Close a cursor writing session. Initialize for the reader */
static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
{
	cursor->curr = cursor->first;
	cursor->pos = 0;
}

/* Cursor reading iteration helpers */
static inline struct callchain_cursor_node *
callchain_cursor_current(struct callchain_cursor *cursor)
{
	if (cursor->pos == cursor->nr)
		return NULL;

	return cursor->curr;
}

static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
{
	cursor->curr = cursor->curr->next;
	cursor->pos++;
}
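/*
 * Illustrative write/read cycle over a cursor (sketch only; ip, map and
 * sym stand for one resolved frame):
 *
 *	callchain_cursor_reset(cursor);
 *	callchain_cursor_append(cursor, ip, map, sym);	（once per frame）
 *	callchain_cursor_commit(cursor);
 *
 *	while ((node = callchain_cursor_current(cursor)) != NULL) {
 *		（use node->ip, node->map, node->sym）
 *		callchain_cursor_advance(cursor);
 *	}
 */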

struct option;
struct hist_entry;

int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
int record_callchain_opt(const struct option *opt, const char *arg, int unset);

int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
			      struct perf_evsel *evsel, struct addr_location *al,
			      int max_stack);
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved);

extern const char record_callchain_help[];
extern int parse_callchain_record(const char *arg, struct callchain_param *param);
int parse_callchain_record_opt(const char *arg, struct callchain_param *param);
int parse_callchain_report_opt(const char *arg);
int parse_callchain_top_opt(const char *arg);
int perf_callchain_config(const char *var, const char *value);

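/*
 * Take a view of 'src' that starts at its current read position: 'dest'
 * shares the node list but will only iterate the entries 'src' has not
 * consumed yet.
 */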
static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
					     struct callchain_cursor *src)
{
	*dest = *src;

	dest->first = src->curr;
	dest->nr -= src->pos;
}

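/*
 * Arch hook: may return the index of a callchain entry to skip for this
 * thread (used e.g. on powerpc); the generic fallback returns -1, i.e.
 * skip nothing.
 */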
#ifdef HAVE_SKIP_CALLCHAIN_IDX
extern int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain);
#else
static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
			struct ip_callchain *chain __maybe_unused)
{
	return -1;
}
#endif

char *callchain_list__sym_name(struct callchain_list *cl,
			       char *bf, size_t bfsize, bool show_dso);
char *callchain_node__scnprintf_value(struct callchain_node *node,
				      char *bf, size_t bfsize, u64 total);
int callchain_node__fprintf_value(struct callchain_node *node,
				  FILE *fp, u64 total);

void free_callchain(struct callchain_root *root);
int callchain_node__make_parent_list(struct callchain_node *node);

#endif	/* __PERF_CALLCHAIN_H */