path: root/arch/arc/kernel/stacktrace.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *	stacktrace.c : stacktracing APIs needed by rest of kernel
 *			(wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 *  vineetg: aug 2009
 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
 *   for displaying task's kernel mode call stack in /proc/<pid>/stack
 *  -Iterator based approach to have single copy of unwinding core and APIs
 *   needing unwinding, implement the logic in iterator regarding:
 *      = which frame onwards to start capture
 *      = which frame to stop capturing (wchan)
 *      = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
 *
 *  vineetg: March 2009
 *  -Implemented correct versions of thread_saved_pc() and __get_wchan()
 *
 *  rajeshwarr: 2008
 *  -Initial implementation
 */

#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>

#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/switch_to.h>

/*-------------------------------------------------------------------------
 *              Unwinder Iterator
 *-------------------------------------------------------------------------
 */

#ifdef CONFIG_ARC_DW2_UNWIND

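/*
 * Seed the unwinder state for one of three cases: an interrupted context
 * described by @regs, the current context (@tsk is NULL or current), or a
 * sleeping task whose kernel mode SP/FP/BLINK were stashed by __switch_to.
 * Returns -1 if the task is not actually sleeping and hence can't be
 * unwound this way.
 */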
static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
		       struct unwind_frame_info *frame_info)
{
	if (regs) {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	} else if (tsk == NULL || tsk == current) {
		/*
		 * synchronous unwinding (e.g. dump_stack)
		 *  - uses current values of SP and friends
		 */
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

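		/* Snapshot the live core regs to seed the unwind:
		 * r27 (fp), r28 (sp), r31 (blink) and r63 (PCL, the current PC)
		 */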
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else {
		/*
		 * Asynchronous unwinding of a likely sleeping task
		 *  - first ensure it is actually sleeping
		 *  - if so, it will be in __switch_to, kernel mode SP of task
		 *    is safe-kept and BLINK at a well known location in there
		 */

		if (task_is_running(tsk))
			return -1;

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, FP is first saved on the
		 * stack and then SP is copied into FP. Dwarf assumes the CFA
		 * is FP based, but we didn't save FP; the value retrieved
		 * above is FP's state in the previous frame.
		 * As a workaround, we unwind from the start of __switch_to
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for the inline assembly code
		 * in __switch_to.
		 */
		frame_info->regs.r27 = 0;
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	}
	return 0;
}

#endif

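/*
 * Core unwinding iterator: seed an unwind_frame_info from @regs / @tsk and
 * walk up the call stack, handing each return address to @consumer_fn until
 * it returns -1 or the unwinder can go no further.
 */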
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0, cnt = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	if (seed_unwind_frame_info(tsk, regs, &frame_info))
		return 0;

	while (1) {
		address = UNW_PC(&frame_info);

		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

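		/* Use the unwound frame's blink (return address) as the PC
		 * for the next iteration
		 */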
		frame_info.regs.r63 = frame_info.regs.r31;

		if (cnt++ > 128) {
			printk("unwinder looping too long, aborting !\n");
			return 0;
		}
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only the dwarf based unwinder works. fp based backtracing
	 * is not possible (even with -fno-omit-frame-pointer) because of the
	 * way the function prologue is set up (callee regs are saved first
	 * and only then is fp set, not the other way around).
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}

/*-------------------------------------------------------------------------
 * callbacks called by unwinder iterator to implement kernel APIs
 *
 * The callback can return -1 to force the iterator to stop, which by default
 * keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */

/* Call-back which plugs into the unwinding core to dump the stack in
 * case of panic/Oops/BUG etc
 */
static int __print_sym(unsigned int address, void *arg)
{
	const char *loglvl = arg;

	printk("%s  %pS\n", loglvl, (void *)address);
	return 0;
}

#ifdef CONFIG_STACKTRACE

/* Call-back which plugs into the unwinding core to capture the
 * traces needed by the kernel for /proc/<pid>/stack
 */
static int __collect_all(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

static int __collect_all_but_sched(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (in_sched_functions(address))
		return 0;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

#endif

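/* Call-back which stops the unwind at the first frame outside scheduler
 * code; arc_unwind_core() then returns that address as the wait channel
 */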
static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}

/*-------------------------------------------------------------------------
 *              APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */

noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
			      const char *loglvl)
{
	printk("%s\nStack Trace:\n", loglvl);
	arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);

/* Expected by sched code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	show_stacktrace(tsk, NULL, loglvl);
}

/* Another API expected by the scheduler; shows up in "ps" as the Wait Channel.
 * Of course just returning schedule( ) would be pointless, so unwind until
 * the function is no longer in scheduler code.
 */
unsigned int __get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}

#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif