summaryrefslogtreecommitdiffstats
path: root/arch/s390/kernel/ftrace.c
blob: 0b81a784e0394564584fa6ee67abfca42de222c5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 *
 */

#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/lowcore.h>

/*
 * These are not real C functions: each is a label defined by one of
 * the inline assembly templates below.  Declaring them as functions
 * lets C code take their addresses and hand them to
 * ftrace_modify_code() as the expected/replacement byte patterns.
 */
void ftrace_disable_code(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

/*
 * Number of bytes compared and patched for the nop <-> call
 * transitions below (the disable transition uses MCOUNT_INSN_SIZE
 * instead).
 */
#define FTRACE_INSN_SIZE 4

#ifdef CONFIG_64BIT

/*
 * Byte pattern written over an mcount call site to disable tracing:
 * the leading "j 0f" branches straight to the end of the sequence,
 * so the load/call instructions that follow are skipped entirely.
 * The skipped body loads the tracer address from the lowcore slot
 * __LC_FTRACE_FUNC and calls it — presumably kept in place so the
 * site can be re-enabled by repatching only the first instruction;
 * confirm against the enable path/mcount code, which is not visible
 * here.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	.word	0x0024\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"	lg	%r14,8(15)\n"
	"	lgr	%r0,%r0\n"
	"0:\n");

/*
 * The nop pattern: a single jump over MCOUNT_INSN_SIZE bytes,
 * i.e. over the remainder of the mcount call sequence.
 */
asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

/*
 * The "tracing enabled" pattern: save %r14 on the stack.  This is
 * presumably the original first instruction of the mcount call
 * sequence emitted at each call site — TODO confirm against the
 * s390 mcount implementation.
 */
asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	stg	%r14,8(%r15)\n");

#else /* CONFIG_64BIT */

/*
 * 31-bit variants of the three patterns above.  Same structure:
 * the disable sequence branches over its body with "j 0f"; the
 * trailing "bcr 0,%r7" instructions are no-ops padding the pattern
 * out to the required size.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"	l	%r14,4(%r15)\n"
	"	j	0f\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"0:\n");

/* Nop pattern: jump over the rest of the mcount call sequence. */
asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

/* Call pattern: 31-bit store of %r14 (see 64-bit note above). */
asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	st	%r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

/*
 * Atomically-checked code patching: replace the instruction bytes at
 * @ip with @new_code, but only if the bytes currently there match
 * @old_code.
 *
 * Returns 0 on success, -EFAULT if the existing code cannot be read,
 * -EINVAL if it does not match the expected pattern, and -EPERM if
 * the write fails.
 */
static int ftrace_modify_code(unsigned long ip,
			      void *old_code, int old_size,
			      void *new_code, int new_size)
{
	unsigned char existing[MCOUNT_INSN_SIZE];

	/*
	 * Module code can fault away or change underneath us, so go
	 * through the fault-tolerant probe_kernel_* accessors instead
	 * of touching the memory directly.  The comparison is only a
	 * sanity check.
	 */
	if (probe_kernel_read(existing, (void *)ip, old_size))
		return -EFAULT;
	if (memcmp(existing, old_code, old_size))
		return -EINVAL;
	if (probe_kernel_write((void *)ip, new_code, new_size))
		return -EPERM;
	return 0;
}

/*
 * First-time conversion of a call site: replace the compiler-emitted
 * call pattern with the full "tracing disabled" sequence.  Only used
 * when ftrace_make_nop() is invoked with addr == MCOUNT_ADDR.
 */
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
				   unsigned long addr)
{
	void *expect = ftrace_call_code;
	void *insert = ftrace_disable_code;

	return ftrace_modify_code(rec->ip, expect, FTRACE_INSN_SIZE,
				  insert, MCOUNT_INSN_SIZE);
}

/*
 * Disable tracing of one call site.  For the very first conversion
 * (addr == MCOUNT_ADDR) the whole disable sequence is installed;
 * afterwards only the single call instruction is swapped for a nop.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (addr != MCOUNT_ADDR)
		return ftrace_modify_code(rec->ip,
					  ftrace_call_code, FTRACE_INSN_SIZE,
					  ftrace_nop_code, FTRACE_INSN_SIZE);
	/* Initial conversion of this call site. */
	return ftrace_make_initial_nop(mod, rec, addr);
}

/*
 * Enable tracing of one call site: swap the nop pattern back to the
 * call pattern, verifying that the nop is really what's there.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	void *expect = ftrace_nop_code;
	void *insert = ftrace_call_code;

	return ftrace_modify_code(rec->ip, expect, FTRACE_INSN_SIZE,
				  insert, FTRACE_INSN_SIZE);
}

/*
 * Install @func as the active tracer function by storing its address
 * in the global ftrace_dyn_func (defined elsewhere; presumably the
 * value the patched call sequences reach via the __LC_FTRACE_FUNC
 * lowcore slot — confirm against lowcore setup code).
 * Always succeeds.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_dyn_func = (unsigned long)func;
	return 0;
}

/*
 * Arch initialization hook for dynamic ftrace.  No s390-specific
 * setup is needed; the ftrace core only expects the value behind
 * @data to be cleared, and a zero return to signal success.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	unsigned long *addr = data;

	*addr = 0;
	return 0;
}