path: root/arch/powerpc/include/asm/code-patching.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>

/*
 * Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

bool is_offset_in_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
		  unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
		       unsigned long target, int flags);
int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
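
/*
 * Illustrative sketch (not from the original header): patch a call site to
 * branch-and-link to a new target.  'site' and 'target' are hypothetical
 * names used only for this example.
 *
 *	struct ppc_inst instr;
 *
 *	if (!create_branch(&instr, site, target, BRANCH_SET_LINK))
 *		patch_instruction(site, instr);
 *
 * or, equivalently, in one step:
 *
 *	patch_branch(site, target, BRANCH_SET_LINK);
 */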

static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
	return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
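
/*
 * Illustrative sketch (not from the original header): rewrite a field of the
 * instruction at a patch site exported from assembly.  'patch__example_site'
 * and 'new_imm' are hypothetical names; real callers pass sites declared with
 * the patch_site assembler macro.
 *
 *	extern s32 patch__example_site;
 *
 * clear the 16-bit immediate field, then set a new value:
 *
 *	modify_instruction_site(&patch__example_site, 0xffff, new_imm & 0xffff);
 */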

int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
unsigned long branch_target(const struct ppc_inst *instr);
int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
		     const struct ppc_inst *src);
bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif
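
/*
 * Illustrative sketch (not from the original header): redirect a Book3E
 * exception vector to an alternate handler.  'exc_example_handler' is a
 * hypothetical label standing in for one exported by the exception assembly.
 *
 *	patch_exception(0x1c0, exc_example_handler);
 */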

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		(PPC_INST_ADDIS | __PPC_RT(R2))
#define ADDIS_R2_R12	(PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
#define ADDI_R2_R2	(PPC_INST_ADDI  | __PPC_RT(R2) | __PPC_RA(R2))

static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((func_descr_t *)func)->entry;
#else
	return (unsigned long)func;
#endif
}
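
/*
 * Illustrative sketch (not from the original header): when redirecting a
 * patched call to a C function, branch to its local entry point so the
 * global-entry TOC setup is skipped.  'site' and 'my_handler' are
 * hypothetical names for this example only.
 *
 *	patch_branch(site, ppc_function_entry((void *)my_handler), BRANCH_SET_LINK);
 */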

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	/* On PPC64 ABIv2 the global entry point is at the function's address */
	return (unsigned long)func;
#else
	/* In all other cases there is no difference from ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}
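
/*
 * Illustrative sketch (not from the original header): resolve a symbol name
 * to a patchable entry address independent of ABI.  "example_symbol" and the
 * error handling are placeholders for this example only.
 *
 *	unsigned long entry = ppc_kallsyms_lookup_name("example_symbol");
 *
 *	if (!entry)
 *		return -ENOENT;
 */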

#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftrace
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET         24
#else
#define R2_STACK_OFFSET         40
#endif

#define PPC_INST_LD_TOC		(PPC_INST_LD  | ___PPC_RT(__REG_R2) | \
				 ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		(PPC_INST_STD | ___PPC_RS(__REG_R0) | \
				 ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
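
/*
 * Illustrative sketch (not from the original header): dynamic-ftrace style
 * code typically checks that the instruction following a patched call site
 * is the TOC restore before rewriting it.  'ip' is a hypothetical pointer to
 * the instruction after the call.
 *
 *	if (!ppc_inst_equal(ppc_inst_read(ip), ppc_inst(PPC_INST_LD_TOC)))
 *		return -EINVAL;
 */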
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_CODE_PATCHING_H */