author    Ingo Molnar <mingo@kernel.org>  2019-04-25 13:03:31 +0200
committer Ingo Molnar <mingo@kernel.org>  2019-04-29 16:05:49 +0200
commit    1fc654cf6e04b402ba9c4327b2919ea864037e7a (patch)
tree      d012f50e6390e2cbd533035f5b9c18ac9e4a909d /arch/x86/kernel
parent    fc93dfd9345bb8b29a62b21cb0447dd1a3815f91 (diff)
x86/paravirt: Standardize 'insn_buff' variable names
We currently have 6 (!) separate naming variants to name temporary instruction
buffers that are used for code patching:

 - insnbuf
 - insnbuff
 - insn_buff
 - insn_buffer
 - ibuf
 - ibuffer

These are used as local variables, percpu fields and function parameters.

Standardize all the names to a single variant: 'insn_buff'.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/alternative.c    | 52
 arch/x86/kernel/kprobes/opt.c    | 16
 arch/x86/kernel/paravirt.c       | 22
 arch/x86/kernel/paravirt_patch.c | 42
 4 files changed, 66 insertions(+), 66 deletions(-)
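All four files share the staging pattern the renamed buffers implement: build
the bytes in a temporary 'insn_buff', fix up relative displacements, pad with
NOPs, then write the result over live kernel text in one step. A minimal
user-space sketch of that flow (illustrative names and addresses only, not the
kernel API):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_PATCH_LEN 16

	/* Stage a 5-byte CALL rel32 in insn_buff and pad the rest of the
	 * patch site with NOPs, mimicking the apply_alternatives() flow.
	 * The displacement is relative to the end of the instruction. */
	static unsigned int stage_call(uint8_t *insn_buff, uintptr_t addr,
				       uintptr_t target, unsigned int site_len)
	{
		int32_t delta = (int32_t)(target - (addr + 5));

		insn_buff[0] = 0xe8;			/* CALL rel32 */
		memcpy(insn_buff + 1, &delta, 4);
		memset(insn_buff + 5, 0x90, site_len - 5); /* NOP padding */
		return site_len;
	}

	int main(void)
	{
		uint8_t insn_buff[MAX_PATCH_LEN];
		unsigned int sz = stage_call(insn_buff, 0x401000, 0x402000, 8);

		for (unsigned int i = 0; i < sz; i++)
			printf("%02x ", insn_buff[i]); /* e8 fb 0f 00 00 90 90 90 */
		putchar('\n');
		return 0;	/* the kernel would text_poke_early() here */
	}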
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 9a79c7808f9c..92eafd1d3493 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -275,7 +275,7 @@ static inline bool is_jmp(const u8 opcode)
}
static void __init_or_module
-recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
+recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
u8 *next_rip, *tgt_rip;
s32 n_dspl, o_dspl;
@@ -284,7 +284,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
if (a->replacementlen != 5)
return;
- o_dspl = *(s32 *)(insnbuf + 1);
+ o_dspl = *(s32 *)(insn_buff + 1);
/* next_rip of the replacement JMP */
next_rip = repl_insn + a->replacementlen;
@@ -310,9 +310,9 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
two_byte_jmp:
n_dspl -= 2;
- insnbuf[0] = 0xeb;
- insnbuf[1] = (s8)n_dspl;
- add_nops(insnbuf + 2, 3);
+ insn_buff[0] = 0xeb;
+ insn_buff[1] = (s8)n_dspl;
+ add_nops(insn_buff + 2, 3);
repl_len = 2;
goto done;
@@ -320,8 +320,8 @@ two_byte_jmp:
five_byte_jmp:
n_dspl -= 5;
- insnbuf[0] = 0xe9;
- *(s32 *)&insnbuf[1] = n_dspl;
+ insn_buff[0] = 0xe9;
+ *(s32 *)&insn_buff[1] = n_dspl;
repl_len = 5;
@@ -368,7 +368,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
{
struct alt_instr *a;
u8 *instr, *replacement;
- u8 insnbuf[MAX_PATCH_LEN];
+ u8 insn_buff[MAX_PATCH_LEN];
DPRINTK("alt table %px, -> %px", start, end);
/*
@@ -381,11 +381,11 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* order.
*/
for (a = start; a < end; a++) {
- int insnbuf_sz = 0;
+ int insn_buff_sz = 0;
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
- BUG_ON(a->instrlen > sizeof(insnbuf));
+ BUG_ON(a->instrlen > sizeof(insn_buff));
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
if (!boot_cpu_has(a->cpuid)) {
if (a->padlen > 1)
@@ -403,8 +403,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
- memcpy(insnbuf, replacement, a->replacementlen);
- insnbuf_sz = a->replacementlen;
+ memcpy(insn_buff, replacement, a->replacementlen);
+ insn_buff_sz = a->replacementlen;
/*
* 0xe8 is a relative jump; fix the offset.
@@ -412,24 +412,24 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* Instruction length is checked before the opcode to avoid
* accessing uninitialized bytes for zero-length replacements.
*/
- if (a->replacementlen == 5 && *insnbuf == 0xe8) {
- *(s32 *)(insnbuf + 1) += replacement - instr;
+ if (a->replacementlen == 5 && *insn_buff == 0xe8) {
+ *(s32 *)(insn_buff + 1) += replacement - instr;
DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
- *(s32 *)(insnbuf + 1),
- (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
+ *(s32 *)(insn_buff + 1),
+ (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
}
if (a->replacementlen && is_jmp(replacement[0]))
- recompute_jump(a, instr, replacement, insnbuf);
+ recompute_jump(a, instr, replacement, insn_buff);
if (a->instrlen > a->replacementlen) {
- add_nops(insnbuf + a->replacementlen,
+ add_nops(insn_buff + a->replacementlen,
a->instrlen - a->replacementlen);
- insnbuf_sz += a->instrlen - a->replacementlen;
+ insn_buff_sz += a->instrlen - a->replacementlen;
}
- DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
+ DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
- text_poke_early(instr, insnbuf, insnbuf_sz);
+ text_poke_early(instr, insn_buff, insn_buff_sz);
}
}
@@ -591,22 +591,22 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end)
{
struct paravirt_patch_site *p;
- char insnbuf[MAX_PATCH_LEN];
+ char insn_buff[MAX_PATCH_LEN];
for (p = start; p < end; p++) {
unsigned int used;
BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
- memcpy(insnbuf, p->instr, p->len);
- used = pv_ops.init.patch(p->instrtype, insnbuf,
+ memcpy(insn_buff, p->instr, p->len);
+ used = pv_ops.init.patch(p->instrtype, insn_buff,
(unsigned long)p->instr, p->len);
BUG_ON(used > p->len);
/* Pad the rest with nops */
- add_nops(insnbuf + used, p->len - used);
- text_poke_early(p->instr, insnbuf, p->len);
+ add_nops(insn_buff + used, p->len - used);
+ text_poke_early(p->instr, insn_buff, p->len);
}
}
extern struct paravirt_patch_site __start_parainstructions[],
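For reference, the 2-byte vs 5-byte decision recompute_jump() makes above can
be sketched in isolation: a displacement that fits a signed byte gets the short
form 0xeb, anything larger the near form 0xe9. This is a simplified sketch
(the kernel splits the range check across the positive and negative cases):

	#include <stdint.h>
	#include <string.h>

	/* Encode a JMP into a 5-byte patch slot, given the displacement from
	 * the start of the slot to the target. Returns repl_len. */
	static unsigned int emit_jmp(uint8_t *insn_buff, int32_t n_dspl)
	{
		if (n_dspl - 2 >= -128 && n_dspl - 2 <= 127) {
			insn_buff[0] = 0xeb;		/* JMP rel8 */
			insn_buff[1] = (int8_t)(n_dspl - 2);
			memset(insn_buff + 2, 0x90, 3);	/* pad to 5 bytes */
			return 2;
		}
		n_dspl -= 5;
		insn_buff[0] = 0xe9;			/* JMP rel32 */
		memcpy(&insn_buff[1], &n_dspl, 4);
		return 5;
	}

	int main(void)
	{
		uint8_t insn_buff[5];

		return emit_jmp(insn_buff, 40) == 2 ? 0 : 1; /* short form fits */
	}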
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index f14262952015..e77a895a9ecc 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -431,7 +431,7 @@ err:
void arch_optimize_kprobes(struct list_head *oplist)
{
struct optimized_kprobe *op, *tmp;
- u8 insn_buf[RELATIVEJUMP_SIZE];
+ u8 insn_buff[RELATIVEJUMP_SIZE];
list_for_each_entry_safe(op, tmp, oplist, list) {
s32 rel = (s32)((long)op->optinsn.insn -
@@ -443,10 +443,10 @@ void arch_optimize_kprobes(struct list_head *oplist)
memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
RELATIVE_ADDR_SIZE);
- insn_buf[0] = RELATIVEJUMP_OPCODE;
- *(s32 *)(&insn_buf[1]) = rel;
+ insn_buff[0] = RELATIVEJUMP_OPCODE;
+ *(s32 *)(&insn_buff[1]) = rel;
- text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
+ text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
op->optinsn.insn);
list_del_init(&op->list);
@@ -456,12 +456,12 @@ void arch_optimize_kprobes(struct list_head *oplist)
/* Replace a relative jump with a breakpoint (int3). */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
- u8 insn_buf[RELATIVEJUMP_SIZE];
+ u8 insn_buff[RELATIVEJUMP_SIZE];
/* Set int3 to first byte for kprobes */
- insn_buf[0] = BREAKPOINT_INSTRUCTION;
- memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
- text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
+ insn_buff[0] = BREAKPOINT_INSTRUCTION;
+ memcpy(insn_buff + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+ text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
op->optinsn.insn);
}
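The two byte patterns arch_optimize_kprobes() and arch_unoptimize_kprobe()
swap between can be summarized as follows (opcodes as on x86; a standalone
sketch, not the kprobes API):

	#include <stdint.h>
	#include <string.h>

	#define RELATIVEJUMP_SIZE  5	/* 0xe9 + 32-bit displacement */
	#define RELATIVE_ADDR_SIZE 4

	/* Optimized probe: near JMP from the probe address to the trampoline. */
	static void build_jmp(uint8_t *insn_buff, int32_t rel)
	{
		insn_buff[0] = 0xe9;		/* RELATIVEJUMP_OPCODE */
		memcpy(&insn_buff[1], &rel, 4);
	}

	/* Unoptimized probe: INT3, then the saved original bytes restored. */
	static void build_int3(uint8_t *insn_buff, const uint8_t *copied_insn)
	{
		insn_buff[0] = 0xcc;		/* BREAKPOINT_INSTRUCTION */
		memcpy(insn_buff + 1, copied_insn, RELATIVE_ADDR_SIZE);
	}

	int main(void)
	{
		uint8_t insn_buff[RELATIVEJUMP_SIZE];
		uint8_t saved[RELATIVE_ADDR_SIZE] = { 0x55, 0x48, 0x89, 0xe5 };

		build_jmp(insn_buff, 0x1000);	/* optimize: jump to trampoline */
		build_int3(insn_buff, saved);	/* unoptimize: back to int3 */
		return 0;
	}

Either 5-byte image is then installed atomically with text_poke_bp(), which
the hunks above leave unchanged apart from the buffer name.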
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 544d386ded45..b7d22912e20b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -70,11 +70,11 @@ struct branch {
u32 delta;
} __attribute__((packed));
-static unsigned paravirt_patch_call(void *insnbuf, const void *target,
+static unsigned paravirt_patch_call(void *insn_buff, const void *target,
unsigned long addr, unsigned len)
{
const int call_len = 5;
- struct branch *b = insnbuf;
+ struct branch *b = insn_buff;
unsigned long delta = (unsigned long)target - (addr+call_len);
if (len < call_len) {
@@ -97,10 +97,10 @@ u64 notrace _paravirt_ident_64(u64 x)
return x;
}
-static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+static unsigned paravirt_patch_jmp(void *insn_buff, const void *target,
unsigned long addr, unsigned len)
{
- struct branch *b = insnbuf;
+ struct branch *b = insn_buff;
unsigned long delta = (unsigned long)target - (addr+5);
if (len < 5) {
@@ -125,7 +125,7 @@ void __init native_pv_lock_init(void)
static_branch_disable(&virt_spin_lock_key);
}
-unsigned paravirt_patch_default(u8 type, void *insnbuf,
+unsigned paravirt_patch_default(u8 type, void *insn_buff,
unsigned long addr, unsigned len)
{
/*
@@ -137,28 +137,28 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
if (opfunc == NULL)
/* If there's no function, patch it with a ud2a (BUG) */
- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+ ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a));
else if (opfunc == _paravirt_nop)
ret = 0;
#ifdef CONFIG_PARAVIRT_XXL
/* identity functions just return their single argument */
else if (opfunc == _paravirt_ident_64)
- ret = paravirt_patch_ident_64(insnbuf, len);
+ ret = paravirt_patch_ident_64(insn_buff, len);
else if (type == PARAVIRT_PATCH(cpu.iret) ||
type == PARAVIRT_PATCH(cpu.usergs_sysret64))
/* If operation requires a jmp, then jmp */
- ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
+ ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
#endif
else
/* Otherwise call the function. */
- ret = paravirt_patch_call(insnbuf, opfunc, addr, len);
+ ret = paravirt_patch_call(insn_buff, opfunc, addr, len);
return ret;
}
-unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
const char *start, const char *end)
{
unsigned insn_len = end - start;
@@ -166,7 +166,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
/* Alternative instruction is too large for the patch site and we cannot continue: */
BUG_ON(insn_len > len || start == NULL);
- memcpy(insnbuf, start, insn_len);
+ memcpy(insn_buff, start, insn_len);
return insn_len;
}
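The struct branch used by paravirt_patch_call()/paravirt_patch_jmp() relies on
packing to match the 5-byte CALL/JMP rel32 encoding exactly; a quick
standalone check (uint types substituted for the kernel's u8/u32):

	#include <assert.h>
	#include <stdint.h>

	struct branch {
		uint8_t  opcode;	/* 0xe8 = CALL rel32, 0xe9 = JMP rel32 */
		uint32_t delta;		/* displacement, relative to next insn */
	} __attribute__((packed));

	int main(void)
	{
		/* Without packed, alignment padding would make this 8 bytes
		 * and the staged instruction image would be corrupt. */
		assert(sizeof(struct branch) == 5);
		return 0;
	}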
diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
index 37b1d43d1e17..3eff63c090d2 100644
--- a/arch/x86/kernel/paravirt_patch.c
+++ b/arch/x86/kernel/paravirt_patch.c
@@ -10,12 +10,12 @@
#define PEND(d, m) \
(PSTART(d, m) + sizeof(patch_data_##d.m))
-#define PATCH(d, m, ibuf, len) \
- paravirt_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m))
+#define PATCH(d, m, insn_buff, len) \
+ paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))
-#define PATCH_CASE(ops, m, data, ibuf, len) \
+#define PATCH_CASE(ops, m, data, insn_buff, len) \
case PARAVIRT_PATCH(ops.m): \
- return PATCH(data, ops##_##m, ibuf, len)
+ return PATCH(data, ops##_##m, insn_buff, len)
#ifdef CONFIG_PARAVIRT_XXL
struct patch_xxl {
@@ -57,10 +57,10 @@ static const struct patch_xxl patch_data_xxl = {
# endif
};
-unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
+unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
#ifdef CONFIG_X86_64
- return PATCH(xxl, mov64, insnbuf, len);
+ return PATCH(xxl, mov64, insn_buff, len);
#endif
return 0;
}
@@ -83,44 +83,44 @@ static const struct patch_lock patch_data_lock = {
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-unsigned int native_patch(u8 type, void *ibuf, unsigned long addr,
+unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
unsigned int len)
{
switch (type) {
#ifdef CONFIG_PARAVIRT_XXL
- PATCH_CASE(irq, restore_fl, xxl, ibuf, len);
- PATCH_CASE(irq, save_fl, xxl, ibuf, len);
- PATCH_CASE(irq, irq_enable, xxl, ibuf, len);
- PATCH_CASE(irq, irq_disable, xxl, ibuf, len);
+ PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
+ PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
+ PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
+ PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);
- PATCH_CASE(mmu, read_cr2, xxl, ibuf, len);
- PATCH_CASE(mmu, read_cr3, xxl, ibuf, len);
- PATCH_CASE(mmu, write_cr3, xxl, ibuf, len);
+ PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
+ PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
+ PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
# ifdef CONFIG_X86_64
- PATCH_CASE(cpu, usergs_sysret64, xxl, ibuf, len);
- PATCH_CASE(cpu, swapgs, xxl, ibuf, len);
- PATCH_CASE(cpu, wbinvd, xxl, ibuf, len);
+ PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
+ PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
+ PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
# else
- PATCH_CASE(cpu, iret, xxl, ibuf, len);
+ PATCH_CASE(cpu, iret, xxl, insn_buff, len);
# endif
#endif
#ifdef CONFIG_PARAVIRT_SPINLOCKS
case PARAVIRT_PATCH(lock.queued_spin_unlock):
if (pv_is_native_spin_unlock())
- return PATCH(lock, queued_spin_unlock, ibuf, len);
+ return PATCH(lock, queued_spin_unlock, insn_buff, len);
break;
case PARAVIRT_PATCH(lock.vcpu_is_preempted):
if (pv_is_native_vcpu_is_preempted())
- return PATCH(lock, vcpu_is_preempted, ibuf, len);
+ return PATCH(lock, vcpu_is_preempted, insn_buff, len);
break;
#endif
default:
break;
}
- return paravirt_patch_default(type, ibuf, addr, len);
+ return paravirt_patch_default(type, insn_buff, addr, len);
}
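For readers tracing the macros above: with the new parameter name, a line such
as PATCH_CASE(irq, save_fl, xxl, insn_buff, len) expands (schematically) to:

	case PARAVIRT_PATCH(irq.save_fl):
		return paravirt_patch_insns(insn_buff, len,
					    PSTART(xxl, irq_save_fl),
					    PEND(xxl, irq_save_fl));

so every case copies the canned native instruction sequence for that op into
insn_buff and returns how many bytes it used.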