From 5f154c4e20d7edd38bddec78f3e0a7628057ef76 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Wed, 3 Mar 2021 18:05:29 +0100
Subject: arm64: Move patching utilities out of instruction encoding/decoding

Files insn.[c|h] contain some functions used for instruction patching.
In order to reuse the instruction encoder/decoder, move the patching
utilities to their own file.

Signed-off-by: Julien Thierry
Link: https://lore.kernel.org/r/20210303170536.1838032-2-jthierry@redhat.com
[will: Include patching.h in insn.h to fix header mess; add __ASSEMBLY__ guards]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/patching.c | 148 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 arch/arm64/kernel/patching.c

(limited to 'arch/arm64/kernel/patching.c')

diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
new file mode 100644
index 000000000000..9d050e33901b
--- /dev/null
+++ b/arch/arm64/kernel/patching.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kprobes.h>
+#include <asm/sections.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+        /* discarded with init text/data */
+        return system_state < SYSTEM_RUNNING &&
+                addr >= (unsigned long)__exittext_begin &&
+                addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+        return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+        unsigned long uintaddr = (uintptr_t) addr;
+        bool image = is_image_text(uintaddr);
+        struct page *page;
+
+        if (image)
+                page = phys_to_page(__pa_symbol(addr));
+        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+                page = vmalloc_to_page(addr);
+        else
+                return addr;
+
+        BUG_ON(!page);
+        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+                        (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+        clear_fixmap(fixmap);
+}
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+        int ret;
+        __le32 val;
+
+        ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+        if (!ret)
+                *insnp = le32_to_cpu(val);
+
+        return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+        void *waddr = addr;
+        unsigned long flags = 0;
+        int ret;
+
+        raw_spin_lock_irqsave(&patch_lock, flags);
+        waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+        ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+        patch_unmap(FIX_TEXT_POKE0);
+        raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+        return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+        return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+        u32 *tp = addr;
+        int ret;
+
+        /* A64 instructions must be word aligned */
+        if ((uintptr_t)tp & 0x3)
+                return -EINVAL;
+
+        ret = aarch64_insn_write(tp, insn);
+        if (ret == 0)
+                __flush_icache_range((uintptr_t)tp,
+                                     (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+        return ret;
+}
+
+struct aarch64_insn_patch {
+        void            **text_addrs;
+        u32             *new_insns;
+        int             insn_cnt;
+        atomic_t        cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+        int i, ret = 0;
+        struct aarch64_insn_patch *pp = arg;
+
+        /* The first CPU becomes master */
+        if (atomic_inc_return(&pp->cpu_count) == 1) {
+                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+                                                             pp->new_insns[i]);
+                /* Notify other processors with an additional increment. */
+                atomic_inc(&pp->cpu_count);
+        } else {
+                while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+                        cpu_relax();
+                isb();
+        }
+
+        return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+        struct aarch64_insn_patch patch = {
+                .text_addrs = addrs,
+                .new_insns = insns,
+                .insn_cnt = cnt,
+                .cpu_count = ATOMIC_INIT(0),
+        };
+
+        if (cnt <= 0)
+                return -EINVAL;
+
+        return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+                                       cpu_online_mask);
+}
-- cgit v1.2.3
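
The patch above exports a small text-patching API: aarch64_insn_read(), aarch64_insn_write(), aarch64_insn_patch_text_nosync() for instructions that are not yet live, and the stop_machine()-based aarch64_insn_patch_text() for cross-modifying live code. The following is only a rough usage sketch, not part of the series: the helper name patch_site_to_nop() and its surrounding context are illustrative assumptions, though the A64 NOP encoding (HINT #0) is the architectural value.

/*
 * Usage sketch (illustrative, not from the series above): replace the
 * instruction at 'addr' with an A64 NOP using the helpers exported by
 * arch/arm64/kernel/patching.c.
 */
#include <linux/types.h>

#include <asm/patching.h>

#define A64_NOP         0xd503201f      /* HINT #0, i.e. NOP */

/* Illustrative helper name, not taken from the kernel sources. */
static int patch_site_to_nop(void *addr)
{
        u32 old;
        int ret;

        ret = aarch64_insn_read(addr, &old);    /* fetch current encoding */
        if (ret)
                return ret;

        if (old == A64_NOP)
                return 0;                       /* already a NOP */

        /*
         * For text that other CPUs may be executing concurrently,
         * aarch64_insn_patch_text() (stop_machine()-based) would be the
         * safer choice; _nosync() is fine for not-yet-live code.
         */
        return aarch64_insn_patch_text_nosync(addr, A64_NOP);
}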
From 78b92c7337e10519312e8aab64d7a1651206bd61 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 9 Jun 2021 11:23:00 +0100
Subject: arm64: insn: decouple patching from insn code

Currently, <asm/insn.h> includes <asm/patching.h>. We intend that
<asm/insn.h> will be usable from userspace, so it doesn't make sense to
include headers for kernel-only features such as the patching routines,
and we'd intended to restrict <asm/insn.h> to instruction encoding
details.

Let's decouple the patching code from <asm/insn.h>, and explicitly
include <asm/patching.h> where it is needed. Since <asm/patching.h>
isn't included from assembly, we can drop the __ASSEMBLY__ guards.

At the same time, sort the kprobes includes so that it's easier to see
what is and isn't included.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Link: https://lore.kernel.org/r/20210609102301.17332-2-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/insn.h      |  1 -
 arch/arm64/include/asm/patching.h  |  2 --
 arch/arm64/kernel/ftrace.c         |  1 +
 arch/arm64/kernel/jump_label.c     |  1 +
 arch/arm64/kernel/kgdb.c           |  1 +
 arch/arm64/kernel/patching.c       |  1 +
 arch/arm64/kernel/probes/kprobes.c | 18 ++++++++++--------
 arch/arm64/kernel/traps.c          |  1 +
 8 files changed, 15 insertions(+), 11 deletions(-)

(limited to 'arch/arm64/kernel/patching.c')

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 1ea9611545bb..a6f3f45fc46f 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -11,7 +11,6 @@
 #include
 #include
-#include <asm/patching.h>
 #ifndef __ASSEMBLY__
 /*
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
index 5ebab129222f..6bf5adc56295 100644
--- a/arch/arm64/include/asm/patching.h
+++ b/arch/arm64/include/asm/patching.h
@@ -4,12 +4,10 @@
 #include
-#ifndef __ASSEMBLY__
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
-#endif /* __ASSEMBLY__ */
 #endif /* __ASM_PATCHING_H */
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index b5d3ddaf69d9..7f467bd9db7a 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <asm/patching.h>
 #ifdef CONFIG_DYNAMIC_FTRACE
 /*
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 9a8a0ae1e75f..fc98037e1220 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include <asm/patching.h>
 void arch_jump_label_transform(struct jump_entry *entry,
                               enum jump_label_type type)
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 1a157ca33262..2aede780fb80 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -17,6 +17,7 @@
 #include
 #include
+#include <asm/patching.h>
 #include
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index 9d050e33901b..7aa55b33c8c7 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -9,6 +9,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kprobes.h>
+#include <asm/patching.h>
 #include <asm/sections.h>
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d607c9912025..609edde7a5dd 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -7,26 +7,28 @@
  * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu
 */
+#include
 #include
 #include
 #include
-#include
-#include
-#include
 #include
 #include
+#include
+#include
 #include
+#include
 #include
-#include
-#include
+
 #include
 #include
-#include
+#include
 #include
-#include
 #include
+#include
+#include
 #include
+#include
+#include
 
 #include "decode-insn.h"
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 9b683b2381cf..48ff6fb888e0 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include <asm/patching.h>
 #include
 #include
 #include
-- cgit v1.2.3
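
With the decoupling above, <asm/insn.h> no longer drags in the kernel-only patching declarations: encoding and decoding only need <asm/insn.h>, while anything that writes kernel text must include <asm/patching.h> itself. A rough sketch of the resulting include discipline (the helper name write_one_insn() is an illustrative assumption, not taken from the series):

/*
 * Include-discipline sketch (illustrative, not from the series above).
 */
#include <linux/types.h>

#include <asm/insn.h>           /* instruction encodings and decode helpers */
#include <asm/patching.h>       /* aarch64_insn_patch_text*() prototypes */

/* Illustrative helper name; patches one already-validated encoding. */
static int write_one_insn(void *addr, u32 insn)
{
        return aarch64_insn_patch_text_nosync(addr, insn);
}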
From 3e00e39d9dad48360ebd518726ebf81da1b84c10 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 9 Jun 2021 11:23:01 +0100
Subject: arm64: insn: move AARCH64_INSN_SIZE into <asm/insn.h>

For historical reasons, we define AARCH64_INSN_SIZE in
<asm/alternative-macros.h>, but it would make more sense to do so in
<asm/insn.h>. Let's move it into <asm/insn.h>, and add the necessary
include directives for this.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Link: https://lore.kernel.org/r/20210609102301.17332-3-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/alternative-macros.h | 4 +---
 arch/arm64/include/asm/insn.h               | 3 +++
 arch/arm64/include/asm/kvm_asm.h            | 1 +
 arch/arm64/kernel/cpufeature.c              | 1 +
 arch/arm64/kernel/patching.c                | 1 +
 arch/arm64/kernel/traps.c                   | 1 +
 arch/arm64/net/bpf_jit_comp.c               | 1 +
 7 files changed, 9 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/kernel/patching.c')

diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 8a078fc662ac..703fbf310b79 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -3,12 +3,10 @@
 #define __ASM_ALTERNATIVE_MACROS_H
 #include
+#include <asm/insn.h>
 #define ARM64_CB_PATCH ARM64_NCAPS
-/* A64 instructions are always 32 bits. */
-#define AARCH64_INSN_SIZE 4
 #ifndef __ASSEMBLY__
 #include
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index a6f3f45fc46f..1430b4973039 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -12,6 +12,9 @@
 #include
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
 #ifndef __ASSEMBLY__
 /*
  * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index cf8df032b9c3..894edda8cc85 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -8,6 +8,7 @@
 #define __ARM_KVM_ASM_H__
 #include
+#include <asm/insn.h>
 #include
 #define ARM_EXIT_WITH_SERROR_BIT  31
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index efed2830d141..16d35cfffcea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -76,6 +76,7 @@
 #include
 #include
 #include
+#include <asm/insn.h>
 #include
 #include
 #include
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index 7aa55b33c8c7..9a6edb9c48c7 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -8,6 +8,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/insn.h>
 #include <asm/kprobes.h>
 #include <asm/patching.h>
 #include <asm/sections.h>
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 48ff6fb888e0..8f66072fa5cb 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include <asm/insn.h>
 #include
 #include
 #include
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f7b194878a99..dd5000da18b8 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include <asm/insn.h>
 #include
 #include "bpf_jit.h"
-- cgit v1.2.3
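
With AARCH64_INSN_SIZE now provided by <asm/insn.h>, code that steps through kernel text an instruction at a time only needs the insn and patching headers. The sketch below is illustrative only and not part of the series: the helper count_nops() and its name are assumptions, while the NOP encoding and the 4-byte instruction size are architectural.

/*
 * Sketch (illustrative, not from the series above): count A64 NOPs in a
 * range of kernel text, stepping by AARCH64_INSN_SIZE from <asm/insn.h>.
 */
#include <linux/types.h>

#include <asm/insn.h>
#include <asm/patching.h>       /* aarch64_insn_read() */

/* Illustrative helper name, not taken from the kernel sources. */
static int count_nops(void *start, void *end, unsigned int *nops)
{
        char *p;
        u32 insn;
        int ret;

        *nops = 0;
        for (p = start; p < (char *)end; p += AARCH64_INSN_SIZE) {
                ret = aarch64_insn_read(p, &insn);
                if (ret)
                        return ret;
                if (insn == 0xd503201f)         /* A64 NOP (HINT #0) */
                        (*nops)++;
        }

        return 0;
}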