From 7825451fa4dc04660f1f53d236e4302161d0ebd1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 15 Sep 2022 13:11:31 +0200 Subject: static_call: Add call depth tracking support When indirect calls are switched to direct calls and call depth tracking is enabled, it has to be ensured that the call target is not the function itself but its call thunk. But static calls are available before call thunks have been set up. Ensure a second run through the static call patching code after call thunks have been created. When call thunks are not enabled, this has no side effects. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220915111148.306100465@infradead.org --- kernel/static_call_inline.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c index dc5665b62814..639397b5491c 100644 --- a/kernel/static_call_inline.c +++ b/kernel/static_call_inline.c @@ -15,7 +15,18 @@ extern struct static_call_site __start_static_call_sites[], extern struct static_call_tramp_key __start_static_call_tramp_key[], __stop_static_call_tramp_key[]; -static bool static_call_initialized; +static int static_call_initialized; + +/* + * Must be called before early_initcall() to be effective. + */ +void static_call_force_reinit(void) +{ + if (WARN_ON_ONCE(!static_call_initialized)) + return; + + static_call_initialized++; +} /* mutex to protect key modules/sites */ static DEFINE_MUTEX(static_call_mutex); @@ -475,7 +486,8 @@ int __init static_call_init(void) { int ret; - if (static_call_initialized) + /* See static_call_force_reinit(). */ + if (static_call_initialized == 1) return 0; cpus_read_lock(); @@ -490,11 +502,12 @@ int __init static_call_init(void) BUG(); } - static_call_initialized = true; - #ifdef CONFIG_MODULES - register_module_notifier(&static_call_module_nb); + if (!static_call_initialized) + register_module_notifier(&static_call_module_nb); #endif + + static_call_initialized = 1; return 0; } early_initcall(static_call_init); -- cgit v1.2.3 From f1389181622a08d6f1c71407a64df36b809d632b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 15 Sep 2022 13:11:32 +0200 Subject: kallsyms: Take callthunks into account Since the pre-symbol function padding is an integral part of the symbol, make kallsyms report it as part of the symbol, i.e. as sym-x instead of prev_sym+y. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220915111148.409656012@infradead.org --- kernel/kallsyms.c | 45 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 60c20f301a6b..cc244c02b4cf 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -293,6 +293,12 @@ static unsigned long get_symbol_pos(unsigned long addr, return low; } +#ifdef CONFIG_FUNCTION_PADDING_BYTES +#define PADDING_BYTES CONFIG_FUNCTION_PADDING_BYTES +#else +#define PADDING_BYTES 0 +#endif + /* * Lookup an address but don't bother to find any names.
*/ @@ -300,13 +306,25 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { char namebuf[KSYM_NAME_LEN]; + int ret; + + addr += PADDING_BYTES; if (is_ksym_addr(addr)) { get_symbol_pos(addr, symbolsize, offset); - return 1; + ret = 1; + goto found; + } + + ret = !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf); + if (!ret) { + ret = !!__bpf_address_lookup(addr, symbolsize, + offset, namebuf); } - return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) || - !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); +found: + if (ret && offset) + *offset -= PADDING_BYTES; + return ret; } static const char *kallsyms_lookup_buildid(unsigned long addr, @@ -319,6 +337,8 @@ static const char *kallsyms_lookup_buildid(unsigned long addr, namebuf[KSYM_NAME_LEN - 1] = 0; namebuf[0] = 0; + addr += PADDING_BYTES; + if (is_ksym_addr(addr)) { unsigned long pos; @@ -348,6 +368,8 @@ static const char *kallsyms_lookup_buildid(unsigned long addr, found: cleanup_symbol_name(namebuf); + if (ret && offset) + *offset -= PADDING_BYTES; return ret; } @@ -374,6 +396,8 @@ int lookup_symbol_name(unsigned long addr, char *symname) symname[0] = '\0'; symname[KSYM_NAME_LEN - 1] = '\0'; + addr += PADDING_BYTES; + if (is_ksym_addr(addr)) { unsigned long pos; @@ -401,6 +425,8 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size, name[0] = '\0'; name[KSYM_NAME_LEN - 1] = '\0'; + addr += PADDING_BYTES; + if (is_ksym_addr(addr)) { unsigned long pos; @@ -417,6 +443,8 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size, return res; found: + if (offset) + *offset -= PADDING_BYTES; cleanup_symbol_name(name); return 0; } @@ -442,8 +470,15 @@ static int __sprint_symbol(char *buffer, unsigned long address, len = strlen(buffer); offset -= symbol_offset; - if (add_offset) - len += sprintf(buffer + len, "+%#lx/%#lx", offset, size); + if (add_offset) { + char s = '+'; + + if ((long)offset < 0) { + s = '-'; + offset = 0UL - offset; + } + len += sprintf(buffer + len, "%c%#lx/%#lx", s, offset, size); + } if (modname) { len += sprintf(buffer + len, " [%s", modname); -- cgit v1.2.3 From ee3e2469b3463d28ca4cde20e0283319ac6a562d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 15 Sep 2022 13:11:37 +0200 Subject: x86/ftrace: Make it call depth tracking aware Since ftrace has trampolines, don't use thunks for the __fentry__ site but instead require that every function called from there includes accounting. This very much includes all the direct-call functions. Additionally, ftrace uses ROP tricks in two places: - return_to_handler(), and - ftrace_regs_caller() when pt_regs->orig_ax is set by a direct-call. return_to_handler() already uses a retpoline to replace an indirect-jump to defeat IBT. Since this is a jump-type retpoline, make sure there is no accounting done and ALTERNATIVE the RET into a ret. ftrace_regs_caller() does much the same and gets the same treatment.
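As a rough sketch of what "includes accounting" means for a direct-call trampoline, condensed from the samples/ftrace changes below (my_tramp, my_direct_func and the exact include set are illustrative only, not part of this change):

#include <linux/linkage.h>		/* ASM_RET */
#include <linux/ftrace.h>		/* register_ftrace_direct() */
#include <asm/ibt.h>			/* ASM_ENDBR */
#include <asm/nospec-branch.h>		/* CALL_DEPTH_ACCOUNT */

extern void my_direct_func(void);
extern void my_tramp(void);

asm (
"	.pushsection	.text, \"ax\", @progbits\n"
"	.type		my_tramp, @function\n"
"	.globl		my_tramp\n"
"   my_tramp:"
	ASM_ENDBR
"	pushq %rbp\n"
"	movq %rsp, %rbp\n"
	/* Do the call-depth accounting for the call below. */
	CALL_DEPTH_ACCOUNT
"	call my_direct_func\n"
"	leave\n"
	ASM_RET
"	.size		my_tramp, .-my_tramp\n"
"	.popsection\n"
);

Registration via register_ftrace_direct() is unchanged; relative to the pre-existing samples, only the accounting line and the nospec-branch.h include are new.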
Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220915111148.927545073@infradead.org --- arch/x86/include/asm/nospec-branch.h | 9 +++++++++ arch/x86/kernel/callthunks.c | 2 +- arch/x86/kernel/ftrace.c | 16 ++++++++++++---- arch/x86/kernel/ftrace_64.S | 22 ++++++++++++++++++++-- arch/x86/net/bpf_jit_comp.c | 6 ++++++ kernel/trace/trace_selftest.c | 9 ++++++++- samples/ftrace/ftrace-direct-modify.c | 3 +++ samples/ftrace/ftrace-direct-multi-modify.c | 3 +++ samples/ftrace/ftrace-direct-multi.c | 2 ++ samples/ftrace/ftrace-direct-too.c | 2 ++ samples/ftrace/ftrace-direct.c | 2 ++ 11 files changed, 68 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4771147c7c5a..82580adbca4b 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -343,6 +343,12 @@ static inline void x86_set_skl_return_thunk(void) { x86_return_thunk = &__x86_return_skl; } + +#define CALL_DEPTH_ACCOUNT \ + ALTERNATIVE("", \ + __stringify(INCREMENT_CALL_DEPTH), \ + X86_FEATURE_CALL_DEPTH) + #ifdef CONFIG_CALL_THUNKS_DEBUG DECLARE_PER_CPU(u64, __x86_call_count); DECLARE_PER_CPU(u64, __x86_ret_count); @@ -351,6 +357,9 @@ DECLARE_PER_CPU(u64, __x86_ctxsw_count); #endif #else static inline void x86_set_skl_return_thunk(void) {} + +#define CALL_DEPTH_ACCOUNT "" + #endif #ifdef CONFIG_RETPOLINE diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c index a03d646b5e69..7d2c75ec9a8c 100644 --- a/arch/x86/kernel/callthunks.c +++ b/arch/x86/kernel/callthunks.c @@ -316,7 +316,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func) return 0; /* Is function call target a thunk? */ - if (is_callthunk(func)) + if (func && is_callthunk(func)) return 0; memcpy(*pprog, tmpl, tmpl_size); diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 4ac6692d5ef8..cf15ef5aecff 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -69,6 +69,10 @@ static const char *ftrace_nop_replace(void) static const char *ftrace_call_replace(unsigned long ip, unsigned long addr) { + /* + * No need to translate into a callthunk. The trampoline does + * the depth accounting itself. + */ return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr); } @@ -317,7 +321,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) unsigned long size; unsigned long *ptr; void *trampoline; - void *ip; + void *ip, *dest; /* 48 8b 15 is movq (%rip), %rdx */ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE }; @@ -404,10 +408,14 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* put in the call to the function */ mutex_lock(&text_mutex); call_offset -= start_offset; + /* + * No need to translate into a callthunk. The trampoline does + * the depth accounting before the call already. 
+ */ + dest = ftrace_ops_get_func(ops); memcpy(trampoline + call_offset, - text_gen_insn(CALL_INSN_OPCODE, - trampoline + call_offset, - ftrace_ops_get_func(ops)), CALL_INSN_SIZE); + text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest), + CALL_INSN_SIZE); mutex_unlock(&text_mutex); /* ALLOC_TRAMP flags lets us know we created it */ diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index b5b54f58957e..6a7e6d666a12 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -132,6 +133,7 @@ #ifdef CONFIG_DYNAMIC_FTRACE SYM_FUNC_START(__fentry__) + CALL_DEPTH_ACCOUNT RET SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) @@ -140,6 +142,8 @@ SYM_FUNC_START(ftrace_caller) /* save_mcount_regs fills in first two parameters */ save_mcount_regs + CALL_DEPTH_ACCOUNT + /* Stack - skipping return address of ftrace_caller */ leaq MCOUNT_REG_SIZE+8(%rsp), %rcx movq %rcx, RSP(%rsp) @@ -155,6 +159,9 @@ SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL) /* Only ops with REGS flag set should have CS register set */ movq $0, CS(%rsp) + /* Account for the function call below */ + CALL_DEPTH_ACCOUNT + SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) ANNOTATE_NOENDBR call ftrace_stub @@ -189,6 +196,8 @@ SYM_FUNC_START(ftrace_regs_caller) save_mcount_regs 8 /* save_mcount_regs fills in first two parameters */ + CALL_DEPTH_ACCOUNT + SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL) ANNOTATE_NOENDBR /* Load the ftrace_ops into the 3rd parameter */ @@ -219,6 +228,9 @@ SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL) /* regs go into 4th parameter */ leaq (%rsp), %rcx + /* Account for the function call below */ + CALL_DEPTH_ACCOUNT + SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) ANNOTATE_NOENDBR call ftrace_stub @@ -282,7 +294,9 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) int3 .Ldo_rebalance: add $8, %rsp - RET + ALTERNATIVE __stringify(RET), \ + __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \ + X86_FEATURE_CALL_DEPTH SYM_FUNC_END(ftrace_regs_caller) STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) @@ -291,6 +305,8 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ SYM_FUNC_START(__fentry__) + CALL_DEPTH_ACCOUNT + cmpq $ftrace_stub, ftrace_trace_function jnz trace @@ -347,6 +363,8 @@ SYM_CODE_START(return_to_handler) int3 .Ldo_rop: mov %rdi, (%rsp) - RET + ALTERNATIVE __stringify(RET), \ + __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \ + X86_FEATURE_CALL_DEPTH SYM_CODE_END(return_to_handler) #endif diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a6b46740ea30..f46b62029d91 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -2135,6 +2136,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i prog = image; EMIT_ENDBR(); + /* + * This is the direct-call trampoline, as such it needs accounting + * for the __fentry__ call. 
+ */ + x86_call_depth_emit_accounting(&prog, NULL); EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index a2d301f58ced..ff0536cea968 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -785,7 +785,14 @@ static struct fgraph_ops fgraph_ops __initdata = { }; #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS -noinline __noclone static void trace_direct_tramp(void) { } +#ifndef CALL_DEPTH_ACCOUNT +#define CALL_DEPTH_ACCOUNT "" +#endif + +noinline __noclone static void trace_direct_tramp(void) +{ + asm(CALL_DEPTH_ACCOUNT); +} #endif /* diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c index 39146fa83e20..de5a0f67f320 100644 --- a/samples/ftrace/ftrace-direct-modify.c +++ b/samples/ftrace/ftrace-direct-modify.c @@ -3,6 +3,7 @@ #include #include #include +#include extern void my_direct_func1(void); extern void my_direct_func2(void); @@ -34,6 +35,7 @@ asm ( ASM_ENDBR " pushq %rbp\n" " movq %rsp, %rbp\n" + CALL_DEPTH_ACCOUNT " call my_direct_func1\n" " leave\n" " .size my_tramp1, .-my_tramp1\n" @@ -45,6 +47,7 @@ asm ( ASM_ENDBR " pushq %rbp\n" " movq %rsp, %rbp\n" + CALL_DEPTH_ACCOUNT " call my_direct_func2\n" " leave\n" ASM_RET diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c index 65aa94d96f4e..d52370cad0b6 100644 --- a/samples/ftrace/ftrace-direct-multi-modify.c +++ b/samples/ftrace/ftrace-direct-multi-modify.c @@ -3,6 +3,7 @@ #include #include #include +#include extern void my_direct_func1(unsigned long ip); extern void my_direct_func2(unsigned long ip); @@ -32,6 +33,7 @@ asm ( ASM_ENDBR " pushq %rbp\n" " movq %rsp, %rbp\n" + CALL_DEPTH_ACCOUNT " pushq %rdi\n" " movq 8(%rbp), %rdi\n" " call my_direct_func1\n" @@ -46,6 +48,7 @@ asm ( ASM_ENDBR " pushq %rbp\n" " movq %rsp, %rbp\n" + CALL_DEPTH_ACCOUNT " pushq %rdi\n" " movq 8(%rbp), %rdi\n" " call my_direct_func2\n" diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c index 41ded7c615c7..ec1088922517 100644 --- a/samples/ftrace/ftrace-direct-multi.c +++ b/samples/ftrace/ftrace-direct-multi.c @@ -5,6 +5,7 @@ #include #include #include +#include extern void my_direct_func(unsigned long ip); @@ -27,6 +28,7 @@ asm ( ASM_ENDBR " pushq %rbp\n" " movq %rsp, %rbp\n" + CALL_DEPTH_ACCOUNT " pushq %rdi\n" " movq 8(%rbp), %rdi\n" " call my_direct_func\n" diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c index 6690468c5cc2..e13fb59a2b47 100644 --- a/samples/ftrace/ftrace-direct-too.c +++ b/samples/ftrace/ftrace-direct-too.c @@ -4,6 +4,7 @@ #include /* for handle_mm_fault() */ #include #include +#include extern void my_direct_func(struct vm_area_struct *vma, unsigned long address, unsigned int flags); @@ -29,6 +30,7 @@ asm ( ASM_ENDBR " pushq %rbp\n" " movq %rsp, %rbp\n" + CALL_DEPTH_ACCOUNT " pushq %rdi\n" " pushq %rsi\n" " pushq %rdx\n" diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c index e8f1e440b9b8..1f769d0db20f 100644 --- a/samples/ftrace/ftrace-direct.c +++ b/samples/ftrace/ftrace-direct.c @@ -4,6 +4,7 @@ #include /* for wake_up_process() */ #include #include +#include extern void my_direct_func(struct task_struct *p); @@ -26,6 +27,7 @@ asm ( ASM_ENDBR " pushq %rbp\n" " movq %rsp, %rbp\n" + CALL_DEPTH_ACCOUNT " pushq %rdi\n" " call my_direct_func\n" " popq %rdi\n" -- cgit 
v1.2.3 From 5ebddd7c4951c50142bcb239d4c6a82eff15759e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 28 Oct 2022 15:26:51 +0200 Subject: kallsyms: Revert "Take callthunks into account" This is a full revert of commit: f1389181622a ("kallsyms: Take callthunks into account") The commit assumes a number of things that are not quite right. Notably it assumes every symbol has PADDING_BYTES in front of it that are not claimed by another symbol. This is not true; even when compiled with: -fpatchable-function-entry=${PADDING_BYTES},${PADDING_BYTES} Notably things like .cold subfunctions do not need to adhere to this change in ABI. It is also not true when built with CFI_CLANG, which claims these PADDING_BYTES in the __cfi_##name symbol. Once the prefix bytes are not consistent and/or otherwise claimed, the approach this patch takes goes out the window and kallsyms resolution will report invalid symbol names. Therefore revert this to make room for another approach. Reported-by: kernel test robot Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yujie Liu Link: https://lore.kernel.org/r/202210241614.2ae4c1f5-yujie.liu@intel.com Link: https://lkml.kernel.org/r/20221028194453.330970755@infradead.org --- kernel/kallsyms.c | 45 +++++---------------------------------------- 1 file changed, 5 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index cc244c02b4cf..60c20f301a6b 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -293,12 +293,6 @@ static unsigned long get_symbol_pos(unsigned long addr, return low; } -#ifdef CONFIG_FUNCTION_PADDING_BYTES -#define PADDING_BYTES CONFIG_FUNCTION_PADDING_BYTES -#else -#define PADDING_BYTES 0 -#endif - /* * Lookup an address but don't bother to find any names.
*/ @@ -306,25 +300,13 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { char namebuf[KSYM_NAME_LEN]; - int ret; - - addr += PADDING_BYTES; if (is_ksym_addr(addr)) { get_symbol_pos(addr, symbolsize, offset); - ret = 1; - goto found; - } - - ret = !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf); - if (!ret) { - ret = !!__bpf_address_lookup(addr, symbolsize, - offset, namebuf); + return 1; } -found: - if (ret && offset) - *offset -= PADDING_BYTES; - return ret; + return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) || + !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); } static const char *kallsyms_lookup_buildid(unsigned long addr, @@ -337,8 +319,6 @@ static const char *kallsyms_lookup_buildid(unsigned long addr, namebuf[KSYM_NAME_LEN - 1] = 0; namebuf[0] = 0; - addr += PADDING_BYTES; - if (is_ksym_addr(addr)) { unsigned long pos; @@ -368,8 +348,6 @@ static const char *kallsyms_lookup_buildid(unsigned long addr, found: cleanup_symbol_name(namebuf); - if (ret && offset) - *offset -= PADDING_BYTES; return ret; } @@ -396,8 +374,6 @@ int lookup_symbol_name(unsigned long addr, char *symname) symname[0] = '\0'; symname[KSYM_NAME_LEN - 1] = '\0'; - addr += PADDING_BYTES; - if (is_ksym_addr(addr)) { unsigned long pos; @@ -425,8 +401,6 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size, name[0] = '\0'; name[KSYM_NAME_LEN - 1] = '\0'; - addr += PADDING_BYTES; - if (is_ksym_addr(addr)) { unsigned long pos; @@ -443,8 +417,6 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size, return res; found: - if (offset) - *offset -= PADDING_BYTES; cleanup_symbol_name(name); return 0; } @@ -470,15 +442,8 @@ static int __sprint_symbol(char *buffer, unsigned long address, len = strlen(buffer); offset -= symbol_offset; - if (add_offset) { - char s = '+'; - - if ((long)offset < 0) { - s = '-'; - offset = 0UL - offset; - } - len += sprintf(buffer + len, "%c%#lx/%#lx", s, offset, size); - } + if (add_offset) + len += sprintf(buffer + len, "+%#lx/%#lx", offset, size); if (modname) { len += sprintf(buffer + len, " [%s", modname); -- cgit v1.2.3
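The failure mode that motivated this revert can be reproduced with a stand-alone toy model of the unconditional padding shift (plain userspace C, not kernel code; the symbol names, addresses, sizes and PADDING_BYTES value below are made up for illustration):

#include <stdio.h>

#define PADDING_BYTES 16UL	/* stands in for CONFIG_FUNCTION_PADDING_BYTES */

struct sym {
	unsigned long addr;
	const char *name;
};

/*
 * Hypothetical, sorted symbol table: __cfi_foo claims the padding bytes in
 * front of foo (as CFI_CLANG does), and bar.cold has no padding of its own.
 */
static const struct sym syms[] = {
	{ 0x1000, "__cfi_foo" },
	{ 0x1010, "foo"       },
	{ 0x1020, "bar.cold"  },
	{ 0x1040, "baz"       },
};

/* Resolve addr the way the reverted patch did: shift the address up by
 * PADDING_BYTES, pick the covering symbol, then shift the offset back. */
static void resolve(unsigned long addr)
{
	unsigned long shifted = addr + PADDING_BYTES;
	const struct sym *s = &syms[0];
	long offset;
	char sign = '+';

	for (size_t i = 0; i < sizeof(syms) / sizeof(syms[0]); i++) {
		if (syms[i].addr <= shifted)
			s = &syms[i];
	}

	offset = (long)(shifted - s->addr) - (long)PADDING_BYTES;
	if (offset < 0) {
		sign = '-';
		offset = -offset;
	}
	printf("%#lx resolves to %s%c%#lx\n", addr, s->name, sign,
	       (unsigned long)offset);
}

int main(void)
{
	resolve(0x1018);	/* really foo+0x8 */
	return 0;
}

Run, this prints "0x1018 resolves to bar.cold-0x8" even though 0x1018 lies squarely inside foo: because bar.cold was emitted without the assumed padding, the shifted lookup lands in the wrong symbol, which is exactly the kind of invalid name/offset pair the revert describes.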