From 4545c89880138b30a868159bc1b209867b8a5f32 Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Mon, 27 Apr 2015 13:17:19 +0200
Subject: x86: introduce kaslr_offset()

The offset that has been chosen for kASLR during kernel decompression can be
easily computed as the difference between _text and __START_KERNEL. We are
already making use of this in the dump_kernel_offset() notifier and in
arch_crash_save_vmcoreinfo().

Introduce kaslr_offset(), which performs this computation instead of
hard-coding it, so that other kernel code (such as live patching) can make
use of it. Also convert the existing users to it.

This patch is an equivalent transformation without any effect on the
resulting code:

  $ diff -u vmlinux.old.asm vmlinux.new.asm
  --- vmlinux.old.asm	2015-04-28 17:55:19.520983368 +0200
  +++ vmlinux.new.asm	2015-04-28 17:55:24.141206072 +0200
  @@ -1,5 +1,5 @@
  -vmlinux.old:     file format elf64-x86-64
  +vmlinux.new:     file format elf64-x86-64


   Disassembly of section .text:
  $

Acked-by: Borislav Petkov
Signed-off-by: Jiri Kosina
---
 arch/x86/include/asm/setup.h       | 6 ++++++
 arch/x86/kernel/machine_kexec_64.c | 3 ++-
 arch/x86/kernel/setup.c            | 2 +-
 3 files changed, 9 insertions(+), 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index f69e06b283fb..785ac2f27271 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -65,12 +65,18 @@ static inline void x86_ce4100_early_setup(void) { }
  * This is set up by the setup-routine at boot-time
  */
 extern struct boot_params boot_params;
+extern char _text[];
 
 static inline bool kaslr_enabled(void)
 {
 	return !!(boot_params.hdr.loadflags & KASLR_FLAG);
 }
 
+static inline unsigned long kaslr_offset(void)
+{
+	return (unsigned long)&_text - __START_KERNEL;
+}
+
 /*
  * Do NOT EVER look at the BIOS memory size location.
  * It does not work on many machines.
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 415480d3ea84..e1029633f664 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef CONFIG_KEXEC_FILE
 static struct kexec_file_ops *kexec_file_loaders[] = {
@@ -334,7 +335,7 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
-			      (unsigned long)&_text - __START_KERNEL);
+			      kaslr_offset());
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d74ac33290ae..5056d3cfe266 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -834,7 +834,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
 	if (kaslr_enabled()) {
 		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
-			 (unsigned long)&_text - __START_KERNEL,
+			 kaslr_offset(),
 			 __START_KERNEL,
 			 __START_KERNEL_map, MODULES_VADDR-1);
--
cgit v1.2.3
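For illustration only, the arithmetic behind kaslr_offset() can be mocked up
in a small standalone C program. All names and address values below are
hypothetical stand-ins for the kernel symbols (_text, __START_KERNEL) and for
the pr_emerg() output in dump_kernel_offset(); only the subtraction itself
mirrors the helper introduced above:

	/* kaslr_offset_demo.c -- illustrative sketch, not kernel code */
	#include <stdio.h>

	/* Assumed link-time base, standing in for __START_KERNEL. */
	static const unsigned long linktime_base = 0xffffffff81000000UL;

	/* Assumed runtime address of _text after randomization. */
	static const unsigned long runtime_text = 0xffffffff8be00000UL;

	/* Mirrors kaslr_offset(): (unsigned long)&_text - __START_KERNEL */
	static unsigned long demo_kaslr_offset(void)
	{
		return runtime_text - linktime_base;
	}

	int main(void)
	{
		printf("Kernel Offset: 0x%lx from 0x%lx\n",
		       demo_kaslr_offset(), linktime_base);
		return 0;
	}

With the values chosen here the program prints an offset of 0xae00000, i.e.
the distance by which the assumed load base differs from the link-time base.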
From 5d4351ba654c2f25eb4f6883db742a16bccbb36b Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Mon, 27 Apr 2015 13:25:23 +0200
Subject: livepatch: x86: make kASLR logic more accurate

We give up the old_addr hint from the incoming patch module when the kernel
load base has been randomized (in that case, the incoming module has no idea
about the exact randomization offset).

We are currently too pessimistic and give up as soon as CONFIG_RANDOMIZE_BASE
is set; this does not, however, directly imply that the load base has
actually been randomized. There are config options that disable kASLR (such
as hibernation), the user could have disabled kASLR on the kernel command
line, etc.

The boot loader propagates the information whether the kernel has been
randomized through bootparams, which allows us to make the condition more
accurate.

On top of that, it seems unnecessary to give up the old_addr hint even if
randomization is active. The relocation offset can be computed using
kaslr_offset(), and old_addr can therefore be adjusted accordingly.

Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina
---
 arch/x86/include/asm/livepatch.h | 1 +
 kernel/livepatch/core.c          | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 2d29197bd2fb..19c099afa861 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -21,6 +21,7 @@
 #ifndef _ASM_X86_LIVEPATCH_H
 #define _ASM_X86_LIVEPATCH_H
 
+#include
 #include
 #include
 
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 284e2691e380..0e7c23c6cf3f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -234,8 +234,9 @@ static int klp_find_verify_func_addr(struct klp_object *obj,
 	int ret;
 
 #if defined(CONFIG_RANDOMIZE_BASE)
-	/* KASLR is enabled, disregard old_addr from user */
-	func->old_addr = 0;
+	/* If KASLR has been enabled, adjust old_addr accordingly */
+	if (kaslr_enabled() && func->old_addr)
+		func->old_addr += kaslr_offset();
 #endif
 
 	if (!func->old_addr || klp_is_module(obj))
--
cgit v1.2.3

From 535b3ddc285825c058cef3436a9aa207edffa6cd Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Wed, 29 Apr 2015 18:09:53 +0200
Subject: x86: kaslr: fix build due to missing ALIGN definition

Fengguang's bot reported that 4545c898 ("x86: introduce kaslr_offset()")
broke a randconfig build:

   In file included from arch/x86/xen/vga.c:5:0:
   arch/x86/include/asm/setup.h: In function 'kaslr_offset':
>> arch/x86/include/asm/setup.h:77:2: error: implicit declaration of function 'ALIGN' [-Werror=implicit-function-declaration]
     return (unsigned long)&_text - __START_KERNEL;
     ^

Fix that by making setup.h self-sufficient: explicitly include
linux/kernel.h, which provides ALIGN() (used in the expansion of
__START_KERNEL).

Reported-by: fengguang.wu@intel.com
Signed-off-by: Jiri Kosina
---
 arch/x86/include/asm/setup.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 785ac2f27271..11af24e09c8a 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -60,6 +60,7 @@ static inline void x86_ce4100_early_setup(void) { }
 #ifndef _SETUP
 
 #include
+#include <linux/kernel.h>
 
 /*
  * This is set up by the setup-routine at boot-time
--
cgit v1.2.3
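The old_addr handling introduced by the livepatch commit above can be
sketched the same way. kaslr_enabled(), kaslr_offset() and the address values
are mocked here with hypothetical numbers; only the adjustment logic mirrors
the patched klp_find_verify_func_addr():

	/* klp_old_addr_demo.c -- illustrative sketch, not kernel code */
	#include <stdio.h>
	#include <stdbool.h>

	/* Assumed: bootparams say the load base was randomized. */
	static bool demo_kaslr_enabled(void)
	{
		return true;
	}

	/* Assumed randomization offset (would come from kaslr_offset()). */
	static unsigned long demo_kaslr_offset(void)
	{
		return 0xae00000UL;
	}

	/* Mirrors the new logic: shift a non-zero old_addr hint by the
	 * offset instead of discarding it whenever CONFIG_RANDOMIZE_BASE
	 * is set. */
	static unsigned long adjust_old_addr(unsigned long old_addr)
	{
		if (demo_kaslr_enabled() && old_addr)
			old_addr += demo_kaslr_offset();
		return old_addr;
	}

	int main(void)
	{
		unsigned long hint = 0xffffffff810001a0UL; /* hypothetical hint */

		printf("adjusted old_addr: 0x%lx\n", adjust_old_addr(hint));
		return 0;
	}

The design change is visible in adjust_old_addr(): a non-zero hint is shifted
by the randomization offset rather than thrown away, so the symbol lookup can
still be verified against it.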