Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                       | 15
-rw-r--r--  arch/arm/Makefile                      | 12
-rw-r--r--  arch/arm/boot/compressed/Makefile      |  1
-rw-r--r--  arch/arm/include/asm/stackprotector.h  | 12
-rw-r--r--  arch/arm/include/asm/thread_info.h     |  3
-rw-r--r--  arch/arm/kernel/asm-offsets.c          |  4
-rw-r--r--  arch/arm/kernel/process.c              |  6
7 files changed, 50 insertions(+), 3 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 91be74d8df65..5c0305585a0a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1810,6 +1810,21 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
+config STACKPROTECTOR_PER_TASK
+ bool "Use a unique stack canary value for each task"
+ depends on GCC_PLUGINS && STACKPROTECTOR && SMP && !XIP_DEFLATED_DATA
+ select GCC_PLUGIN_ARM_SSP_PER_TASK
+ default y
+ help
+ Because GCC loads the stack canary value through an ordinary symbol
+ reference, on SMP systems that value can only change at reboot time,
+ and every task running in the kernel's address space is forced to
+ share the same canary value for as long as the system is up.
+
+ Enable this option to switch to a method that gives each task its
+ own canary value.
+
endmenu
menu "Boot options"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 05a91d8b89f3..0436002d5091 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -303,6 +303,18 @@ else
KBUILD_IMAGE := $(boot)/zImage
endif
+ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
+prepare: stack_protector_prepare
+stack_protector_prepare: prepare0
+ $(eval KBUILD_CFLAGS += \
+ -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \
+ awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
+ include/generated/asm-offsets.h) \
+ -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
+ awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
+ include/generated/asm-offsets.h))
+endif
+
all: $(notdir $(KBUILD_IMAGE))
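The two awk invocations above run after prepare0 has generated include/generated/asm-offsets.h; the DEFINE() entries from arch/arm/kernel/asm-offsets.c end up in that header as "#define NAME value" lines, so field $2 is the symbol name and $3 is the value handed to the plugin. The values below are made up for illustration (they depend on the configuration), but they show the shape of the lines being matched:

	#define THREAD_SZ_ORDER 1 /* THREAD_SIZE_ORDER */
	#define TI_STACK_CANARY 20 /* offsetof(struct thread_info, stack_canary) */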
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 1f5a5ffe7fcf..01bf2585a0fa 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -101,6 +101,7 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
$(libfdt) $(libfdt_hdrs) hyp-stub.S
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h
index ef5f7b69443e..72a20c3a0a90 100644
--- a/arch/arm/include/asm/stackprotector.h
+++ b/arch/arm/include/asm/stackprotector.h
@@ -6,8 +6,10 @@
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary
* and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on ARM. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
+ * "__stack_chk_guard" on ARM. This prevents SMP systems from using a
+ * different value for each task unless we enable a GCC plugin that
+ * replaces these symbol references with references to each task's own
+ * value.
*/
#ifndef _ASM_STACKPROTECTOR_H
@@ -16,6 +18,8 @@
#include <linux/random.h>
#include <linux/version.h>
+#include <asm/thread_info.h>
+
extern unsigned long __stack_chk_guard;
/*
@@ -33,7 +37,11 @@ static __always_inline void boot_init_stack_canary(void)
canary ^= LINUX_VERSION_CODE;
current->stack_canary = canary;
+#ifndef CONFIG_STACKPROTECTOR_PER_TASK
__stack_chk_guard = current->stack_canary;
+#else
+ current_thread_info()->stack_canary = current->stack_canary;
+#endif
}
#endif /* _ASM_STACKPROTECTOR_H */
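To make the mechanism concrete, here is an illustrative C-level approximation of what the plugin-instrumented code computes instead of loading __stack_chk_guard (a sketch of the idea, not the plugin's actual output): on ARM the thread_info sits at the base of the current stack, so masking the stack pointer down to a THREAD_SIZE boundary locates it, and the canary is read from the stack_canary field whose offset the top-level Makefile passes to the plugin.

	#include <asm/thread_info.h>	/* current_stack_pointer, THREAD_SIZE, struct thread_info */

	/* Sketch only: locate the per-task canary the way the arm_ssp_per_task
	 * plugin does, expressed in C rather than generated assembly. */
	static inline unsigned long per_task_canary(void)
	{
		struct thread_info *ti;

		/* thread_info lives at the bottom of the THREAD_SIZE-aligned stack */
		ti = (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));

		return ti->stack_canary;	/* field added by this patch */
	}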
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 8f55dc520a3e..286eb61c632b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -53,6 +53,9 @@ struct thread_info {
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
+#ifdef CONFIG_STACKPROTECTOR_PER_TASK
+ unsigned long stack_canary;
+#endif
struct cpu_context_save cpu_context; /* cpu context */
__u32 syscall; /* syscall number */
__u8 used_cp[16]; /* thread used copro */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 3968d6c22455..28b27104ac0c 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -79,6 +79,10 @@ int main(void)
#ifdef CONFIG_CRUNCH
DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate));
#endif
+#ifdef CONFIG_STACKPROTECTOR_PER_TASK
+ DEFINE(TI_STACK_CANARY, offsetof(struct thread_info, stack_canary));
+#endif
+ DEFINE(THREAD_SZ_ORDER, THREAD_SIZE_ORDER);
BLANK();
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 82ab015bf42b..16601d1442d1 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -39,7 +39,7 @@
#include <asm/tls.h>
#include <asm/vdso.h>
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@@ -267,6 +267,10 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
thread_notify(THREAD_NOTIFY_COPY, thread);
+#ifdef CONFIG_STACKPROTECTOR_PER_TASK
+ thread->stack_canary = p->stack_canary;
+#endif
+
return 0;
}