author     Mark Rutland <mark.rutland@arm.com>        2017-10-03 18:25:46 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>  2017-10-04 17:37:33 +0100
commit     b02faed15d86f846b0f23f47b92e0782baa873ed (patch)
tree       7dbbaa5b79d23c74241be5acfc4666a2456ca0cf /arch
parent     37f6b42e9c2966b08c7df5cfddc0d73c39cead4a (diff)
download   linux-b02faed15d86f846b0f23f47b92e0782baa873ed.tar.bz2
arm64: Use larger stacks when KASAN is selected
AddressSanitizer instrumentation can significantly bloat the stack, and with GCC 7 this can result in stack overflows at boot time in some configurations.

We can avoid this by doubling our stack size when KASAN is in use, as is already done on x86 (and has been since KASAN was introduced).

Regardless of other patches to decrease KASAN's stack utilization, kernels built with KASAN will always require more stack space than those built without, and we should take this into account.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
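In concrete terms, MIN_THREAD_SHIFT is the log2 of the minimum stack size, so the change takes the arm64 minimum from 16 KiB to 32 KiB when CONFIG_KASAN is set. A minimal, standalone sketch of that arithmetic (plain userspace C, not kernel code; the variable names are illustrative only):

#include <stdio.h>

/*
 * Sketch of the arithmetic implied by the patch: the minimum stack is
 * 1 << MIN_THREAD_SHIFT bytes, and KASAN_THREAD_SHIFT adds one to the
 * shift, doubling the minimum stack when CONFIG_KASAN is enabled.
 */
int main(void)
{
	unsigned long min_stack_without_kasan = 1UL << 14;        /* 16 KiB */
	unsigned long min_stack_with_kasan    = 1UL << (14 + 1);  /* 32 KiB */

	printf("min stack without KASAN: %lu bytes\n", min_stack_without_kasan);
	printf("min stack with KASAN:    %lu bytes\n", min_stack_with_kasan);
	return 0;
}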
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/memory.h | 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3585a5e26151..f7c4d2146aed 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -95,16 +95,19 @@
 #define KERNEL_END		_end
 
 /*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_SHADOW_SIZE	(0)
+#define KASAN_THREAD_SHIFT	0
 #endif
 
-#define MIN_THREAD_SHIFT	14
+#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
 
 /*
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
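For reference, the comment truncated by the end of the hunk introduces the definitions that consume MIN_THREAD_SHIFT. A sketch of how the surrounding (unchanged) code in arch/arm64/include/asm/memory.h is assumed to derive the actual stack size, based on the mainline header of this era rather than anything shown in this patch:

/* Sketch, not part of this patch: assumed use of MIN_THREAD_SHIFT. */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT		/* vmap'd stacks must be page multiples */
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)	/* e.g. 32 KiB with KASAN, 16 KiB without, on 4K pages */

Under that assumption, KASAN kernels with 4K pages get 32 KiB task stacks instead of 16 KiB, while 64K-page kernels with vmap'd stacks already round up to 64 KiB either way.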