From d08b9f0ca6605e13dcb48f04e55a30545b3c71eb Mon Sep 17 00:00:00 2001
From: Sami Tolvanen <samitolvanen@google.com>
Date: Mon, 27 Apr 2020 09:00:07 -0700
Subject: scs: Add support for Clang's Shadow Call Stack (SCS)

This change adds generic support for Clang's Shadow Call Stack,
which uses a shadow stack to protect return addresses from being
overwritten by an attacker. Details are available here:

  https://clang.llvm.org/docs/ShadowCallStack.html

Note that security guarantees in the kernel differ from the ones
documented for user space. The kernel must store addresses of
shadow stacks in memory, which means an attacker capable of reading
and writing arbitrary memory may be able to locate them and hijack
control flow by modifying the stacks.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
[will: Numerous cosmetic changes]
Signed-off-by: Will Deacon <will@kernel.org>
---
 kernel/scs.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)
 create mode 100644 kernel/scs.c

(limited to 'kernel/scs.c')

diff --git a/kernel/scs.c b/kernel/scs.c
new file mode 100644
index 000000000000..38f8f31c9451
--- /dev/null
+++ b/kernel/scs.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#include <linux/kasan.h>
+#include <linux/scs.h>
+#include <linux/slab.h>
+#include <asm/scs.h>
+
+static struct kmem_cache *scs_cache;
+
+static void *scs_alloc(int node)
+{
+	void *s;
+
+	s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+	if (s) {
+		*__scs_magic(s) = SCS_END_MAGIC;
+		/*
+		 * Poison the allocation to catch unintentional accesses to
+		 * the shadow stack when KASAN is enabled.
+		 */
+		kasan_poison_object_data(scs_cache, s);
+	}
+
+	return s;
+}
+
+static void scs_free(void *s)
+{
+	kasan_unpoison_object_data(scs_cache, s);
+	kmem_cache_free(scs_cache, s);
+}
+
+void __init scs_init(void)
+{
+	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+}
+
+int scs_prepare(struct task_struct *tsk, int node)
+{
+	void *s = scs_alloc(node);
+
+	if (!s)
+		return -ENOMEM;
+
+	task_scs(tsk) = s;
+	task_scs_offset(tsk) = 0;
+
+	return 0;
+}
+
+void scs_release(struct task_struct *tsk)
+{
+	void *s = task_scs(tsk);
+
+	if (!s)
+		return;
+
+	WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
+	scs_free(s);
+}
--
cgit v1.2.3


From 628d06a48f57c36abdc2a024930212e654a501b7 Mon Sep 17 00:00:00 2001
From: Sami Tolvanen <samitolvanen@google.com>
Date: Mon, 27 Apr 2020 09:00:08 -0700
Subject: scs: Add page accounting for shadow call stack allocations

This change adds accounting for the memory allocated for shadow stacks.
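
With CONFIG_SHADOW_CALL_STACK enabled, the new counter shows up in the
usual places; for example (the value here is illustrative, not taken
from a real boot):

  # grep ShadowCallStack /proc/meminfo
  ShadowCallStack:     168 kB
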
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
---
 drivers/base/node.c    |  6 ++++++
 fs/proc/meminfo.c      |  4 ++++
 include/linux/mmzone.h |  3 +++
 kernel/scs.c           | 15 +++++++++++++++
 mm/page_alloc.c        |  6 ++++++
 mm/vmstat.c            |  3 +++
 6 files changed, 37 insertions(+)

(limited to 'kernel/scs.c')

diff --git a/drivers/base/node.c b/drivers/base/node.c
index 10d7e818e118..50b8c0d43859 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       "Node %d AnonPages:      %8lu kB\n"
 		       "Node %d Shmem:          %8lu kB\n"
 		       "Node %d KernelStack:    %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+		       "Node %d ShadowCallStack:%8lu kB\n"
+#endif
 		       "Node %d PageTables:     %8lu kB\n"
 		       "Node %d NFS_Unstable:   %8lu kB\n"
 		       "Node %d Bounce:         %8lu kB\n"
@@ -438,6 +441,9 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
 		       nid, K(i.sharedram),
 		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+		       nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_KB),
+#endif
 		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
 		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8c1f1bb1a5ce..09cd51c8d23d 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -103,6 +103,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "SUnreclaim:     ", sunreclaim);
 	seq_printf(m, "KernelStack:    %8lu kB\n",
 		   global_zone_page_state(NR_KERNEL_STACK_KB));
+#ifdef CONFIG_SHADOW_CALL_STACK
+	seq_printf(m, "ShadowCallStack:%8lu kB\n",
+		   global_zone_page_state(NR_KERNEL_SCS_KB));
+#endif
 	show_val_kb(m, "PageTables:     ",
 		   global_zone_page_state(NR_PAGETABLE));
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1b9de7d220fb..acffc3bc6178 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -156,6 +156,9 @@ enum zone_stat_item {
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_PAGETABLE,		/* used for pagetables */
 	NR_KERNEL_STACK_KB,	/* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	NR_KERNEL_SCS_KB,	/* measured in KiB */
+#endif
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
diff --git a/kernel/scs.c b/kernel/scs.c
index 38f8f31c9451..6d2f983ac54e 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -6,8 +6,10 @@
  */
 
 #include <linux/kasan.h>
+#include <linux/mm.h>
 #include <linux/scs.h>
 #include <linux/slab.h>
+#include <linux/vmstat.h>
 #include <asm/scs.h>
 
 static struct kmem_cache *scs_cache;
@@ -40,6 +42,17 @@ void __init scs_init(void)
 	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
 }
 
+static struct page *__scs_page(struct task_struct *tsk)
+{
+	return virt_to_page(task_scs(tsk));
+}
+
+static void scs_account(struct task_struct *tsk, int account)
+{
+	mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_KB,
+			    account * (SCS_SIZE / 1024));
+}
+
 int scs_prepare(struct task_struct *tsk, int node)
 {
 	void *s = scs_alloc(node);
@@ -49,6 +62,7 @@ int scs_prepare(struct task_struct *tsk, int node)
 
 	task_scs(tsk) = s;
 	task_scs_offset(tsk) = 0;
+	scs_account(tsk, 1);
 
 	return 0;
 }
@@ -61,5 +75,6 @@ void scs_release(struct task_struct *tsk)
 		return;
 
 	WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
+	scs_account(tsk, -1);
 	scs_free(s);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 69827d4fa052..83743d7a6177 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5411,6 +5411,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			" managed:%lukB"
 			" mlocked:%lukB"
 			" kernel_stack:%lukB"
+#ifdef CONFIG_SHADOW_CALL_STACK
+			" shadow_call_stack:%lukB"
+#endif
 			" pagetables:%lukB"
 			" bounce:%lukB"
 			" free_pcp:%lukB"
@@ -5433,6 +5436,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			K(zone_managed_pages(zone)),
 			K(zone_page_state(zone, NR_MLOCK)),
 			zone_page_state(zone, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+			zone_page_state(zone, NR_KERNEL_SCS_KB),
+#endif
 			K(zone_page_state(zone, NR_PAGETABLE)),
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(free_pcp),
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 96d21a792b57..2435d2c24657 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1119,6 +1119,9 @@ const char * const vmstat_text[] = {
 	"nr_mlock",
 	"nr_page_table_pages",
 	"nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	"nr_shadow_call_stack",
+#endif
 	"nr_bounce",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
--
cgit v1.2.3


From 5bbaf9d1fcb9be696ee9a61636ab6803556c70f2 Mon Sep 17 00:00:00 2001
From: Sami Tolvanen <samitolvanen@google.com>
Date: Mon, 27 Apr 2020 09:00:09 -0700
Subject: scs: Add support for stack usage debugging

Implements CONFIG_DEBUG_STACK_USAGE for shadow stacks. When enabled,
also prints out the highest shadow stack usage per process.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Will Deacon <will@kernel.org>
[will: rewrote most of scs_check_usage()]
Signed-off-by: Will Deacon <will@kernel.org>
---
 kernel/scs.c | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

(limited to 'kernel/scs.c')

diff --git a/kernel/scs.c b/kernel/scs.c
index 6d2f983ac54e..9389c28f0853 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -63,10 +63,37 @@ int scs_prepare(struct task_struct *tsk, int node)
 	task_scs(tsk) = s;
 	task_scs_offset(tsk) = 0;
 	scs_account(tsk, 1);
-
 	return 0;
 }
 
+static void scs_check_usage(struct task_struct *tsk)
+{
+	static unsigned long highest;
+
+	unsigned long *p, prev, curr = highest, used = 0;
+
+	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
+		return;
+
+	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
+		if (!READ_ONCE_NOCHECK(*p))
+			break;
+		used++;
+	}
+
+	while (used > curr) {
+		prev = cmpxchg_relaxed(&highest, curr, used);
+
+		if (prev == curr) {
+			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
+				tsk->comm, task_pid_nr(tsk), used);
+			break;
+		}
+
+		curr = prev;
+	}
+}
+
 void scs_release(struct task_struct *tsk)
 {
 	void *s = task_scs(tsk);
@@ -75,6 +102,7 @@ void scs_release(struct task_struct *tsk)
 		return;
 
 	WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
+	scs_check_usage(tsk);
 	scs_account(tsk, -1);
 	scs_free(s);
 }
--
cgit v1.2.3


From 51189c7a7ed1b4ed4493e27275d466ff60406d3a Mon Sep 17 00:00:00 2001
From: Will Deacon <will@kernel.org>
Date: Fri, 15 May 2020 14:11:05 +0100
Subject: arm64: scs: Store absolute SCS stack pointer value in thread_info

Storing the SCS information in thread_info as a {base,offset} pair
introduces an additional load instruction on the ret-to-user path,
since the SCS stack pointer in x18 has to be converted back to an
offset by subtracting the base.

Replace the offset with the absolute SCS stack pointer value instead
and avoid the redundant load.
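
For example, with <tsk> standing in for the task pointer and <tmp> for
the scratch register, the scs_save sequence goes from three
instructions:

  ldr	<tmp>, [<tsk>, #TSK_TI_SCS_BASE]
  sub	<tmp>, x18, <tmp>
  str	<tmp>, [<tsk>, #TSK_TI_SCS_OFFSET]

to a single store (this is just the macro expansion from the hunk
below):

  str	x18, [<tsk>, #TSK_TI_SCS_SP]
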
Tested-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/scs.h         | 9 ++++-----
 arch/arm64/include/asm/thread_info.h | 4 ++--
 arch/arm64/kernel/asm-offsets.c      | 2 +-
 include/linux/scs.h                  | 8 ++++----
 kernel/scs.c                         | 3 +--
 5 files changed, 12 insertions(+), 14 deletions(-)

(limited to 'kernel/scs.c')

diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index 96549353b0cb..6b8cf4352fe3 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -4,16 +4,15 @@
 
 #ifdef __ASSEMBLY__
 
+#include <asm/asm-offsets.h>
+
 #ifdef CONFIG_SHADOW_CALL_STACK
 	.macro scs_load tsk, tmp
-	ldp	x18, \tmp, [\tsk, #TSK_TI_SCS_BASE]
-	add	x18, x18, \tmp
+	ldr	x18, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 
 	.macro scs_save tsk, tmp
-	ldr	\tmp, [\tsk, #TSK_TI_SCS_BASE]
-	sub	\tmp, x18, \tmp
-	str	\tmp, [\tsk, #TSK_TI_SCS_OFFSET]
+	str	x18, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
 	.macro scs_load tsk, tmp
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 9df79c0a4c43..6ea8b6a26ae9 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -43,7 +43,7 @@ struct thread_info {
 	};
 #ifdef CONFIG_SHADOW_CALL_STACK
 	void			*scs_base;
-	unsigned long		scs_offset;
+	void			*scs_sp;
 #endif
 };
 
@@ -107,7 +107,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #ifdef CONFIG_SHADOW_CALL_STACK
 #define INIT_SCS							\
 	.scs_base	= init_shadow_call_stack,			\
-	.scs_offset	= 0,
+	.scs_sp		= init_shadow_call_stack,
 #else
 #define INIT_SCS
 #endif
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index d7934250b68c..a098a45f63d8 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -36,7 +36,7 @@ int main(void)
 #endif
 #ifdef CONFIG_SHADOW_CALL_STACK
   DEFINE(TSK_TI_SCS_BASE,	offsetof(struct task_struct, thread_info.scs_base));
-  DEFINE(TSK_TI_SCS_OFFSET,	offsetof(struct task_struct, thread_info.scs_offset));
+  DEFINE(TSK_TI_SCS_SP,		offsetof(struct task_struct, thread_info.scs_sp));
 #endif
   DEFINE(TSK_STACK,		offsetof(struct task_struct, stack));
 #ifdef CONFIG_STACKPROTECTOR
diff --git a/include/linux/scs.h b/include/linux/scs.h
index 3f3662621a27..0eb2485ef832 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -27,7 +27,7 @@
 #define SCS_END_MAGIC		(0x5f6UL + POISON_POINTER_DELTA)
 
 #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
-#define task_scs_offset(tsk)	(task_thread_info(tsk)->scs_offset)
+#define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)
 
 void scs_init(void);
 int scs_prepare(struct task_struct *tsk, int node);
@@ -39,7 +39,7 @@ static inline void scs_task_reset(struct task_struct *tsk)
 {
 	/*
	 * Reset the shadow stack to the base address in case the task
	 * is reused.
 	 */
-	task_scs_offset(tsk) = 0;
+	task_scs_sp(tsk) = task_scs(tsk);
 }
 
 static inline unsigned long *__scs_magic(void *s)
@@ -50,9 +50,9 @@ static inline unsigned long *__scs_magic(void *s)
 static inline bool scs_corrupted(struct task_struct *tsk)
 {
 	unsigned long *magic = __scs_magic(task_scs(tsk));
+	unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);
 
-	return (task_scs_offset(tsk) >= SCS_SIZE - 1 ||
-		READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC);
+	return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
 }
 
 #else /* CONFIG_SHADOW_CALL_STACK */
diff --git a/kernel/scs.c b/kernel/scs.c
index 9389c28f0853..5ff8663e4a67 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -60,8 +60,7 @@ int scs_prepare(struct task_struct *tsk, int node)
 	if (!s)
 		return -ENOMEM;
 
-	task_scs(tsk) = s;
-	task_scs_offset(tsk) = 0;
+	task_scs(tsk) = task_scs_sp(tsk) = s;
 	scs_account(tsk, 1);
 	return 0;
 }
--
cgit v1.2.3


From bee348fab099b0f551caa874663e82a7f3bb64b3 Mon Sep 17 00:00:00 2001
From: Will Deacon <will@kernel.org>
Date: Fri, 15 May 2020 14:43:11 +0100
Subject: scs: Move accounting into alloc/free functions

There's no need to perform the shadow stack page accounting
independently of the lifetime of the underlying allocation, so call
the accounting code from the {alloc,free}() functions and simplify
the code in the process.

Tested-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 kernel/scs.c | 45 +++++++++++++++++++++------------------------
 1 file changed, 21 insertions(+), 24 deletions(-)

(limited to 'kernel/scs.c')

diff --git a/kernel/scs.c b/kernel/scs.c
index 5ff8663e4a67..aea841cd7586 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -14,25 +14,35 @@
 
 static struct kmem_cache *scs_cache;
 
+static void __scs_account(void *s, int account)
+{
+	struct page *scs_page = virt_to_page(s);
+
+	mod_zone_page_state(page_zone(scs_page), NR_KERNEL_SCS_KB,
+			    account * (SCS_SIZE / SZ_1K));
+}
+
 static void *scs_alloc(int node)
 {
-	void *s;
-
-	s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
-	if (s) {
-		*__scs_magic(s) = SCS_END_MAGIC;
-		/*
-		 * Poison the allocation to catch unintentional accesses to
-		 * the shadow stack when KASAN is enabled.
-		 */
-		kasan_poison_object_data(scs_cache, s);
-	}
+	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+
+	if (!s)
+		return NULL;
 
+	*__scs_magic(s) = SCS_END_MAGIC;
+
+	/*
+	 * Poison the allocation to catch unintentional accesses to
+	 * the shadow stack when KASAN is enabled.
+	 */
+	kasan_poison_object_data(scs_cache, s);
+	__scs_account(s, 1);
 	return s;
 }
 
 static void scs_free(void *s)
 {
+	__scs_account(s, -1);
 	kasan_unpoison_object_data(scs_cache, s);
 	kmem_cache_free(scs_cache, s);
 }
@@ -42,17 +52,6 @@ void __init scs_init(void)
 	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
 }
 
-static struct page *__scs_page(struct task_struct *tsk)
-{
-	return virt_to_page(task_scs(tsk));
-}
-
-static void scs_account(struct task_struct *tsk, int account)
-{
-	mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_KB,
-			    account * (SCS_SIZE / 1024));
-}
-
 int scs_prepare(struct task_struct *tsk, int node)
 {
 	void *s = scs_alloc(node);
@@ -61,7 +60,6 @@ int scs_prepare(struct task_struct *tsk, int node)
 		return -ENOMEM;
 
 	task_scs(tsk) = task_scs_sp(tsk) = s;
-	scs_account(tsk, 1);
 	return 0;
 }
 
@@ -102,6 +100,5 @@ void scs_release(struct task_struct *tsk)
 
 	WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
 	scs_check_usage(tsk);
-	scs_account(tsk, -1);
 	scs_free(s);
 }
--
cgit v1.2.3


From 88485be531f4aee841ddc53b56e2f6e6a338854d Mon Sep 17 00:00:00 2001
From: Will Deacon <will@kernel.org>
Date: Fri, 15 May 2020 14:56:05 +0100
Subject: scs: Move scs_overflow_check() out of architecture code

There is nothing architecture-specific about scs_overflow_check() as
it's just a trivial wrapper around scs_corrupted().

For parity with task_stack_end_corrupted(), rename scs_corrupted() to
task_scs_end_corrupted() and call it from schedule_debug() when
CONFIG_SCHED_STACK_END_CHECK is enabled, which better reflects its
purpose as a debug feature to catch inadvertent overflow of the SCS.

Finally, remove the unused scs_overflow_check() function entirely.

This has absolutely no impact on architectures that do not support
SCS (currently arm64 only).
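
With CONFIG_SCHED_STACK_END_CHECK=y, an overflowed shadow stack is now
reported from the scheduler with something along the lines of
(illustrative):

  Kernel panic - not syncing: corrupted shadow stack detected inside scheduler
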
Tested-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/scs.h | 18 ------------------
 arch/arm64/kernel/process.c  |  2 --
 arch/arm64/kernel/scs.c      |  2 +-
 include/linux/scs.h          |  4 ++--
 kernel/sched/core.c          |  3 +++
 kernel/scs.c                 |  3 ++-
 6 files changed, 8 insertions(+), 24 deletions(-)

(limited to 'kernel/scs.c')

diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index d46efdd2060a..eaa2cd92e4c1 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -24,24 +24,6 @@
 	.endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
-#else /* __ASSEMBLY__ */
-
-#include <linux/scs.h>
-
-#ifdef CONFIG_SHADOW_CALL_STACK
-
-static inline void scs_overflow_check(struct task_struct *tsk)
-{
-	if (unlikely(scs_corrupted(tsk)))
-		panic("corrupted shadow stack detected inside scheduler\n");
-}
-
-#else /* CONFIG_SHADOW_CALL_STACK */
-
-static inline void scs_overflow_check(struct task_struct *tsk) {}
-
-#endif /* CONFIG_SHADOW_CALL_STACK */
-
 #endif /* __ASSEMBLY __ */
 
 #endif /* _ASM_SCS_H */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a35d3318492c..56be4cbf771f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -52,7 +52,6 @@
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
-#include <asm/scs.h>
 #include <asm/stacktrace.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
@@ -516,7 +515,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ssbs_thread_switch(next);
-	scs_overflow_check(next);
 
 	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c
index adc97f826fab..955875dff9e1 100644
--- a/arch/arm64/kernel/scs.c
+++ b/arch/arm64/kernel/scs.c
@@ -6,7 +6,7 @@
  */
 
 #include <linux/percpu.h>
-#include <asm/scs.h>
+#include <linux/scs.h>
 
 /* Allocate a static per-CPU shadow stack */
 #define DEFINE_SCS(name)						\
diff --git a/include/linux/scs.h b/include/linux/scs.h
index 0eb2485ef832..2fd3df50e93e 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -47,7 +47,7 @@ static inline unsigned long *__scs_magic(void *s)
 	return (unsigned long *)(s + SCS_SIZE) - 1;
 }
 
-static inline bool scs_corrupted(struct task_struct *tsk)
+static inline bool task_scs_end_corrupted(struct task_struct *tsk)
 {
 	unsigned long *magic = __scs_magic(task_scs(tsk));
 	unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);
@@ -60,8 +60,8 @@
 
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
-static inline bool scs_corrupted(struct task_struct *tsk) { return false; }
 static inline void scs_release(struct task_struct *tsk) {}
+static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
 
 #endif /* CONFIG_SHADOW_CALL_STACK */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 934e03cfaec7..a1d815a11b90 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3878,6 +3878,9 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 #ifdef CONFIG_SCHED_STACK_END_CHECK
 	if (task_stack_end_corrupted(prev))
 		panic("corrupted stack end detected inside scheduler\n");
+
+	if (task_scs_end_corrupted(prev))
+		panic("corrupted shadow stack detected inside scheduler\n");
 #endif
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
diff --git a/kernel/scs.c b/kernel/scs.c
index aea841cd7586..faf0ecd7b893 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -98,7 +98,8 @@ void scs_release(struct task_struct *tsk)
 	if (!s)
 		return;
 
-	WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
+	WARN(task_scs_end_corrupted(tsk),
+	     "corrupted shadow stack detected when freeing task\n");
 	scs_check_usage(tsk);
 	scs_free(s);
 }
--
cgit v1.2.3


From aa7a65ae5b8f459617e5ed1422301386e7f12274 Mon Sep 17 00:00:00 2001
From: Will Deacon <will@kernel.org>
Date: Fri, 15 May 2020 16:15:46 +0100
Subject: scs: Remove references to asm/scs.h from core code

asm/scs.h is no longer needed by the core code, so remove a redundant
header inclusion and update the stale Kconfig text.

Tested-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/Kconfig | 4 ++--
 kernel/scs.c | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

(limited to 'kernel/scs.c')

diff --git a/arch/Kconfig b/arch/Kconfig
index 45dfca9a98d3..2e6f843d87c4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -537,8 +537,8 @@ config ARCH_SUPPORTS_SHADOW_CALL_STACK
 	bool
 	help
 	  An architecture should select this if it supports Clang's Shadow
-	  Call Stack, has asm/scs.h, and implements runtime support for shadow
-	  stack switching.
+	  Call Stack and implements runtime support for shadow stack
+	  switching.
 
 config SHADOW_CALL_STACK
 	bool "Clang Shadow Call Stack"
diff --git a/kernel/scs.c b/kernel/scs.c
index faf0ecd7b893..222a7a9ad543 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -10,7 +10,6 @@
 #include <linux/scs.h>
 #include <linux/slab.h>
 #include <linux/vmstat.h>
-#include <asm/scs.h>
 
 static struct kmem_cache *scs_cache;
--
cgit v1.2.3
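
For background, the instrumentation that CONFIG_SHADOW_CALL_STACK relies
on looks roughly like this on arm64 (an illustrative sketch of what
Clang emits with -fsanitize=shadow-call-stack; x18 is reserved as the
shadow call stack pointer):

  func:
	str	x30, [x18], #8		// push return address onto the shadow stack
	...				// function body; x30 may be spilled or clobbered
	ldr	x30, [x18, #-8]!	// reload return address from the shadow stack
	ret				// return via the protected copy in x30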