Diffstat (limited to 'mm/kasan/tags.c')
-rw-r--r--  mm/kasan/tags.c  143
1 file changed, 114 insertions(+), 29 deletions(-)
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 8f48b9502a17..67a222586846 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -6,9 +6,11 @@
* Copyright (c) 2020 Google, Inc.
 */

+#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
@@ -16,44 +18,127 @@
#include <linux/types.h>
#include "kasan.h"
+#include "../slab.h"
-void kasan_set_free_info(struct kmem_cache *cache,
- void *object, u8 tag)
-{
- struct kasan_alloc_meta *alloc_meta;
- u8 idx = 0;
+#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)
+
+enum kasan_arg_stacktrace {
+ KASAN_ARG_STACKTRACE_DEFAULT,
+ KASAN_ARG_STACKTRACE_OFF,
+ KASAN_ARG_STACKTRACE_ON,
+};
+
+static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;
+
+/* Whether to collect alloc/free stack traces. */
+DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (!alloc_meta)
- return;
+/* Non-zero, as initial pointer values are 0. */
+#define STACK_RING_BUSY_PTR ((void *)1)
+
+struct kasan_stack_ring stack_ring = {
+ .lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
+};
+
+/* kasan.stacktrace=off/on */
+static int __init early_kasan_flag_stacktrace(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
-#ifdef CONFIG_KASAN_TAGS_IDENTIFY
- idx = alloc_meta->free_track_idx;
- alloc_meta->free_pointer_tag[idx] = tag;
- alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
-#endif
+ if (!strcmp(arg, "off"))
+ kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
+ else if (!strcmp(arg, "on"))
+ kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
+ else
+ return -EINVAL;
- kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
+ return 0;
}
+early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
-struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- void *object, u8 tag)
+/* kasan.stack_ring_size=<number of entries> */
+static int __init early_kasan_flag_stack_ring_size(char *arg)
{
- struct kasan_alloc_meta *alloc_meta;
- int i = 0;
+ if (!arg)
+ return -EINVAL;
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (!alloc_meta)
- return NULL;
+ return kstrtoul(arg, 0, &stack_ring.size);
+}
+early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
+
+void __init kasan_init_tags(void)
+{
+ switch (kasan_arg_stacktrace) {
+ case KASAN_ARG_STACKTRACE_DEFAULT:
+ /* Default is specified by kasan_flag_stacktrace definition. */
+ break;
+ case KASAN_ARG_STACKTRACE_OFF:
+ static_branch_disable(&kasan_flag_stacktrace);
+ break;
+ case KASAN_ARG_STACKTRACE_ON:
+ static_branch_enable(&kasan_flag_stacktrace);
+ break;
+ }
-#ifdef CONFIG_KASAN_TAGS_IDENTIFY
- for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
- if (alloc_meta->free_pointer_tag[i] == tag)
- break;
+ if (kasan_stack_collection_enabled()) {
+ if (!stack_ring.size)
+ stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
+ stack_ring.entries = memblock_alloc(
+ sizeof(stack_ring.entries[0]) * stack_ring.size,
+ SMP_CACHE_BYTES);
+ if (WARN_ON(!stack_ring.entries))
+ static_branch_disable(&kasan_flag_stacktrace);
}
- if (i == KASAN_NR_FREE_STACKS)
- i = alloc_meta->free_track_idx;
-#endif
+}
+
+static void save_stack_info(struct kmem_cache *cache, void *object,
+ gfp_t gfp_flags, bool is_free)
+{
+ unsigned long flags;
+ depot_stack_handle_t stack;
+ u64 pos;
+ struct kasan_stack_ring_entry *entry;
+ void *old_ptr;
+
+ stack = kasan_save_stack(gfp_flags, true);
+
+ /*
+ * Prevent save_stack_info() from modifying stack ring
+ * when kasan_complete_mode_report_info() is walking it.
+ */
+ read_lock_irqsave(&stack_ring.lock, flags);
+
+next:
+ pos = atomic64_fetch_add(1, &stack_ring.pos);
+ entry = &stack_ring.entries[pos % stack_ring.size];
+
+ /* Detect stack ring entry slots that are being written to. */
+ old_ptr = READ_ONCE(entry->ptr);
+ if (old_ptr == STACK_RING_BUSY_PTR)
+ goto next; /* Busy slot. */
+ if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
+ goto next; /* Busy slot. */
+
+ WRITE_ONCE(entry->size, cache->object_size);
+ WRITE_ONCE(entry->pid, current->pid);
+ WRITE_ONCE(entry->stack, stack);
+ WRITE_ONCE(entry->is_free, is_free);
+
+ /*
+ * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
+ */
+ smp_store_release(&entry->ptr, (s64)object);
- return &alloc_meta->free_track[i];
+ read_unlock_irqrestore(&stack_ring.lock, flags);
+}
+
+void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
+{
+ save_stack_info(cache, object, flags, false);
+}
+
+void kasan_save_free_info(struct kmem_cache *cache, void *object)
+{
+ save_stack_info(cache, object, GFP_NOWAIT, true);
}
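
A note on the locking scheme above: the rwlock roles are deliberately inverted. Every save_stack_info() caller takes stack_ring.lock as a reader, so concurrent saves never serialize against each other; the per-entry try_cmpxchg() claim arbitrates between them instead. The report side then takes the lock as a writer to freeze the whole ring while it walks it. Below is an illustrative sketch of such a walker, assuming only the stack_ring and kasan_stack_ring_entry declarations this patch introduces; the real walker, kasan_complete_mode_report_info(), lives outside this file's diff and additionally matches pointer tags:

/*
 * Illustrative sketch, not the actual implementation: walk the ring
 * backwards from the most recently claimed slot, looking for entries
 * that recorded the given object.
 */
static void walk_stack_ring_sketch(void *object)
{
	unsigned long flags;
	u64 pos, i;
	struct kasan_stack_ring_entry *entry;
	void *ptr;

	/* Excludes all save_stack_info() callers, which take the lock as readers. */
	write_lock_irqsave(&stack_ring.lock, flags);

	pos = atomic64_read(&stack_ring.pos);
	for (i = pos - 1; i != pos - 1 - stack_ring.size; i--) {
		entry = &stack_ring.entries[i % stack_ring.size];

		/* Paired with smp_store_release() in save_stack_info(). */
		ptr = smp_load_acquire(&entry->ptr);

		if (ptr != object)
			continue;

		/* entry->pid, entry->stack and entry->is_free are valid here. */
	}

	write_unlock_irqrestore(&stack_ring.lock, flags);
}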
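For completeness, the two early parameters wired up above combine on the kernel command line as shown below; 65536 is an arbitrary illustrative value overriding the default of KASAN_STACK_RING_SIZE_DEFAULT (32 << 10 = 32768 entries):

	kasan.stacktrace=on kasan.stack_ring_size=65536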