Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/common.c          12
-rw-r--r--  mm/kasan/generic.c          6
-rw-r--r--  mm/kasan/kasan.h           92
-rw-r--r--  mm/kasan/quarantine.c      54
-rw-r--r--  mm/kasan/report_generic.c   8
5 files changed, 105 insertions(+), 67 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d9079ec11f31..c40c0e7b3b5f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -117,7 +117,7 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
if (likely(!PageHighMem(page)))
kasan_poison(page_address(page), PAGE_SIZE << order,
- KASAN_FREE_PAGE, init);
+ KASAN_PAGE_FREE, init);
}
/*
@@ -254,7 +254,7 @@ void __kasan_poison_slab(struct slab *slab)
for (i = 0; i < compound_nr(page); i++)
page_kasan_tag_reset(page + i);
kasan_poison(page_address(page), page_size(page),
- KASAN_KMALLOC_REDZONE, false);
+ KASAN_SLAB_REDZONE, false);
}
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
@@ -265,7 +265,7 @@ void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
- KASAN_KMALLOC_REDZONE, false);
+ KASAN_SLAB_REDZONE, false);
}
/*
@@ -357,7 +357,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
}
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
- KASAN_KMALLOC_FREE, init);
+ KASAN_SLAB_FREE, init);
if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
return false;
@@ -414,7 +414,7 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
if (unlikely(!folio_test_slab(folio))) {
if (____kasan_kfree_large(ptr, ip))
return;
- kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
+ kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
} else {
struct slab *slab = folio_slab(folio);
@@ -505,7 +505,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
redzone_end = round_up((unsigned long)(object + cache->object_size),
KASAN_GRANULE_SIZE);
kasan_poison((void *)redzone_start, redzone_end - redzone_start,
- KASAN_KMALLOC_REDZONE, false);
+ KASAN_SLAB_REDZONE, false);
/*
* Save alloc info (if possible) for kmalloc() allocations.
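
The common.c hunks above only switch call sites to the renamed shadow constants; the poisoning mechanics are unchanged. For readers unfamiliar with Generic KASAN, here is a simplified sketch of what such a kasan_poison() call does (not the in-tree implementation, which also validates alignment and has a separate HW_TAGS path):

/*
 * Sketch: each KASAN_GRANULE_SIZE-byte granule of [addr, addr + size)
 * is covered by one shadow byte, and poisoning fills those bytes with
 * a marker value such as KASAN_PAGE_FREE or KASAN_SLAB_REDZONE.
 */
static void kasan_poison_sketch(const void *addr, size_t size, u8 value)
{
	u8 *shadow_start = kasan_mem_to_shadow(addr);
	u8 *shadow_end = kasan_mem_to_shadow(addr + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}
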
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index a25ad4090615..437fcc7e77cf 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -369,14 +369,14 @@ void kasan_set_free_info(struct kmem_cache *cache,
kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
/* The object was freed and has free track set. */
- *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
+ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
}
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
void *object, u8 tag)
{
- if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
+ if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREETRACK)
return NULL;
- /* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
+ /* Free meta must be present with KASAN_SLAB_FREETRACK. */
return &kasan_get_free_meta(cache, object)->free_track;
}
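
kasan_set_free_info() and kasan_get_free_track() above read and write the object's first shadow byte directly. The address-to-shadow translation they rely on mirrors the in-tree kasan_mem_to_shadow() helper, sketched here, with KASAN_SHADOW_SCALE_SHIFT and KASAN_SHADOW_OFFSET supplied by the architecture:

/* One shadow byte per 2^KASAN_SHADOW_SCALE_SHIFT (8) bytes of memory. */
static inline void *kasan_mem_to_shadow_sketch(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
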
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index b01b4bbe0409..610d60d6e5b8 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -42,6 +42,7 @@ static inline bool kasan_sync_fault_possible(void)
{
return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}
+
#else
static inline bool kasan_stack_collection_enabled(void)
@@ -73,47 +74,41 @@ static inline bool kasan_sync_fault_possible(void)
#define KASAN_MEMORY_PER_SHADOW_PAGE (KASAN_GRANULE_SIZE << PAGE_SHIFT)
#ifdef CONFIG_KASAN_GENERIC
-#define KASAN_FREE_PAGE 0xFF /* page was freed */
-#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
-#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
-#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
-#define KASAN_VMALLOC_INVALID 0xF8 /* unallocated space in vmapped page */
+#define KASAN_PAGE_FREE 0xFF /* freed page */
+#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocation */
+#define KASAN_SLAB_REDZONE 0xFC /* redzone for slab object */
+#define KASAN_SLAB_FREE 0xFB /* freed slab object */
+#define KASAN_VMALLOC_INVALID 0xF8 /* inaccessible space in vmap area */
#else
-#define KASAN_FREE_PAGE KASAN_TAG_INVALID
-#define KASAN_PAGE_REDZONE KASAN_TAG_INVALID
-#define KASAN_KMALLOC_REDZONE KASAN_TAG_INVALID
-#define KASAN_KMALLOC_FREE KASAN_TAG_INVALID
-#define KASAN_VMALLOC_INVALID KASAN_TAG_INVALID /* only for SW_TAGS */
+#define KASAN_PAGE_FREE KASAN_TAG_INVALID
+#define KASAN_PAGE_REDZONE KASAN_TAG_INVALID
+#define KASAN_SLAB_REDZONE KASAN_TAG_INVALID
+#define KASAN_SLAB_FREE KASAN_TAG_INVALID
+#define KASAN_VMALLOC_INVALID KASAN_TAG_INVALID /* only used for SW_TAGS */
#endif
#ifdef CONFIG_KASAN_GENERIC
-#define KASAN_KMALLOC_FREETRACK 0xFA /* object was freed and has free track set */
-#define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */
+#define KASAN_SLAB_FREETRACK 0xFA /* freed slab object with free track */
+#define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */
-/*
- * Stack redzone shadow values
- * (Those are compiler's ABI, don't change them)
- */
-#define KASAN_STACK_LEFT 0xF1
-#define KASAN_STACK_MID 0xF2
-#define KASAN_STACK_RIGHT 0xF3
-#define KASAN_STACK_PARTIAL 0xF4
+/* Stack redzone shadow values. Compiler ABI, do not change. */
+#define KASAN_STACK_LEFT 0xF1
+#define KASAN_STACK_MID 0xF2
+#define KASAN_STACK_RIGHT 0xF3
+#define KASAN_STACK_PARTIAL 0xF4
-/*
- * alloca redzone shadow values
- */
+/* alloca redzone shadow values. */
#define KASAN_ALLOCA_LEFT 0xCA
#define KASAN_ALLOCA_RIGHT 0xCB
+/* alloca redzone size. Compiler ABI, do not change. */
#define KASAN_ALLOCA_REDZONE_SIZE 32
-/*
- * Stack frame marker (compiler ABI).
- */
+/* Stack frame marker. Compiler ABI, do not change. */
#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3
-/* Don't break randconfig/all*config builds */
+/* Dummy value to avoid breaking randconfig/all*config builds. */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
#endif
@@ -141,21 +136,21 @@ struct kasan_report_info {
unsigned long ip;
};
-/* The layout of struct dictated by compiler */
+/* Do not change the struct layout: compiler ABI. */
struct kasan_source_location {
const char *filename;
int line_no;
int column_no;
};
-/* The layout of struct dictated by compiler */
+/* Do not change the struct layout: compiler ABI. */
struct kasan_global {
const void *beg; /* Address of the beginning of the global variable. */
size_t size; /* Size of the global variable. */
- size_t size_with_redzone; /* Size of the variable + size of the red zone. 32 bytes aligned */
+ size_t size_with_redzone; /* Size of the variable + size of the redzone. 32 bytes aligned. */
const void *name;
const void *module_name; /* Name of the module where the global variable is declared. */
- unsigned long has_dynamic_init; /* This needed for C++ */
+ unsigned long has_dynamic_init; /* This is needed for C++. */
#if KASAN_ABI_VERSION >= 4
struct kasan_source_location *location;
#endif
@@ -164,9 +159,7 @@ struct kasan_global {
#endif
};
-/**
- * Structures to keep alloc and free tracks *
- */
+/* Structures for keeping alloc and free tracks. */
#define KASAN_STACK_DEPTH 64
@@ -183,11 +176,8 @@ struct kasan_track {
struct kasan_alloc_meta {
struct kasan_track alloc_track;
+ /* Generic mode stores free track in kasan_free_meta. */
#ifdef CONFIG_KASAN_GENERIC
- /*
- * The auxiliary stack is stored into struct kasan_alloc_meta.
- * The free stack is stored into struct kasan_free_meta.
- */
depot_stack_handle_t aux_stack[2];
#else
struct kasan_track free_track[KASAN_NR_FREE_STACKS];
@@ -203,18 +193,18 @@ struct qlist_node {
};
/*
- * Generic mode either stores free meta in the object itself or in the redzone
- * after the object. In the former case free meta offset is 0, in the latter
- * case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
- * offset when free meta isn't present.
+ * Free meta is stored either in the object itself or in the redzone after the
+ * object. In the former case, free meta offset is 0. In the latter case, the
+ * offset is between 0 and INT_MAX. INT_MAX marks that free meta is not present.
*/
#define KASAN_NO_FREE_META INT_MAX
+/*
+ * Free meta is only used by Generic mode while the object is in quarantine.
+ * After that, slab allocator stores the freelist pointer in the object.
+ */
struct kasan_free_meta {
#ifdef CONFIG_KASAN_GENERIC
- /* This field is used while the object is in the quarantine.
- * Otherwise it might be used for the allocator freelist.
- */
struct qlist_node quarantine_link;
struct kasan_track free_track;
#endif
@@ -417,9 +407,10 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
return;
/*
* Explicitly initialize the memory with the precise object size to
- * avoid overwriting the SLAB redzone. This disables initialization in
- * the arch code and may thus lead to performance penalty. The penalty
- * is accepted since SLAB redzones aren't enabled in production builds.
+ * avoid overwriting the slab redzone. This disables initialization in
+ * the arch code and may thus lead to performance penalty. This penalty
+ * does not affect production builds, as slab redzones are not enabled
+ * there.
*/
if (__slub_debug_enabled() &&
init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
@@ -503,8 +494,9 @@ void kasan_restore_multi_shot(bool enabled);
/*
* Exported functions for interfaces called from assembly or from generated
- * code. Declarations here to avoid warning about missing declarations.
+ * code. Declared here to avoid warnings about missing declarations.
*/
+
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
void __asan_register_globals(struct kasan_global *globals, size_t size);
void __asan_unregister_globals(struct kasan_global *globals, size_t size);
@@ -573,4 +565,4 @@ void __hwasan_storeN_noabort(unsigned long addr, size_t size);
void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);
-#endif
+#endif /* __MM_KASAN_KASAN_H */
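
The reworded KASAN_NO_FREE_META comment above is the contract that consumers of free meta have to honor. A hedged sketch of the lookup pattern, modeled on the in-tree kasan_get_free_meta() (which additionally strips the pointer tag via kasan_reset_tag()):

/*
 * Sketch of the free-meta lookup: offset 0 means free meta sits in the
 * object itself, a positive offset points into the trailing redzone,
 * and KASAN_NO_FREE_META means no free meta was reserved at all.
 */
static struct kasan_free_meta *get_free_meta_sketch(struct kmem_cache *cache,
						    const void *object)
{
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}
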
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 0a9def8ce5e8..75585077eb6d 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -99,6 +99,17 @@ static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);
+#ifdef CONFIG_PREEMPT_RT
+struct cpu_shrink_qlist {
+ raw_spinlock_t lock;
+ struct qlist_head qlist;
+};
+
+static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
+};
+#endif
+
/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;
@@ -152,7 +163,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
* As the object now gets freed from the quarantine, assume that its
* free track is no longer valid.
*/
- *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;
+ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
___cache_free(cache, object, _THIS_IP_);
@@ -308,10 +319,31 @@ static void qlist_move_cache(struct qlist_head *from,
}
}
-static void per_cpu_remove_cache(void *arg)
+#ifndef CONFIG_PREEMPT_RT
+static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
struct kmem_cache *cache = arg;
struct qlist_head to_free = QLIST_INIT;
+
+ qlist_move_cache(q, &to_free, cache);
+ qlist_free_all(&to_free, cache);
+}
+#else
+static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
+{
+ struct kmem_cache *cache = arg;
+ unsigned long flags;
+ struct cpu_shrink_qlist *sq;
+
+ sq = this_cpu_ptr(&shrink_qlist);
+ raw_spin_lock_irqsave(&sq->lock, flags);
+ qlist_move_cache(q, &sq->qlist, cache);
+ raw_spin_unlock_irqrestore(&sq->lock, flags);
+}
+#endif
+
+static void per_cpu_remove_cache(void *arg)
+{
struct qlist_head *q;
q = this_cpu_ptr(&cpu_quarantine);
@@ -322,8 +354,7 @@ static void per_cpu_remove_cache(void *arg)
*/
if (READ_ONCE(q->offline))
return;
- qlist_move_cache(q, &to_free, cache);
- qlist_free_all(&to_free, cache);
+ __per_cpu_remove_cache(q, arg);
}
/* Free all quarantined objects belonging to cache. */
@@ -341,6 +372,21 @@ void kasan_quarantine_remove_cache(struct kmem_cache *cache)
*/
on_each_cpu(per_cpu_remove_cache, cache, 1);
+#ifdef CONFIG_PREEMPT_RT
+ {
+ int cpu;
+ struct cpu_shrink_qlist *sq;
+
+ for_each_online_cpu(cpu) {
+ sq = per_cpu_ptr(&shrink_qlist, cpu);
+ raw_spin_lock_irqsave(&sq->lock, flags);
+ qlist_move_cache(&sq->qlist, &to_free, cache);
+ raw_spin_unlock_irqrestore(&sq->lock, flags);
+ }
+ qlist_free_all(&to_free, cache);
+ }
+#endif
+
raw_spin_lock_irqsave(&quarantine_lock, flags);
for (i = 0; i < QUARANTINE_BATCHES; i++) {
if (qlist_empty(&global_quarantine[i]))
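
Design note on the quarantine.c change: on PREEMPT_RT, per_cpu_remove_cache() runs from on_each_cpu() in hard-IRQ context, where qlist_free_all() cannot be called because the slab allocator's locks become sleeping locks on RT. The hunks above therefore stage the matching objects on a per-CPU cpu_shrink_qlist protected by a raw_spinlock_t (which remains a true spinlock on RT) and free them afterwards from kasan_quarantine_remove_cache(), in a context that is allowed to sleep.
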
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index efc5e79a103f..6689fb9a919b 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -66,7 +66,7 @@ static const char *get_shadow_bug_type(struct kasan_report_info *info)
bug_type = "out-of-bounds";
break;
case KASAN_PAGE_REDZONE:
- case KASAN_KMALLOC_REDZONE:
+ case KASAN_SLAB_REDZONE:
bug_type = "slab-out-of-bounds";
break;
case KASAN_GLOBAL_REDZONE:
@@ -78,9 +78,9 @@ static const char *get_shadow_bug_type(struct kasan_report_info *info)
case KASAN_STACK_PARTIAL:
bug_type = "stack-out-of-bounds";
break;
- case KASAN_FREE_PAGE:
- case KASAN_KMALLOC_FREE:
- case KASAN_KMALLOC_FREETRACK:
+ case KASAN_PAGE_FREE:
+ case KASAN_SLAB_FREE:
+ case KASAN_SLAB_FREETRACK:
bug_type = "use-after-free";
break;
case KASAN_ALLOCA_LEFT:
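
The renamed values feed the bug-type strings above. As an illustration (a hypothetical snippet in the spirit of the KASAN self-tests, not part of this patch), an access like the following is reported as "use-after-free" because kfree() left the object's shadow bytes set to KASAN_SLAB_FREE or KASAN_SLAB_FREETRACK:

static noinline void uaf_example(void)
{
	char *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return;
	kfree(p);
	/* Bad: object already freed; Generic KASAN flags the access. */
	((volatile char *)p)[0] = 'x';
}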