author    Linus Torvalds <torvalds@linux-foundation.org>  2020-08-07 11:39:33 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-08-07 11:39:33 -0700
commit    81e11336d97e7a4c25a65c302ef2bf9cd9808ed4 (patch)
tree      44e069e0bb26068137a1bda7ca5125c5b068b4ab /lib
parent    5631c5e0eb9035d92ceb20fcd9cdb7779a3f5cc7 (diff)
parent    912c05720f00d039103d356a59c37dc7c3995e01 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few MM hotfixes

 - kthread, tools, scripts, ntfs and ocfs2

 - some of MM

Subsystems affected by this patch series: kthread, tools, scripts, ntfs, ocfs2 and mm (hotfixes, pagealloc, slab-generic, slab, slub, kcsan, debug, pagecache, gup, swap, shmem, memcg, pagemap, mremap, mincore, sparsemem, vmalloc, kasan, pagealloc, hugetlb and vmscan).

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (162 commits)
  mm: vmscan: consistent update to pgrefill
  mm/vmscan.c: fix typo
  khugepaged: khugepaged_test_exit() check mmget_still_valid()
  khugepaged: retract_page_tables() remember to test exit
  khugepaged: collapse_pte_mapped_thp() protect the pmd lock
  khugepaged: collapse_pte_mapped_thp() flush the right range
  mm/hugetlb: fix calculation of adjust_range_if_pmd_sharing_possible
  mm: thp: replace HTTP links with HTTPS ones
  mm/page_alloc: fix memalloc_nocma_{save/restore} APIs
  mm/page_alloc.c: skip setting nodemask when we are in interrupt
  mm/page_alloc: fallbacks at most has 3 elements
  mm/page_alloc: silence a KASAN false positive
  mm/page_alloc.c: remove unnecessary end_bitidx for [set|get]_pfnblock_flags_mask()
  mm/page_alloc.c: simplify pageblock bitmap access
  mm/page_alloc.c: extract the common part in pfn_to_bitidx()
  mm/page_alloc.c: replace the definition of NR_MIGRATETYPE_BITS with PB_migratetype_bits
  mm/shuffle: remove dynamic reconfiguration
  mm/memory_hotplug: document why shuffle_zone() is relevant
  mm/page_alloc: remove nr_free_pagecache_pages()
  mm: remove vm_total_pages
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.kasan      39
-rw-r--r--  lib/Makefile            1
-rw-r--r--  lib/ioremap.c         287
-rw-r--r--  lib/mpi/mpiutil.c       6
-rw-r--r--  lib/percpu_counter.c   19
-rw-r--r--  lib/test_kasan.c       87
6 files changed, 111 insertions, 328 deletions
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 34b84bcbd3d9..047b53dbfd58 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -18,7 +18,7 @@ config CC_HAS_KASAN_SW_TAGS
config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300
-config KASAN
+menuconfig KASAN
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
@@ -29,9 +29,10 @@ config KASAN
designed to find out-of-bounds accesses and use-after-free bugs.
See Documentation/dev-tools/kasan.rst for details.
+if KASAN
+
choice
prompt "KASAN mode"
- depends on KASAN
default KASAN_GENERIC
help
KASAN has two modes: generic KASAN (similar to userspace ASan,
@@ -39,6 +40,7 @@ choice
software tag-based KASAN (a version based on software memory
tagging, arm64 only, similar to userspace HWASan, enabled with
CONFIG_KASAN_SW_TAGS).
+
Both generic and tag-based KASAN are strictly debugging features.
config KASAN_GENERIC
@@ -50,16 +52,18 @@ config KASAN_GENERIC
select STACKDEPOT
help
Enables generic KASAN mode.
- Supported in both GCC and Clang. With GCC it requires version 4.9.2
- or later for basic support and version 5.0 or later for detection of
- out-of-bounds accesses for stack and global variables and for inline
- instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
- version 3.7.0 or later and it doesn't support detection of
- out-of-bounds accesses for global variables yet.
+
+ This mode is supported in both GCC and Clang. With GCC it requires
+ version 8.3.0 or later. With Clang it requires version 7.0.0 or
+ later, but detection of out-of-bounds accesses for global variables
+ is supported only since Clang 11.
+
This mode consumes about 1/8th of available memory at kernel start
and introduces an overhead of ~x1.5 for the rest of the allocations.
The performance slowdown is ~x3.
+
For better error detection enable CONFIG_STACKTRACE.
+
Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
@@ -72,15 +76,19 @@ config KASAN_SW_TAGS
select STACKDEPOT
help
Enables software tag-based KASAN mode.
+
This mode requires Top Byte Ignore support by the CPU and therefore
- is only supported for arm64.
- This mode requires Clang version 7.0.0 or later.
+ is only supported for arm64. This mode requires Clang version 7.0.0
+ or later.
+
This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations.
This mode may potentially introduce problems relating to pointer
casting and comparison, as it embeds tags into the top byte of each
pointer.
+
For better error detection enable CONFIG_STACKTRACE.
+
Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
@@ -88,7 +96,6 @@ endchoice
choice
prompt "Instrumentation type"
- depends on KASAN
default KASAN_OUTLINE
config KASAN_OUTLINE
@@ -107,13 +114,11 @@ config KASAN_INLINE
memory accesses. This is faster than outline (in some workloads
it gives about x2 boost over outline instrumentation), but
make kernel's .text size much bigger.
- For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.
endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- depends on KASAN
help
The LLVM stack address sanitizer has a know problem that
causes excessive stack usage in a lot of functions, see
@@ -134,7 +139,7 @@ config KASAN_STACK
config KASAN_S390_4_LEVEL_PAGING
bool "KASan: use 4-level paging"
- depends on KASAN && S390
+ depends on S390
help
Compiling the kernel with KASan disables automatic 3-level vs
4-level paging selection. 3-level paging is used by default (up
@@ -151,7 +156,7 @@ config KASAN_SW_TAGS_IDENTIFY
config KASAN_VMALLOC
bool "Back mappings in vmalloc space with real shadow memory"
- depends on KASAN && HAVE_ARCH_KASAN_VMALLOC
+ depends on HAVE_ARCH_KASAN_VMALLOC
help
By default, the shadow region for vmalloc space is the read-only
zero page. This means that KASAN cannot detect errors involving
@@ -164,8 +169,10 @@ config KASAN_VMALLOC
config TEST_KASAN
tristate "Module for testing KASAN for bug detection"
- depends on m && KASAN
+ depends on m
help
This is a test module doing various nasty things like
out of bounds accesses, use after free. It is useful for testing
kernel debugging features like KASAN.
+
+endif # KASAN
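
The help text above pitches KASAN at out-of-bounds accesses and use-after-free bugs. As a rough, hedged illustration of the latter (not part of this patch; the function name and size are invented), the kind of error a CONFIG_KASAN kernel reports looks like this:

    #include <linux/slab.h>

    /* Illustrative only: touch an object after it has been freed. */
    static void kasan_uaf_sketch(void)
    {
            char *ptr = kmalloc(128, GFP_KERNEL);

            if (!ptr)
                    return;
            kfree(ptr);
            ptr[0] = 'x';   /* use-after-free: KASAN prints a report here */
    }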
diff --git a/lib/Makefile b/lib/Makefile
index 435f7f13b8aa..f39962104036 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -37,7 +37,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
lib-$(CONFIG_PRINTK) += dump_stack.o
-lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o klist.o
diff --git a/lib/ioremap.c b/lib/ioremap.c
deleted file mode 100644
index 5ee3526f71b8..000000000000
--- a/lib/ioremap.c
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <asm/cacheflush.h>
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
-
-static int __init set_nohugeiomap(char *str)
-{
- ioremap_huge_disabled = 1;
- return 0;
-}
-early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
- if (!ioremap_huge_disabled) {
- if (arch_ioremap_p4d_supported())
- ioremap_p4d_capable = 1;
- if (arch_ioremap_pud_supported())
- ioremap_pud_capable = 1;
- if (arch_ioremap_pmd_supported())
- ioremap_pmd_capable = 1;
- }
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
- return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
- return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
- return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pte_t *pte;
- u64 pfn;
-
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel_track(pmd, addr, mask);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
- return 0;
-}
-
-static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pmd_enabled())
- return 0;
-
- if ((end - addr) != PMD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PMD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PMD_SIZE))
- return 0;
-
- if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
- return 0;
-
- return pmd_set_huge(pmd, phys_addr, prot);
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
-
- if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PMD_MODIFIED;
- continue;
- }
-
- if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pud_enabled())
- return 0;
-
- if ((end - addr) != PUD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PUD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PUD_SIZE))
- return 0;
-
- if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
- return 0;
-
- return pud_set_huge(pud, phys_addr, prot);
-}
-
-static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_alloc_track(&init_mm, p4d, addr, mask);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
-
- if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PUD_MODIFIED;
- continue;
- }
-
- if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_p4d_enabled())
- return 0;
-
- if ((end - addr) != P4D_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, P4D_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, P4D_SIZE))
- return 0;
-
- if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
- return 0;
-
- return p4d_set_huge(p4d, phys_addr, prot);
-}
-
-static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
-
- if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_P4D_MODIFIED;
- continue;
- }
-
- if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-int ioremap_page_range(unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pgd_t *pgd;
- unsigned long start;
- unsigned long next;
- int err;
- pgtbl_mod_mask mask = 0;
-
- might_sleep();
- BUG_ON(addr >= end);
-
- start = addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
- &mask);
- if (err)
- break;
- } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
-
- flush_cache_vmap(start, end);
-
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
-
- return err;
-}
-
-#ifdef CONFIG_GENERIC_IOREMAP
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
-{
- unsigned long offset, vaddr;
- phys_addr_t last_addr;
- struct vm_struct *area;
-
- /* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP,
- __builtin_return_address(0));
- if (!area)
- return NULL;
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-EXPORT_SYMBOL(ioremap_prot);
-
-void iounmap(volatile void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
-#endif /* CONFIG_GENERIC_IOREMAP */
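
With the Makefile hunk above, lib/ no longer builds ioremap.o; the code is presumably relocated under mm/ as part of the vmalloc work listed in the merge message, but a diffstat limited to lib/ only shows the removal. The interface the file implements is unchanged for callers. A hedged usage sketch (the register offset and value are invented): a driver maps a device MMIO window, pokes it through the MMIO accessors, and unmaps it.

    #include <linux/io.h>

    static int mmio_probe_sketch(phys_addr_t base, size_t size)
    {
            void __iomem *regs = ioremap(base, size);

            if (!regs)
                    return -ENOMEM;
            writel(0x1, regs + 0x04);       /* hypothetical "enable" register */
            iounmap(regs);
            return 0;
    }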
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 20ed0f766787..4cd2b335cb7f 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a)
if (!a)
return;
- kzfree(a);
+ kfree_sensitive(a);
}
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
@@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
- kzfree(a->d);
+ kfree_sensitive(a->d);
a->d = p;
} else {
a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
@@ -112,7 +112,7 @@ void mpi_free(MPI a)
return;
if (a->flags & 4)
- kzfree(a->d);
+ kfree_sensitive(a->d);
else
mpi_free_limb_space(a->d);
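
The three hunks above are part of the tree-wide kzfree() to kfree_sensitive() rename: the behaviour stays the same, the new name just says what the function does. A minimal, hedged sketch of a caller (the helper name is invented):

    #include <linux/slab.h>

    static void free_key_material(void *secret)
    {
            /* Zeroes the object before freeing it, so key material such as
             * MPI limbs does not linger in freed slab memory; the zeroing
             * uses memzero_explicit() internally so it is not optimized
             * away. */
            kfree_sensitive(secret);
    }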
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a66595ba5543..a2345de90e93 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -99,6 +99,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
EXPORT_SYMBOL(percpu_counter_add_batch);
/*
+ * For percpu_counter with a big batch, the devication of its count could
+ * be big, and there is requirement to reduce the deviation, like when the
+ * counter's batch could be runtime decreased to get a better accuracy,
+ * which can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+ unsigned long flags;
+ s64 count;
+
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
+/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
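
percpu_counter_sync() added above folds only the calling CPU's delta into fbc->count, which is why its comment says the function has to be run on each CPU when a caller shrinks the batch at runtime. A hedged sketch of such a caller (the wrapper names are invented):

    #include <linux/percpu_counter.h>
    #include <linux/smp.h>

    static void percpu_counter_sync_one(void *info)
    {
            percpu_counter_sync(info);      /* folds this CPU's delta only */
    }

    /* Invented helper: run the sync on every CPU so the global count
     * reflects the smaller per-CPU slack after a batch reduction. */
    static void percpu_counter_sync_all(struct percpu_counter *fbc)
    {
            on_each_cpu(percpu_counter_sync_one, fbc, 1);
    }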
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index dc2c6a51d11a..53e953bb1d1d 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -23,6 +23,10 @@
#include <asm/page.h>
+#include "../mm/kasan/kasan.h"
+
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+
/*
* We assign some test results to these globals to make sure the tests
* are not eliminated as dead code.
@@ -48,7 +52,8 @@ static noinline void __init kmalloc_oob_right(void)
return;
}
- ptr[size] = 'x';
+ ptr[size + OOB_TAG_OFF] = 'x';
+
kfree(ptr);
}
@@ -100,7 +105,8 @@ static noinline void __init kmalloc_pagealloc_oob_right(void)
return;
}
- ptr[size] = 0;
+ ptr[size + OOB_TAG_OFF] = 0;
+
kfree(ptr);
}
@@ -170,7 +176,8 @@ static noinline void __init kmalloc_oob_krealloc_more(void)
return;
}
- ptr2[size2] = 'x';
+ ptr2[size2 + OOB_TAG_OFF] = 'x';
+
kfree(ptr2);
}
@@ -188,7 +195,9 @@ static noinline void __init kmalloc_oob_krealloc_less(void)
kfree(ptr1);
return;
}
- ptr2[size2] = 'x';
+
+ ptr2[size2 + OOB_TAG_OFF] = 'x';
+
kfree(ptr2);
}
@@ -224,7 +233,8 @@ static noinline void __init kmalloc_oob_memset_2(void)
return;
}
- memset(ptr+7, 0, 2);
+ memset(ptr + 7 + OOB_TAG_OFF, 0, 2);
+
kfree(ptr);
}
@@ -240,7 +250,8 @@ static noinline void __init kmalloc_oob_memset_4(void)
return;
}
- memset(ptr+5, 0, 4);
+ memset(ptr + 5 + OOB_TAG_OFF, 0, 4);
+
kfree(ptr);
}
@@ -257,7 +268,8 @@ static noinline void __init kmalloc_oob_memset_8(void)
return;
}
- memset(ptr+1, 0, 8);
+ memset(ptr + 1 + OOB_TAG_OFF, 0, 8);
+
kfree(ptr);
}
@@ -273,7 +285,8 @@ static noinline void __init kmalloc_oob_memset_16(void)
return;
}
- memset(ptr+1, 0, 16);
+ memset(ptr + 1 + OOB_TAG_OFF, 0, 16);
+
kfree(ptr);
}
@@ -289,7 +302,8 @@ static noinline void __init kmalloc_oob_in_memset(void)
return;
}
- memset(ptr, 0, size+5);
+ memset(ptr, 0, size + 5 + OOB_TAG_OFF);
+
kfree(ptr);
}
@@ -423,7 +437,8 @@ static noinline void __init kmem_cache_oob(void)
return;
}
- *p = p[size];
+ *p = p[size + OOB_TAG_OFF];
+
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
}
@@ -473,7 +488,7 @@ static noinline void __init kasan_global_oob(void)
static noinline void __init kasan_stack_oob(void)
{
char stack_array[10];
- volatile int i = 0;
+ volatile int i = OOB_TAG_OFF;
char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
pr_info("out-of-bounds on stack\n");
@@ -520,25 +535,25 @@ static noinline void __init copy_user_test(void)
}
pr_info("out-of-bounds in copy_from_user()\n");
- unused = copy_from_user(kmem, usermem, size + 1);
+ unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in copy_to_user()\n");
- unused = copy_to_user(usermem, kmem, size + 1);
+ unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_from_user()\n");
- unused = __copy_from_user(kmem, usermem, size + 1);
+ unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_to_user()\n");
- unused = __copy_to_user(usermem, kmem, size + 1);
+ unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
- unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
- unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in strncpy_from_user()\n");
- unused = strncpy_from_user(kmem, usermem, size + 1);
+ unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
vm_munmap((unsigned long)usermem, PAGE_SIZE);
kfree(kmem);
@@ -766,15 +781,15 @@ static noinline void __init kmalloc_double_kzfree(void)
char *ptr;
size_t size = 16;
- pr_info("double-free (kzfree)\n");
+ pr_info("double-free (kfree_sensitive)\n");
ptr = kmalloc(size, GFP_KERNEL);
if (!ptr) {
pr_err("Allocation failed\n");
return;
}
- kzfree(ptr);
- kzfree(ptr);
+ kfree_sensitive(ptr);
+ kfree_sensitive(ptr);
}
#ifdef CONFIG_KASAN_VMALLOC
@@ -801,6 +816,35 @@ static noinline void __init vmalloc_oob(void)
static void __init vmalloc_oob(void) {}
#endif
+static struct kasan_rcu_info {
+ int i;
+ struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
+{
+ struct kasan_rcu_info *fp = container_of(rp,
+ struct kasan_rcu_info, rcu);
+
+ kfree(fp);
+ fp->i = 1;
+}
+
+static noinline void __init kasan_rcu_uaf(void)
+{
+ struct kasan_rcu_info *ptr;
+
+ pr_info("use-after-free in kasan_rcu_reclaim\n");
+ ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
+ call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -848,6 +892,7 @@ static int __init kmalloc_tests_init(void)
kasan_bitops();
kmalloc_double_kzfree();
vmalloc_oob();
+ kasan_rcu_uaf();
kasan_restore_multi_shot(multishot);
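
A hedged note on the OOB_TAG_OFF adjustments that dominate this file: software tag-based KASAN tracks memory in KASAN_SHADOW_SCALE_SIZE-sized granules, so an access just past an allocation whose size is not a multiple of the granule can still land inside the object's last granule and go unreported; pushing the bad access one full granule further makes it visible to both modes, while generic KASAN needs no offset (OOB_TAG_OFF is 0 there). The sizes below are illustrative only:

    #include <linux/slab.h>

    static void oob_tag_off_sketch(void)
    {
            size_t size = 123;              /* not a multiple of 16 */
            char *ptr = kmalloc(size, GFP_KERNEL);

            if (!ptr)
                    return;
            /* Generic KASAN reports this; tag-based KASAN may not, because
             * byte 123 still lies inside the object's last 16-byte granule. */
            ptr[size] = 'x';
            /* One granule further is outside the tagged region, so both
             * modes report it; this is what size + OOB_TAG_OFF achieves. */
            ptr[size + 16] = 'x';
            kfree(ptr);
    }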