author		Linus Torvalds <torvalds@linux-foundation.org>	2022-05-24 12:27:09 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-05-24 12:27:09 -0700
commit		0bf13a84362e750a90008af259b098d7c0e0755b (patch)
tree		f7a315eb2c10ede9d92292a791fcff97486b924c /mm
parent		51518aa68c1ffb54f2fdfed5324af30325529b32 (diff)
parent		ed5edd5a70b9525085403f193786395179ea303d (diff)
download	linux-0bf13a84362e750a90008af259b098d7c0e0755b.tar.bz2
Merge tag 'kernel-hardening-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kernel hardening updates from Kees Cook:

 - usercopy hardening expanded to check other allocation types (Matthew Wilcox, Yuanzheng Song)

 - arm64 stackleak behavioral improvements (Mark Rutland)

 - arm64 CFI code gen improvement (Sami Tolvanen)

 - LoadPin LSM block dev API adjustment (Christoph Hellwig)

 - Clang randstruct support (Bill Wendling, Kees Cook)

* tag 'kernel-hardening-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (34 commits)
  loadpin: stop using bdevname
  mm: usercopy: move the virt_addr_valid() below the is_vmalloc_addr()
  gcc-plugins: randstruct: Remove cast exception handling
  af_unix: Silence randstruct GCC plugin warning
  niu: Silence randstruct warnings
  big_keys: Use struct for internal payload
  gcc-plugins: Change all version strings match kernel
  randomize_kstack: Improve docs on requirements/rationale
  lkdtm/stackleak: fix CONFIG_GCC_PLUGIN_STACKLEAK=n
  arm64: entry: use stackleak_erase_on_task_stack()
  stackleak: add on/off stack variants
  lkdtm/stackleak: check stack boundaries
  lkdtm/stackleak: prevent unexpected stack usage
  lkdtm/stackleak: rework boundary management
  lkdtm/stackleak: avoid spurious failure
  stackleak: rework poison scanning
  stackleak: rework stack high bound handling
  stackleak: clarify variable names
  stackleak: rework stack low bound handling
  stackleak: remove redundant check
  ...
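The usercopy rework in this merge drops the old CONFIG_HARDENED_USERCOPY_PAGESPAN heuristics (see the diff below) in favor of explicit per-allocation-type bounds checks, dispatched in order: kmap, vmalloc, then folio-backed memory. A minimal user-space sketch of that dispatch, under stated assumptions: struct alloc_model, enum alloc_kind, and copy_ok() are hypothetical stand-ins for the kernel's is_kmap_addr()/find_vm_area()/folio lookups, and the SLAB case is simplified (the real __check_heap_object() also honors slab usercopy regions).

/*
 * User-space model of the new check_heap_object() dispatch order.
 * Everything here is a hypothetical stand-in, not kernel API; only
 * the bounds arithmetic mirrors the merged code.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

enum alloc_kind { KMAP, VMALLOC, SLAB, LARGE_FOLIO, OTHER };

struct alloc_model {
	enum alloc_kind kind;
	unsigned long base;	/* start of the allocation */
	unsigned long size;	/* usable size in bytes */
};

/* Returns true when copying n bytes at ptr stays inside the object. */
static bool copy_ok(const struct alloc_model *a, unsigned long ptr,
		    unsigned long n)
{
	unsigned long offset = ptr - a->base;

	switch (a->kind) {
	case KMAP:	/* kmap: copy must not cross its page boundary */
		return (ptr | (PAGE_SIZE - 1)) >= ptr + n - 1;
	case VMALLOC:	/* vmalloc: bounded by the vm area size */
	case LARGE_FOLIO:	/* multi-page folio: bounded by folio size */
	case SLAB:	/* simplified; the kernel checks usercopy regions too */
		return offset + n <= a->size;
	default:	/* anything else is not checked here */
		return true;
	}
}

int main(void)
{
	struct alloc_model vm = { VMALLOC, 0x100000, 2 * PAGE_SIZE };

	printf("%d\n", copy_ok(&vm, vm.base + 8000, 192));	/* 1: fits */
	printf("%d\n", copy_ok(&vm, vm.base + 8000, 200));	/* 0: overruns */
	return 0;
}

Ordering matters here: vmalloc addresses generally fail virt_addr_valid(), so the vmalloc check has to run before that early return — which is what the "mm: usercopy: move the virt_addr_valid() below the is_vmalloc_addr()" commit in the shortlog above fixes.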
Diffstat (limited to 'mm')
-rw-r--r--	mm/usercopy.c	91
1 file changed, 24 insertions, 67 deletions
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2c235d5c2364..baeacc735b83 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/thread_info.h>
+#include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
 #include <asm/sections.h>
@@ -157,91 +158,47 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
usercopy_abort("null address", NULL, to_user, ptr, n);
}
-/* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline void check_page_span(const void *ptr, unsigned long n,
- struct page *page, bool to_user)
+static inline void check_heap_object(const void *ptr, unsigned long n,
+ bool to_user)
{
-#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
- const void *end = ptr + n - 1;
- struct page *endpage;
- bool is_reserved, is_cma;
+ struct folio *folio;
- /*
- * Sometimes the kernel data regions are not marked Reserved (see
- * check below). And sometimes [_sdata,_edata) does not cover
- * rodata and/or bss, so check each range explicitly.
- */
+ if (is_kmap_addr(ptr)) {
+ unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
- /* Allow reads of kernel rodata region (if not marked as Reserved). */
- if (ptr >= (const void *)__start_rodata &&
- end <= (const void *)__end_rodata) {
- if (!to_user)
- usercopy_abort("rodata", NULL, to_user, 0, n);
+ if ((unsigned long)ptr + n - 1 > page_end)
+ usercopy_abort("kmap", NULL, to_user,
+ offset_in_page(ptr), n);
return;
}
- /* Allow kernel data region (if not marked as Reserved). */
- if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
- return;
+ if (is_vmalloc_addr(ptr)) {
+ struct vm_struct *area = find_vm_area(ptr);
+ unsigned long offset;
- /* Allow kernel bss region (if not marked as Reserved). */
- if (ptr >= (const void *)__bss_start &&
- end <= (const void *)__bss_stop)
- return;
-
- /* Is the object wholly within one base page? */
- if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
- ((unsigned long)end & (unsigned long)PAGE_MASK)))
- return;
+ if (!area) {
+ usercopy_abort("vmalloc", "no area", to_user, 0, n);
+ return;
+ }
- /* Allow if fully inside the same compound (__GFP_COMP) page. */
- endpage = virt_to_head_page(end);
- if (likely(endpage == page))
+ offset = ptr - area->addr;
+ if (offset + n > get_vm_area_size(area))
+ usercopy_abort("vmalloc", NULL, to_user, offset, n);
return;
-
- /*
- * Reject if range is entirely either Reserved (i.e. special or
- * device memory), or CMA. Otherwise, reject since the object spans
- * several independently allocated pages.
- */
- is_reserved = PageReserved(page);
- is_cma = is_migrate_cma_page(page);
- if (!is_reserved && !is_cma)
- usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
-
- for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
- page = virt_to_head_page(ptr);
- if (is_reserved && !PageReserved(page))
- usercopy_abort("spans Reserved and non-Reserved pages",
- NULL, to_user, 0, n);
- if (is_cma && !is_migrate_cma_page(page))
- usercopy_abort("spans CMA and non-CMA pages", NULL,
- to_user, 0, n);
}
-#endif
-}
-
-static inline void check_heap_object(const void *ptr, unsigned long n,
- bool to_user)
-{
- struct folio *folio;
if (!virt_addr_valid(ptr))
return;
- /*
- * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
- * highmem page or fallback to virt_to_page(). The following
- * is effectively a highmem-aware virt_to_slab().
- */
- folio = page_folio(kmap_to_page((void *)ptr));
+ folio = virt_to_folio(ptr);
if (folio_test_slab(folio)) {
/* Check slab allocator for flags and size. */
__check_heap_object(ptr, n, folio_slab(folio), to_user);
- } else {
- /* Verify object does not incorrectly span multiple pages. */
- check_page_span(ptr, n, folio_page(folio, 0), to_user);
+ } else if (folio_test_large(folio)) {
+ unsigned long offset = ptr - folio_address(folio);
+ if (offset + n > folio_size(folio))
+ usercopy_abort("page alloc", NULL, to_user, offset, n);
}
}
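
A note on the kmap branch's arithmetic: OR-ing the pointer with (PAGE_SIZE - 1) yields the address of the last byte of the containing page, so the copy is rejected exactly when its final byte (ptr + n - 1) would land past that page. A standalone demonstration, assuming only the page-size constant below (kmap_copy_ok() is a hypothetical name, not kernel API):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/*
 * Same bounds test as the kmap branch above: ptr | (PAGE_SIZE - 1)
 * is the address of the last byte of ptr's page, so the copy fits
 * iff its last byte (ptr + n - 1) does not go past it.
 */
static int kmap_copy_ok(const void *ptr, unsigned long n)
{
	unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);

	return (unsigned long)ptr + n - 1 <= page_end;
}

int main(void)
{
	char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	printf("%d\n", kmap_copy_ok(page + 4000, 96));	/* 1: ends at byte 4095 */
	printf("%d\n", kmap_copy_ok(page + 4000, 97));	/* 0: crosses into next page */
	free(page);
	return 0;
}

The vmalloc and large-folio branches are the same shape of check, with get_vm_area_size() and folio_size() respectively supplying the upper bound.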