From 1109a5d907015005cdbe9eaa4fec40213e2f9010 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Mon, 10 Jan 2022 23:15:30 +0000
Subject: usercopy: Remove HARDENED_USERCOPY_PAGESPAN

There isn't enough information to make this a useful check any more;
the useful parts of it were moved in earlier patches, so remove this
set of checks now.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-5-willy@infradead.org
---
 mm/usercopy.c | 61 -------------------------------------------------------------
 1 file changed, 61 deletions(-)

(limited to 'mm/usercopy.c')

diff --git a/mm/usercopy.c b/mm/usercopy.c
index 9458c2b24b02..ac8a093e90c1 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -158,64 +158,6 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 		usercopy_abort("null address", NULL, to_user, ptr, n);
 }
 
-/* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline void check_page_span(const void *ptr, unsigned long n,
-				   struct page *page, bool to_user)
-{
-#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
-	const void *end = ptr + n - 1;
-	bool is_reserved, is_cma;
-
-	/*
-	 * Sometimes the kernel data regions are not marked Reserved (see
-	 * check below). And sometimes [_sdata,_edata) does not cover
-	 * rodata and/or bss, so check each range explicitly.
-	 */
-
-	/* Allow reads of kernel rodata region (if not marked as Reserved). */
-	if (ptr >= (const void *)__start_rodata &&
-	    end <= (const void *)__end_rodata) {
-		if (!to_user)
-			usercopy_abort("rodata", NULL, to_user, 0, n);
-		return;
-	}
-
-	/* Allow kernel data region (if not marked as Reserved). */
-	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
-		return;
-
-	/* Allow kernel bss region (if not marked as Reserved). */
-	if (ptr >= (const void *)__bss_start &&
-	    end <= (const void *)__bss_stop)
-		return;
-
-	/* Is the object wholly within one base page? */
-	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
-		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
-		return;
-
-	/*
-	 * Reject if range is entirely either Reserved (i.e. special or
-	 * device memory), or CMA. Otherwise, reject since the object spans
-	 * several independently allocated pages.
-	 */
-	is_reserved = PageReserved(page);
-	is_cma = is_migrate_cma_page(page);
-	if (!is_reserved && !is_cma)
-		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
-
-	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
-		page = virt_to_head_page(ptr);
-		if (is_reserved && !PageReserved(page))
-			usercopy_abort("spans Reserved and non-Reserved pages",
-				       NULL, to_user, 0, n);
-		if (is_cma && !is_migrate_cma_page(page))
-			usercopy_abort("spans CMA and non-CMA pages", NULL,
-				       to_user, 0, n);
-	}
-#endif
-}
-
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
@@ -257,9 +199,6 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		unsigned long offset = ptr - folio_address(folio);
 		if (offset + n > folio_size(folio))
 			usercopy_abort("page alloc", NULL, to_user, offset, n);
-	} else {
-		/* Verify object does not incorrectly span multiple pages. */
-		check_page_span(ptr, n, folio_page(folio, 0), to_user);
 	}
 }
--
cgit v1.2.3
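
A note for readers following along outside the kernel tree: the central test
the removed check_page_span() relied on, "is the object wholly within one
base page?", can be sketched as a small self-contained C program. This is a
minimal sketch, not kernel code; it assumes 4 KiB pages (PAGE_SHIFT of 12),
whereas the kernel gets these macros from asm/page.h, and the function name
object_in_one_page() is invented for illustration.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	/* True if the n bytes starting at ptr share a single base page. */
	static bool object_in_one_page(uintptr_t ptr, unsigned long n)
	{
		uintptr_t end = ptr + n - 1;	/* inclusive last byte, as in the patch */

		/* Clearing the low PAGE_SHIFT bits yields the page base address. */
		return (ptr & PAGE_MASK) == (end & PAGE_MASK);
	}

	int main(void)
	{
		/* 32 bytes starting 16 bytes before a page boundary span two pages. */
		printf("%d\n", object_in_one_page(0x1000ff0, 32));	/* prints 0 */
		/* The same 32 bytes at a page-aligned address fit in one page. */
		printf("%d\n", object_in_one_page(0x1001000, 32));	/* prints 1 */
		return 0;
	}

Objects that failed this test were tolerated only if every page they touched
was consistently Reserved or consistently CMA, which is the walk the removed
loop performed one PAGE_SIZE step at a time.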
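
The bounds check that remains in check_heap_object() expresses the same
safety property per folio rather than per page. A minimal sketch of that
logic, mirroring "offset + n > folio_size(folio)" from the retained hunk;
folio_base and folio_bytes are hypothetical stand-ins for folio_address()
and folio_size():

	#include <stdbool.h>

	/* Sketch only: the object must lie entirely within its backing folio. */
	static bool object_within_folio(const void *ptr, unsigned long n,
					const void *folio_base,
					unsigned long folio_bytes)
	{
		unsigned long offset = (const char *)ptr - (const char *)folio_base;

		return offset + n <= folio_bytes;
	}

Because a folio is a single allocation, one size comparison against the
folio covers the cases the page-span walk could only approximate.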