From c3f896dcf1e47959aca4f8e6ac9537b478949126 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 1 Jun 2020 21:51:57 -0700
Subject: mm: switch the test_vmalloc module to use __vmalloc_node

No need to export the very low-level __vmalloc_node_range when the test
module can use a slightly higher level variant.

[akpm@linux-foundation.org: add missing `node' arg]
[akpm@linux-foundation.org: fix riscv nommu build]
Signed-off-by: Christoph Hellwig
Signed-off-by: Andrew Morton
Acked-by: Peter Zijlstra (Intel)
Cc: Christian Borntraeger
Cc: Christophe Leroy
Cc: Daniel Vetter
Cc: David Airlie
Cc: Gao Xiang
Cc: Greg Kroah-Hartman
Cc: Haiyang Zhang
Cc: Johannes Weiner
Cc: "K. Y. Srinivasan"
Cc: Laura Abbott
Cc: Mark Rutland
Cc: Michael Kelley
Cc: Minchan Kim
Cc: Nitin Gupta
Cc: Robin Murphy
Cc: Sakari Ailus
Cc: Stephen Hemminger
Cc: Sumit Semwal
Cc: Wei Liu
Cc: Benjamin Herrenschmidt
Cc: Catalin Marinas
Cc: Heiko Carstens
Cc: Paul Mackerras
Cc: Vasily Gorbik
Cc: Will Deacon
Link: http://lkml.kernel.org/r/20200414131348.444715-26-hch@lst.de
Signed-off-by: Linus Torvalds
---
 lib/test_vmalloc.c | 26 +++++++-------------------
 1 file changed, 7 insertions(+), 19 deletions(-)

(limited to 'lib')

diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 8bbefcaddfe8..ddc9685702b1 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -91,12 +91,8 @@ static int random_size_align_alloc_test(void)
 		 */
 		size = ((rnd % 10) + 1) * PAGE_SIZE;

-		ptr = __vmalloc_node_range(size, align,
-				VMALLOC_START, VMALLOC_END,
-				GFP_KERNEL | __GFP_ZERO,
-				PAGE_KERNEL,
-				0, 0, __builtin_return_address(0));
-
+		ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0,
+				__builtin_return_address(0));
 		if (!ptr)
 			return -1;

@@ -118,12 +114,8 @@ static int align_shift_alloc_test(void)
 	for (i = 0; i < BITS_PER_LONG; i++) {
 		align = ((unsigned long) 1) << i;

-		ptr = __vmalloc_node_range(PAGE_SIZE, align,
-				VMALLOC_START, VMALLOC_END,
-				GFP_KERNEL | __GFP_ZERO,
-				PAGE_KERNEL,
-				0, 0, __builtin_return_address(0));
-
+		ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
+				__builtin_return_address(0));
 		if (!ptr)
 			return -1;

@@ -139,13 +131,9 @@ static int fix_align_alloc_test(void)
 	int i;

 	for (i = 0; i < test_loop_count; i++) {
-		ptr = __vmalloc_node_range(5 * PAGE_SIZE,
-				THREAD_ALIGN << 1,
-				VMALLOC_START, VMALLOC_END,
-				GFP_KERNEL | __GFP_ZERO,
-				PAGE_KERNEL,
-				0, 0, __builtin_return_address(0));
-
+		ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1,
+				GFP_KERNEL | __GFP_ZERO, 0,
+				__builtin_return_address(0));
 		if (!ptr)
 			return -1;
--
cgit v1.2.3
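
For context, the two interfaces involved look roughly like this
(signatures paraphrased from include/linux/vmalloc.h of this kernel
generation, not quoted from the patch itself):

	/*
	 * The very low-level entry point: every caller must spell out the
	 * virtual range, page protections, vm_flags, NUMA node, and caller.
	 */
	void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	/*
	 * The slightly higher-level variant used after this patch: the
	 * range is fixed to VMALLOC_START..VMALLOC_END and the protections
	 * to PAGE_KERNEL, which is all the test module ever asked for.
	 */
	void *__vmalloc_node(size, align, gfp_mask, node, caller);

Since the test module only ever passed the default range, protections,
and flags, nothing is lost by the switch, and __vmalloc_node_range no
longer has to be exported for the module's benefit.
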
Wysocki" Cc: Steven Rostedt (VMware) Cc: Thomas Gleixner Cc: Vlastimil Babka Link: http://lkml.kernel.org/r/20200515140023.25469-4-joro@8bytes.org Signed-off-by: Linus Torvalds --- lib/ioremap.c | 46 +++++++++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 15 deletions(-) (limited to 'lib') diff --git a/lib/ioremap.c b/lib/ioremap.c index 3f0e18543de8..ad485f08173b 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -61,13 +61,14 @@ static inline int ioremap_pmd_enabled(void) { return 0; } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { pte_t *pte; u64 pfn; pfn = phys_addr >> PAGE_SHIFT; - pte = pte_alloc_kernel(pmd, addr); + pte = pte_alloc_kernel_track(pmd, addr, mask); if (!pte) return -ENOMEM; do { @@ -75,6 +76,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); + *mask |= PGTBL_PTE_MODIFIED; return 0; } @@ -101,21 +103,24 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr, } static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; - pmd = pmd_alloc(&init_mm, pud, addr); + pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); - if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) + if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) { + *mask |= PGTBL_PMD_MODIFIED; continue; + } - if (ioremap_pte_range(pmd, addr, next, phys_addr, prot)) + if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask)) return -ENOMEM; } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); return 0; @@ -144,21 +149,24 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, } static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; - pud = pud_alloc(&init_mm, p4d, addr); + pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); - if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) + if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) { + *mask |= PGTBL_PUD_MODIFIED; continue; + } - if (ioremap_pmd_range(pud, addr, next, phys_addr, prot)) + if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask)) return -ENOMEM; } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; @@ -187,21 +195,24 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, } static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; - p4d = p4d_alloc(&init_mm, pgd, addr); + p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); - if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) + if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) { + *mask |= 
From 9380ce246a052a1e00121cd480028b6907aeae38 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Mon, 1 Jun 2020 21:52:46 -0700
Subject: ubsan: entirely disable alignment checks under UBSAN_TRAP

Commit 8d58f222e85f ("ubsan: disable UBSAN_ALIGNMENT under COMPILE_TEST")
tried to fix the pathological results of UBSAN_ALIGNMENT with UBSAN_TRAP
(which objtool would rightly scream about), but it made an assumption
about how COMPILE_TEST gets set (it is not set for randconfig).  As a
result, we need a bigger hammer here: just don't allow the alignment
checks with the trap mode.

Fixes: 8d58f222e85f ("ubsan: disable UBSAN_ALIGNMENT under COMPILE_TEST")
Reported-by: Randy Dunlap
Signed-off-by: Kees Cook
Signed-off-by: Andrew Morton
Acked-by: Randy Dunlap
Cc: Josh Poimboeuf
Cc: Dmitry Vyukov
Cc: Elena Petrova
Link: http://lkml.kernel.org/r/202005291236.000FCB6@keescook
Link: https://lore.kernel.org/lkml/742521db-1e8c-0d7a-1ed4-a908894fb497@infradead.org/
Signed-off-by: Linus Torvalds
---
 lib/Kconfig.ubsan | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 929211039bac..27bcc2568c95 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -63,7 +63,7 @@ config UBSAN_SANITIZE_ALL
 config UBSAN_ALIGNMENT
 	bool "Enable checks for pointers alignment"
 	default !HAVE_EFFICIENT_UNALIGNED_ACCESS
-	depends on !X86 || !COMPILE_TEST
+	depends on !UBSAN_TRAP
 	help
 	  This option enables the check of unaligned memory accesses.
 	  Enabling this option on architectures that support unaligned
--
cgit v1.2.3
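
For illustration of why UBSAN_ALIGNMENT and UBSAN_TRAP combine so badly
(a hypothetical userspace-style example, not code from the patch): the
alignment sanitizer instruments every potentially misaligned pointer
use, and trap mode turns each failed check into a trap instruction
instead of a call into the ubsan runtime, so intentional unaligned
accesses stop the machine cold rather than log a warning:

	#include <stdint.h>

	/* Reading a 32-bit field from an odd offset inside a byte buffer,
	 * something network and filesystem code does on purpose. */
	uint32_t read_field(const uint8_t *buf)
	{
		/* -fsanitize=alignment instruments this dereference; with
		 * -fsanitize-undefined-trap-on-error (what UBSAN_TRAP uses)
		 * the failed check becomes __builtin_trap(). */
		const uint32_t *p = (const uint32_t *)(buf + 1);
		return *p;
	}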