author     Max Filippov <jcmvbkbc@gmail.com>    2017-12-03 13:28:52 -0800
committer  Max Filippov <jcmvbkbc@gmail.com>    2017-12-16 22:37:12 -0800
commit     c633544a6154146a210cf158157a1ae7c55473b6
tree       d670a9984373622d991c382126f341b6f32154cd
parent     1af1e8a39dc0fab5e50f10462c636da8c1e0cfbb
xtensa: add support for KASAN
Cover kernel addresses above 0x90000000 with the shadow map. Enable
HAVE_ARCH_KASAN when the MMU is enabled. Provide kasan_early_init,
which fills the shadow map with writable copies of kasan_zero_page,
and call it right after MMU initialization in init_arch. Provide
kasan_init, which allocates proper shadow map pages from memblock and
puts them into the shadow map for addresses from the VMALLOC area to
the end of KSEG, and call it right after memblock initialization.
Don't instrument the boot code, the MMU and KASAN initialization code,
or the page fault handler. Make the kernel stack four times larger
when KASAN is enabled to avoid stack overflows.

GCC 7.3, or GCC 8 or newer, is required to build the xtensa kernel
with KASAN.
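
For reference (not part of the patch): the constants above fit together
via the generic KASAN mapping shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET, with KASAN_SHADOW_SCALE_SHIFT = 3 on xtensa. A
small host-side C sketch using the values from the diff below
(KASAN_START_VADDR, KASAN_SHADOW_START, CONFIG_KASAN_SHADOW_OFFSET)
checks the arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3
    #define KASAN_START_VADDR   0x90000000ul /* start of area covered by KASAN */
    #define KASAN_SHADOW_START  0x80400000ul /* page table vaddr + page table size */
    #define KASAN_SHADOW_OFFSET 0x6e400000ul /* CONFIG_KASAN_SHADOW_OFFSET */

    /* Generic KASAN mapping: one shadow byte covers 8 bytes of kernel memory. */
    static unsigned long mem_to_shadow(unsigned long addr)
    {
            return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
            /* Shadow for everything from 0x90000000 up to 4 GB: 1.75 GB / 8. */
            unsigned long shadow_size =
                    (0x100000000ull - KASAN_START_VADDR) >> KASAN_SHADOW_SCALE_SHIFT;

            /* 0x90000000 maps to the first shadow byte, right behind the page table. */
            assert(mem_to_shadow(KASAN_START_VADDR) == KASAN_SHADOW_START);

            printf("shadow map: 0x%08lx - 0x%08lx (%lu MB)\n",
                   KASAN_SHADOW_START, KASAN_SHADOW_START + shadow_size,
                   shadow_size >> 20);
            return 0;
    }

This prints "shadow map: 0x80400000 - 0x8e400000 (224 MB)", matching the
KASAN shadow map rows added to Documentation/xtensa/mmu.txt below; the
"four times larger" kernel stack is KERNEL_STACK_SHIFT going from 13 to
15, i.e. from 8 KB to 32 KB.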
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
-rw-r--r--  Documentation/features/debug/KASAN/arch-support.txt |  2
-rw-r--r--  Documentation/xtensa/mmu.txt                        |  6
-rw-r--r--  arch/xtensa/Kconfig                                 |  5
-rw-r--r--  arch/xtensa/boot/lib/Makefile                       |  2
-rw-r--r--  arch/xtensa/include/asm/kasan.h                     | 37
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h               |  4
-rw-r--r--  arch/xtensa/include/asm/pgtable.h                   |  3
-rw-r--r--  arch/xtensa/include/asm/string.h                    | 19
-rw-r--r--  arch/xtensa/kernel/setup.c                          |  7
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c                   |  3
-rw-r--r--  arch/xtensa/lib/memcopy.S                           | 10
-rw-r--r--  arch/xtensa/lib/memset.S                            |  5
-rw-r--r--  arch/xtensa/mm/Makefile                             |  5
-rw-r--r--  arch/xtensa/mm/init.c                               |  7
-rw-r--r--  arch/xtensa/mm/kasan_init.c                         | 95
15 files changed, 201 insertions(+), 9 deletions(-)
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 76bbd7fe27b3..8abb013db8d1 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -35,5 +35,5 @@
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
-    |      xtensa: | TODO |
+    |      xtensa: |  ok  |
     -----------------------
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
index 16921393e366..318114de63f3 100644
--- a/Documentation/xtensa/mmu.txt
+++ b/Documentation/xtensa/mmu.txt
@@ -71,6 +71,8 @@ Default MMUv2-compatible layout.
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR      0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START          0x80400000  KASAN_SHADOW_SIZE
++------------------+                                          0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START               0xc0000000  128MB - 64KB
 +------------------+  VMALLOC_END
@@ -111,6 +113,8 @@ Default MMUv2-compatible layout.
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR      0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START          0x80400000  KASAN_SHADOW_SIZE
++------------------+                                          0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START               0xa0000000  128MB - 64KB
 +------------------+  VMALLOC_END
@@ -152,6 +156,8 @@ Default MMUv2-compatible layout.
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR      0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START          0x80400000  KASAN_SHADOW_SIZE
++------------------+                                          0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START               0x90000000  128MB - 64KB
 +------------------+  VMALLOC_END
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index fffe05b698ac..f9f95d6e8da8 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,6 +15,7 @@ config XTENSA
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select HAVE_ARCH_KASAN if MMU
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -80,6 +81,10 @@ config VARIANT_IRQ_SWITCH
 config HAVE_XTENSA_GPIO32
 	def_bool n
 
+config KASAN_SHADOW_OFFSET
+	hex
+	default 0x6e400000
+
 menu "Processor type and features"
 
 choice
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index 2fe182915b63..355127faade1 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -15,6 +15,8 @@ CFLAGS_REMOVE_inftrees.o = -pg
 CFLAGS_REMOVE_inffast.o = -pg
 endif
 
+KASAN_SANITIZE := n
+
 CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000000..54be80876e57
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR	__XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START	(XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE	(-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET	__XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 28f9260a766c..2317c835a4db 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -71,7 +71,11 @@
 
 #endif
 
+#ifndef CONFIG_KASAN
 #define KERNEL_STACK_SHIFT	13
+#else
+#define KERNEL_STACK_SHIFT	15
+#endif
 #define KERNEL_STACK_SIZE	(1 << KERNEL_STACK_SHIFT)
 
 #endif
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 30dd5b2e4ad5..38802259978f 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -12,9 +12,9 @@
 #define _XTENSA_PGTABLE_H
 
 #define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 #include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
 
 /*
  * We only use two ring levels, user and kernel space.
@@ -170,6 +170,7 @@
 #define PAGE_SHARED_EXEC \
 	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
 #define PAGE_KERNEL		__pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO		__pgprot(_PAGE_PRESENT)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 8d5d9dfadb09..586bad9fe187 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -108,14 +108,33 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
 
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
 
 /* Don't build bcopy at all ... */
 #define __HAVE_ARCH_BCOPY
 
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
 #endif	/* _XTENSA_STRING_H */
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 960212e72a70..a931af9075f2 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -36,6 +36,7 @@
 #endif
 
 #include <asm/bootparam.h>
+#include <asm/kasan.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -251,6 +252,10 @@ void __init init_arch(bp_tag_t *bp_start)
 
 	init_mmu();
 
+	/* Initialize initial KASAN shadow map */
+
+	kasan_early_init();
+
 	/* Parse boot parameters */
 
 	if (bp_start)
@@ -388,7 +393,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	parse_early_param();
 	bootmem_init();
-
+	kasan_init();
 	unflatten_and_copy_device_tree();
 
 #ifdef CONFIG_SMP
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 672391003e40..3a443f83ae87 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -41,6 +41,9 @@
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(__strncpy_user);
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index 24d650864c3a..c0f6981719d6 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -109,7 +109,8 @@
 	addi	a5, a5, 2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
 
-ENTRY(memcpy)
+ENTRY(__memcpy)
+WEAK(memcpy)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -271,7 +272,7 @@ ENTRY(memcpy)
 	s8i	a6, a5, 0
 	retw
 
-ENDPROC(memcpy)
+ENDPROC(__memcpy)
 
 /*
  * void bcopy(const void *src, void *dest, size_t n);
@@ -376,7 +377,8 @@ ENDPROC(bcopy)
 	j	.Lbackdstaligned	# dst is now aligned,
 					# return to main algorithm
 
-ENTRY(memmove)
+ENTRY(__memmove)
+WEAK(memmove)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -548,4 +550,4 @@ ENTRY(memmove)
 	s8i	a6, a5, 0
 	retw
 
-ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index a6cd04ba966f..276747dec300 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -31,7 +31,8 @@
  */
 
 .text
-ENTRY(memset)
+ENTRY(__memset)
+WEAK(memset)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ c, a4/ length
@@ -140,7 +141,7 @@ EX(10f) s8i	a3, a5, 0
 .Lbytesetdone:
 	retw
 
-ENDPROC(memset)
+ENDPROC(__memset)
 
 	.section .fixup, "ax"
 	.align	4
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 0b3d296a016a..734888a00dc8 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,3 +5,8 @@
 obj-y			:= init.o misc.o
 obj-$(CONFIG_MMU)	+= cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM)	+= highmem.o
+obj-$(CONFIG_KASAN)	+= kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6fc1cb093fb3..0d980f05da82 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -100,6 +100,9 @@ void __init mem_init(void)
 
 	mem_init_print_info(NULL);
 	pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+		"    kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
+#endif
 #ifdef CONFIG_MMU
 		"    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
 #endif
@@ -108,6 +111,10 @@ void __init mem_init(void)
 		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
 #endif
 		"    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_KASAN
+		KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+		KASAN_SHADOW_SIZE >> 20,
+#endif
 #ifdef CONFIG_MMU
 		VMALLOC_START, VMALLOC_END,
 		(VMALLOC_END - VMALLOC_START) >> 20,
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 000000000000..6b532b6bd785
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,95 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+	unsigned long vaddr = KASAN_SHADOW_START;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+		BUG_ON(!pmd_none(*pmd));
+		set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+	}
+	early_trap_init();
+}
+
+static void __init populate(void *start, void *end)
+{
+	unsigned long n_pages = (end - start) / PAGE_SIZE;
+	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+	unsigned long i, j;
+	unsigned long vaddr = (unsigned long)start;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+	pr_debug("%s: %p - %p\n", __func__, start, end);
+
+	for (i = j = 0; i < n_pmds; ++i) {
+		int k;
+
+		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+			phys_addr_t phys =
+				memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+						    MEMBLOCK_ALLOC_ANYWHERE);
+
+			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+		}
+	}
+
+	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+		set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+	local_flush_tlb_all();
+	memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+	int i;
+
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+	/*
+	 * Replace shadow map pages that cover addresses from VMALLOC area
+	 * start to the end of KSEG with clean writable pages.
+	 */
+	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+	/* Write protect kasan_zero_page and zero-initialize it again. */
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+	local_flush_tlb_all();
+	memset(kasan_zero_page, 0, PAGE_SIZE);
+
+	/* At this point kasan is fully initialized. Enable error messages. */
+	current->kasan_depth = 0;
+	pr_info("KernelAddressSanitizer initialized\n");
+}
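
For reference (not part of the patch): the assembly routines become
__memcpy/__memmove/__memset and memcpy/memmove/memset are left as WEAK()
symbols so that, with KASAN enabled, the generic KASAN code can supply
strong, instrumented definitions that check both buffers against the
shadow map and then call the uninstrumented __ versions, while files
built without instrumentation are redirected to the __ versions by the
string.h hunk above. A self-contained model of that arrangement, with
hypothetical names (shadow_check, plain_memcpy, checked_memcpy) standing
in for the kernel's internals:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the shadow-map check done by the generic KASAN code. */
    static void shadow_check(const void *p, size_t len, int write)
    {
            (void)p;
            printf("KASAN: check %s of %zu bytes\n",
                   write ? "write" : "read", len);
    }

    /* Plays the role of the plain assembly routine ENTRY(__memcpy). */
    static void *plain_memcpy(void *dst, const void *src, size_t len)
    {
            return memcpy(dst, src, len);
    }

    /*
     * Plays the role of KASAN's strong memcpy, which overrides the
     * WEAK(memcpy) symbol: check both buffers, then delegate to the
     * uninstrumented copy.
     */
    static void *checked_memcpy(void *dst, const void *src, size_t len)
    {
            shadow_check(src, len, 0);
            shadow_check(dst, len, 1);
            return plain_memcpy(dst, src, len);
    }

    int main(void)
    {
            char dst[16];

            checked_memcpy(dst, "hello, shadow", 14);
            puts(dst);
            return 0;
    }

In the real tree the dispatch happens at link time: the strong memcpy
provided by the KASAN runtime overrides the weak symbol exported by
arch/xtensa/lib/memcopy.S, so no wrapper function is needed.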