From 6475b2d846176e3272351266869481a21ff47866 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 2 Oct 2015 10:55:03 +0100 Subject: arm64: perf: move to shared arm_pmu framework Now that the arm_pmu framework has been factored out to drivers/perf we can make use of it for arm64, gaining support for heterogeneous PMUs and unifying the two codebases before they diverge further. The as-yet-unused PMU name for PMUv3 is changed to armv8_pmuv3, matching the style previously applied to the 32-bit PMUs. Signed-off-by: Mark Rutland Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 07d1811aa03f..98b4f98a13de 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -454,12 +454,8 @@ config HAVE_ARCH_PFN_VALID def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM config HW_PERF_EVENTS - bool "Enable hardware performance counter support for perf events" - depends on PERF_EVENTS - default y - help - Enable hardware performance counter support for perf events. If - disabled, perf events will use software events only. + def_bool y + depends on ARM_PMU config SYS_SUPPORTS_HUGETLBFS def_bool y -- cgit v1.2.3 From 217d453d473c5ddfd140a06bf9d8575218551020 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Thu, 24 Sep 2015 17:32:14 +0800 Subject: arm64: fix a migrating irq bug when hotplug cpu When a CPU is disabled, all of its IRQs are migrated to another CPU. In some cases the new affinity differs from the old one, so the old affinity mask needs to be updated; however, if irq_set_affinity() returns IRQ_SET_MASK_OK_DONE, the old affinity is never updated. Fix this by using irq_do_set_affinity(). Migrating interrupts is also a core-code matter, so use the generic irq_migrate_all_off_this_cpu() from kernel/irq/migration.c to migrate interrupts (a short illustrative sketch follows the Kconfig hunk below). Cc: Jiang Liu Cc: Thomas Gleixner Cc: Mark Rutland Cc: Will Deacon Cc: Russell King - ARM Linux Cc: Hanjun Guo Acked-by: Marc Zyngier Signed-off-by: Yang Yingliang Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/irq.h | 1 - arch/arm64/kernel/irq.c | 62 -------------------------------------------- arch/arm64/kernel/smp.c | 3 ++- 4 files changed, 3 insertions(+), 64 deletions(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 98b4f98a13de..1b35bdbd5d74 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -427,6 +427,7 @@ config NR_CPUS config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" + select GENERIC_IRQ_MIGRATION help Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu.
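[Editorial aside -- not part of the patch itself; the real code being removed is the arm64 migrate_one_irq() shown in the irq.c hunk that follows.] A minimal sketch of the affinity-update bug described in the commit message above: an irq_chip's .irq_set_affinity() callback may report success as either IRQ_SET_MASK_OK or IRQ_SET_MASK_OK_DONE, but the arm64-private copy only accepted the former, so the cached affinity mask went stale in the _DONE case. The generic core path used after this patch handles both success codes. Simplified C sketch (the second half paraphrases, rather than quotes, the core irq_do_set_affinity() logic):

	/*
	 * Old arm64-private check (see the deleted migrate_one_irq() below):
	 * the recorded affinity is only refreshed when the callback returns
	 * IRQ_SET_MASK_OK, so an IRQ_SET_MASK_OK_DONE success is lost.
	 */
	if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(irq_data_get_affinity_mask(d), affinity);

	/*
	 * Generic core behaviour (sketch of what irq_do_set_affinity() does):
	 * both success codes update the recorded affinity, which is why the
	 * patch switches to irq_migrate_all_off_this_cpu().
	 */
	ret = c->irq_set_affinity(d, affinity, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
		break;
	}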
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index bbb251b14746..09169296c3cc 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -7,7 +7,6 @@ struct pt_regs; -extern void migrate_irqs(void); extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); static inline void acpi_irq_init(void) diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 11dc3fd47853..9f17ec071ee0 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -27,7 +27,6 @@ #include #include #include -#include unsigned long irq_err_count; @@ -54,64 +53,3 @@ void __init init_IRQ(void) if (!handle_arch_irq) panic("No interrupt controller found."); } - -#ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_desc *desc) -{ - struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = irq_data_get_affinity_mask(d); - struct irq_chip *c; - bool ret = false; - - /* - * If this is a per-CPU interrupt, or the affinity does not - * include this CPU, then we have nothing to do. - */ - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) - return false; - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; - ret = true; - } - - c = irq_data_get_irq_chip(d); - if (!c->irq_set_affinity) - pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(irq_data_get_affinity_mask(d), affinity); - - return ret; -} - -/* - * The current CPU has been marked offline. Migrate IRQs off this CPU. - * If the affinity settings do not allow other CPUs, force them onto any - * available CPU. - * - * Note: we must iterate over all IRQs, whether they have an attached - * action structure or not, as we need to get chained interrupts too. - */ -void migrate_irqs(void) -{ - unsigned int i; - struct irq_desc *desc; - unsigned long flags; - - local_irq_save(flags); - - for_each_irq_desc(i, desc) { - bool affinity_broken; - - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); - - if (affinity_broken) - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", - i, smp_processor_id()); - } - - local_irq_restore(flags); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 03b0aa28ea61..b7a973d6861e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -231,7 +231,8 @@ int __cpu_disable(void) /* * OK - migrate IRQs away from this CPU */ - migrate_irqs(); + irq_migrate_all_off_this_cpu(); + return 0; } -- cgit v1.2.3 From 39d114ddc68223022c12ae3a1573912bc4b585e5 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Mon, 12 Oct 2015 18:52:58 +0300 Subject: arm64: add KASAN support This patch adds arch specific code for kernel address sanitizer (see Documentation/kasan.txt). 1/8 of kernel addresses reserved for shadow memory. There was no big enough hole for this, so virtual addresses for shadow were stolen from vmalloc area. At early boot stage the whole shadow region populated with just one physical page (kasan_zero_page). Later, this page reused as readonly zero shadow for some memory that KASan currently don't track (vmalloc). After mapping the physical memory, pages for shadow memory are allocated and mapped. Functions like memset/memmove/memcpy do a lot of memory accesses. If bad pointer passed to one of these function it is important to catch this. 
Compiler's instrumentation cannot do this since these functions are written in assembly. KASan replaces memory functions with manually instrumented variants. Original functions declared as weak symbols so strong definitions in mm/kasan/kasan.c could replace them. Original functions have aliases with '__' prefix in name, so we could call non-instrumented variant if needed. Some files built without kasan instrumentation (e.g. mm/slub.c). Original mem* function replaced (via #define) with prefixed variants to disable memory access checks for such files. Signed-off-by: Andrey Ryabinin Tested-by: Linus Walleij Reviewed-by: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + arch/arm64/Makefile | 7 ++ arch/arm64/include/asm/kasan.h | 36 +++++++++ arch/arm64/include/asm/pgtable.h | 7 ++ arch/arm64/include/asm/string.h | 16 ++++ arch/arm64/kernel/Makefile | 2 + arch/arm64/kernel/arm64ksyms.c | 3 + arch/arm64/kernel/head.S | 3 + arch/arm64/kernel/image.h | 6 ++ arch/arm64/kernel/module.c | 16 +++- arch/arm64/kernel/setup.c | 4 + arch/arm64/lib/memcpy.S | 3 + arch/arm64/lib/memmove.S | 7 +- arch/arm64/lib/memset.S | 3 + arch/arm64/mm/Makefile | 3 + arch/arm64/mm/kasan_init.c | 165 +++++++++++++++++++++++++++++++++++++++ drivers/firmware/efi/Makefile | 8 ++ scripts/Makefile.kasan | 4 +- 18 files changed, 288 insertions(+), 6 deletions(-) create mode 100644 arch/arm64/include/asm/kasan.h create mode 100644 arch/arm64/mm/kasan_init.c (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1b35bdbd5d74..2782c1189208 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -48,6 +48,7 @@ config ARM64 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP select HAVE_ARCH_KGDB select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f9914d7c1bb0..f41c6761ec36 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -55,6 +55,13 @@ else TEXT_OFFSET := 0x00080000 endif +# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61) +# in 32-bit arithmetic +KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ + (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \ + + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \ + - (1 << (64 - 32 - 3)) )) ) + export TEXT_OFFSET GZFLAGS core-y += arch/arm64/kernel/ arch/arm64/mm/ diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h new file mode 100644 index 000000000000..71dfe14acdca --- /dev/null +++ b/arch/arm64/include/asm/kasan.h @@ -0,0 +1,36 @@ +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_KASAN + +#include + +/* + * KASAN_SHADOW_START: beginning of the kernel virtual addresses. + * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses. + */ +#define KASAN_SHADOW_START (VA_START) +#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1UL << (VA_BITS - 3))) + +/* + * This value is used to map an address to the corresponding shadow + * address by the following formula: + * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET; + * + * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END] + * cover all 64-bits of virtual addresses. 
So KASAN_SHADOW_OFFSET + * should satisfy the following equation: + * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61) + */ +#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3))) + +void kasan_init(void); + +#else +static inline void kasan_init(void) { } +#endif + +#endif +#endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 3f481ef42c07..e3b515fbf0c2 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -41,7 +41,14 @@ * fixed mappings and modules */ #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) + +#ifndef CONFIG_KASAN #define VMALLOC_START (VA_START) +#else +#include +#define VMALLOC_START (KASAN_SHADOW_END + SZ_64K) +#endif + #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) #define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index 64d2d4884a9d..2eb714c4639f 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -36,17 +36,33 @@ extern __kernel_size_t strnlen(const char *, __kernel_size_t); #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *, const void *, __kernel_size_t); +extern void *__memcpy(void *, const void *, __kernel_size_t); #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *, const void *, __kernel_size_t); +extern void *__memmove(void *, const void *, __kernel_size_t); #define __HAVE_ARCH_MEMCHR extern void *memchr(const void *, int, __kernel_size_t); #define __HAVE_ARCH_MEMSET extern void *memset(void *, int, __kernel_size_t); +extern void *__memset(void *, int, __kernel_size_t); #define __HAVE_ARCH_MEMCMP extern int memcmp(const void *, const void *, size_t); + +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) + +/* + * For files that are not instrumented (e.g. mm/slub.c) we + * should use not instrumented version of mem* functions. 
+ */ + +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memmove(dst, src, len) __memmove(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) +#endif + #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 7b17f6245f1e..1b6bda2ff102 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -7,6 +7,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) CFLAGS_armv8_deprecated.o := -I$(src) +KASAN_SANITIZE_efi-stub.o := n + CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index a85843ddbde8..3b6d8cc9dfe0 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -51,6 +51,9 @@ EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(__memset); +EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(memcmp); diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 28a81e948df9..2a8c1d5b3fae 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -444,6 +444,9 @@ __mmap_switched: str_l x21, __fdt_pointer, x5 // Save FDT pointer str_l x24, memstart_addr, x6 // Save PHYS_OFFSET mov x29, #0 +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif b start_kernel ENDPROC(__mmap_switched) diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index e083af0dd546..6eb8fee93321 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -80,6 +80,12 @@ __efistub_strcmp = __pi_strcmp; __efistub_strncmp = __pi_strncmp; __efistub___flush_dcache_area = __pi___flush_dcache_area; +#ifdef CONFIG_KASAN +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; +#endif + __efistub__text = _text; __efistub__end = _end; __efistub__edata = _edata; diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 876eb8df50bf..f4bc779e62e8 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -34,9 +35,18 @@ void *module_alloc(unsigned long size) { - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL_EXEC, 0, - NUMA_NO_NODE, __builtin_return_address(0)); + void *p; + + p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, 0, + NUMA_NO_NODE, __builtin_return_address(0)); + + if (p && (kasan_module_alloc(p, size) < 0)) { + vfree(p); + return NULL; + } + + return p; } enum aarch64_reloc_op { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 6bab21f84a9f..79df79a2ea61 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include #include @@ -434,6 +435,9 @@ void __init setup_arch(char **cmdline_p) paging_init(); relocate_initrd(); + + kasan_init(); + request_standard_resources(); early_ioremap_reset(); diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 36a6a62cf263..67613937711f 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -68,7 +68,10 @@ stp \ptr, \regB, [\regC], \val .endm + .weak memcpy +ENTRY(__memcpy) ENTRY(memcpy) #include "copy_template.S" ret ENDPIPROC(memcpy) +ENDPROC(__memcpy) diff --git a/arch/arm64/lib/memmove.S 
b/arch/arm64/lib/memmove.S index 68e2f2035e23..a5a4459013b1 100644 --- a/arch/arm64/lib/memmove.S +++ b/arch/arm64/lib/memmove.S @@ -57,12 +57,14 @@ C_h .req x12 D_l .req x13 D_h .req x14 + .weak memmove +ENTRY(__memmove) ENTRY(memmove) cmp dstin, src - b.lo memcpy + b.lo __memcpy add tmp1, src, count cmp dstin, tmp1 - b.hs memcpy /* No overlap. */ + b.hs __memcpy /* No overlap. */ add dst, dstin, count add src, src, count @@ -195,3 +197,4 @@ ENTRY(memmove) b.ne .Ltail63 ret ENDPIPROC(memmove) +ENDPROC(__memmove) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index 29f405f08792..f2670a9f218c 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -54,6 +54,8 @@ dst .req x8 tmp3w .req w9 tmp3 .req x9 + .weak memset +ENTRY(__memset) ENTRY(memset) mov dst, dstin /* Preserve return value. */ and A_lw, val, #255 @@ -214,3 +216,4 @@ ENTRY(memset) b.ne .Ltail_maybe_long ret ENDPIPROC(memset) +ENDPROC(__memset) diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 773d37a14039..57f57fde5722 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -4,3 +4,6 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ context.o proc.o pageattr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_ARM64_PTDUMP) += dump.o + +obj-$(CONFIG_KASAN) += kasan_init.o +KASAN_SANITIZE_kasan_init.o := n diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c new file mode 100644 index 000000000000..b6a92f5dd568 --- /dev/null +++ b/arch/arm64/mm/kasan_init.c @@ -0,0 +1,165 @@ +/* + * This file contains kasan initialization code for ARM64. + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#define pr_fmt(fmt) "kasan: " fmt +#include +#include +#include +#include + +#include +#include +#include +#include + +static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); + +static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr, + unsigned long end) +{ + pte_t *pte; + unsigned long next; + + if (pmd_none(*pmd)) + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + + pte = pte_offset_kernel(pmd, addr); + do { + next = addr + PAGE_SIZE; + set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page), + PAGE_KERNEL)); + } while (pte++, addr = next, addr != end && pte_none(*pte)); +} + +static void __init kasan_early_pmd_populate(pud_t *pud, + unsigned long addr, + unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + + if (pud_none(*pud)) + pud_populate(&init_mm, pud, kasan_zero_pmd); + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + kasan_early_pte_populate(pmd, addr, next); + } while (pmd++, addr = next, addr != end && pmd_none(*pmd)); +} + +static void __init kasan_early_pud_populate(pgd_t *pgd, + unsigned long addr, + unsigned long end) +{ + pud_t *pud; + unsigned long next; + + if (pgd_none(*pgd)) + pgd_populate(&init_mm, pgd, kasan_zero_pud); + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + kasan_early_pmd_populate(pud, addr, next); + } while (pud++, addr = next, addr != end && pud_none(*pud)); +} + +static void __init kasan_map_early_shadow(void) +{ + unsigned long addr = KASAN_SHADOW_START; + unsigned long end = KASAN_SHADOW_END; + unsigned long next; + pgd_t *pgd; + + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + kasan_early_pud_populate(pgd, addr, next); + } while (pgd++, addr = next, addr != end); +} + +void __init kasan_early_init(void) +{ + BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61)); + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); + kasan_map_early_shadow(); +} + +static void __init clear_pgds(unsigned long start, + unsigned long end) +{ + /* + * Remove references to kasan page tables from + * swapper_pg_dir. pgd_clear() can't be used + * here because it's nop on 2,3-level pagetable setups + */ + for (; start < end; start += PGDIR_SIZE) + set_pgd(pgd_offset_k(start), __pgd(0)); +} + +static void __init cpu_set_ttbr1(unsigned long ttbr1) +{ + asm( + " msr ttbr1_el1, %0\n" + " isb" + : + : "r" (ttbr1)); +} + +void __init kasan_init(void) +{ + struct memblock_region *reg; + + /* + * We are going to perform proper setup of shadow memory. + * At first we should unmap early shadow (clear_pgds() call bellow). + * However, instrumented code couldn't execute without shadow memory. + * tmp_pg_dir used to keep early shadow mapped until full shadow + * setup will be finished. + */ + memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir)); + cpu_set_ttbr1(__pa(tmp_pg_dir)); + flush_tlb_all(); + + clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + + kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_mem_to_shadow((void *)MODULES_VADDR)); + + for_each_memblock(memory, reg) { + void *start = (void *)__phys_to_virt(reg->base); + void *end = (void *)__phys_to_virt(reg->base + reg->size); + + if (start >= end) + break; + + /* + * end + 1 here is intentional. We check several shadow bytes in + * advance to slightly speed up fastpath. In some rare cases + * we could cross boundary of mapped shadow, so we just map + * some more here. 
+ */ + vmemmap_populate((unsigned long)kasan_mem_to_shadow(start), + (unsigned long)kasan_mem_to_shadow(end) + 1, + pfn_to_nid(virt_to_pfn(start))); + } + + memset(kasan_zero_page, 0, PAGE_SIZE); + cpu_set_ttbr1(__pa(swapper_pg_dir)); + flush_tlb_all(); + + /* At this point kasan is fully initialized. Enable error messages */ + init_task.kasan_depth = 0; + pr_info("KernelAddressSanitizer initialized\n"); +} diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 6fd3da938717..413fcf2970c0 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -1,6 +1,14 @@ # # Makefile for linux kernel # + +# +# ARM64 maps efi runtime services in userspace addresses +# which don't have KASAN shadow. So dereference of these addresses +# in efi_call_virt() will cause crash if this code instrumented. +# +KASAN_SANITIZE_runtime-wrappers.o := n + obj-$(CONFIG_EFI) += efi.o vars.o reboot.o obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_EFI_ESRT) += esrt.o diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 3f874d24234f..37323b0df374 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -5,10 +5,12 @@ else call_threshold := 0 endif +KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET) + CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \ - -fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \ + -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET) \ --param asan-stack=1 --param asan-globals=1 \ --param asan-instrumentation-with-call-threshold=$(call_threshold)) -- cgit v1.2.3 From 755e70b7e3f189aa2503c510fb98208e477a5030 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Mon, 19 Oct 2015 14:19:32 +0100 Subject: arm64: Clean config usages for page size We use !CONFIG_ARM64_64K_PAGES for CONFIG_ARM64_4K_PAGES (and vice versa) in code. It all worked well, so far since we only had two options. Now, with the introduction of 16K, these cases will break. This patch cleans up the code to use the required CONFIG symbol expression without the assumption that !64K => 4K (and vice versa) Cc: Will Deacon Acked-by: Mark Rutland Signed-off-by: Suzuki K. 
Poulose Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 4 ++-- arch/arm64/Kconfig.debug | 2 +- arch/arm64/include/asm/thread_info.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 2782c1189208..4654c2761b7c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -466,7 +466,7 @@ config ARCH_WANT_GENERAL_HUGETLB def_bool y config ARCH_WANT_HUGE_PMD_SHARE - def_bool y if !ARM64_64K_PAGES + def_bool y if ARM64_4K_PAGES config HAVE_ARCH_TRANSPARENT_HUGEPAGE def_bool y @@ -678,7 +678,7 @@ source "fs/Kconfig.binfmt" config COMPAT bool "Kernel support for 32-bit EL0" - depends on !ARM64_64K_PAGES || EXPERT + depends on ARM64_4K_PAGES || EXPERT select COMPAT_BINFMT_ELF select HAVE_UID16 select OLD_SIGSUSPEND3 diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index d6285ef9b5f9..c24d6adc0420 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -77,7 +77,7 @@ config DEBUG_RODATA If in doubt, say Y config DEBUG_ALIGN_RODATA - depends on DEBUG_RODATA && !ARM64_64K_PAGES + depends on DEBUG_RODATA && ARM64_4K_PAGES bool "Align linker sections up to SECTION_SIZE" help If this option is enabled, sections that may potentially be marked as diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 555c6dec5ef2..5eac6a2300af 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -23,7 +23,7 @@ #include -#ifndef CONFIG_ARM64_64K_PAGES +#ifdef CONFIG_ARM64_4K_PAGES #define THREAD_SIZE_ORDER 2 #endif -- cgit v1.2.3 From db488be354bc85724d7b9523e94435fdaa761a35 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Mon, 19 Oct 2015 14:19:34 +0100 Subject: arm64: Kconfig: Fix help text about AArch32 support with 64K pages Update the help text for ARM64_64K_PAGES to reflect the reality about AArch32 support. Cc: Will Deacon Signed-off-by: Suzuki K. Poulose Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4654c2761b7c..13b51a0c4e7f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -368,8 +368,8 @@ config ARM64_64K_PAGES help This feature enables 64KB pages support (4KB by default) allowing only two levels of page tables and faster TLB - look-up. AArch32 emulation is not available when this feature - is enabled. + look-up. AArch32 emulation requires applications compiled + with 64K aligned segments. endchoice -- cgit v1.2.3 From 44eaacf1b8999b15cec89bd9d9cd989da4798d53 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Mon, 19 Oct 2015 14:19:37 +0100 Subject: arm64: Add 16K page size support This patch turns on the 16K page support in the kernel. We support 48bit VA (4 level page tables) and 47bit VA (3 level page tables). With 16K we can map 128 entries using contiguous bit hint at level 3 to map 2M using single TLB entry. TODO: 16K supports 32 contiguous entries at level 2 to get us 1G(which is not yet supported by the infrastructure). That should be a separate patch altogether. Cc: Will Deacon Cc: Jeremy Linton Cc: Marc Zyngier Cc: Christoffer Dall Cc: Steve Capper Signed-off-by: Suzuki K. 
Poulose Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 42 +++++++++++++++++++++++++++++++----- arch/arm64/include/asm/page.h | 3 +++ arch/arm64/include/asm/sysreg.h | 3 +++ arch/arm64/include/asm/thread_info.h | 2 ++ arch/arm64/kvm/Kconfig | 3 +++ arch/arm64/mm/proc.S | 4 +++- 6 files changed, 51 insertions(+), 6 deletions(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 13b51a0c4e7f..854166422c42 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -173,7 +173,8 @@ config PGTABLE_LEVELS default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48 default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 - default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48 + default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47 + default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48 source "init/Kconfig" @@ -363,6 +364,13 @@ config ARM64_4K_PAGES help This feature enables 4KB pages support. +config ARM64_16K_PAGES + bool "16KB" + help + The system will use 16KB pages support. AArch32 emulation + requires applications compiled with 16K (or a multiple of 16K) + aligned segments. + config ARM64_64K_PAGES bool "64KB" help @@ -376,6 +384,7 @@ endchoice choice prompt "Virtual address space size" default ARM64_VA_BITS_39 if ARM64_4K_PAGES + default ARM64_VA_BITS_47 if ARM64_16K_PAGES default ARM64_VA_BITS_42 if ARM64_64K_PAGES help Allows choosing one of multiple possible virtual address @@ -390,6 +399,10 @@ config ARM64_VA_BITS_42 bool "42-bit" depends on ARM64_64K_PAGES +config ARM64_VA_BITS_47 + bool "47-bit" + depends on ARM64_16K_PAGES + config ARM64_VA_BITS_48 bool "48-bit" @@ -399,6 +412,7 @@ config ARM64_VA_BITS int default 39 if ARM64_VA_BITS_39 default 42 if ARM64_VA_BITS_42 + default 47 if ARM64_VA_BITS_47 default 48 if ARM64_VA_BITS_48 config CPU_BIG_ENDIAN @@ -466,7 +480,7 @@ config ARCH_WANT_GENERAL_HUGETLB def_bool y config ARCH_WANT_HUGE_PMD_SHARE - def_bool y if ARM64_4K_PAGES + def_bool y if ARM64_4K_PAGES || ARM64_16K_PAGES config HAVE_ARCH_TRANSPARENT_HUGEPAGE def_bool y @@ -503,7 +517,25 @@ config XEN config FORCE_MAX_ZONEORDER int default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE) + default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE) default "11" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 11 means that the largest free memory block is 2^10 pages. + + We make sure that we can allocate upto a HugePage size for each configuration. + Hence we have : + MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2 + + However for 4K, we choose a higher default value, 11 as opposed to 10, giving us + 4M allocations matching the default size used by generic code. menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" @@ -689,9 +721,9 @@ config COMPAT the user helper functions, VFP support and the ptrace interface are handled appropriately by the kernel. 
- If you also enabled CONFIG_ARM64_64K_PAGES, please be aware that you - will only be able to execute AArch32 binaries that were compiled with - 64k aligned segments. + If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware + that you will only be able to execute AArch32 binaries that were compiled + with page size aligned segments. If you want to execute 32-bit userspace applications, say Y. diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index da3235494ffd..9b2f5a9d019d 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -24,6 +24,9 @@ #ifdef CONFIG_ARM64_64K_PAGES #define PAGE_SHIFT 16 #define CONT_SHIFT 5 +#elif defined(CONFIG_ARM64_16K_PAGES) +#define PAGE_SHIFT 14 +#define CONT_SHIFT 7 #else #define PAGE_SHIFT 12 #define CONT_SHIFT 4 diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index d59cb231a673..4b57e1080538 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -59,6 +59,9 @@ #if defined(CONFIG_ARM64_4K_PAGES) #define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT #define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED +#elif defined(CONFIG_ARM64_16K_PAGES) +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED #elif defined(CONFIG_ARM64_64K_PAGES) #define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT #define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 5eac6a2300af..90c7ff233735 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -25,6 +25,8 @@ #ifdef CONFIG_ARM64_4K_PAGES #define THREAD_SIZE_ORDER 2 +#elif defined(CONFIG_ARM64_16K_PAGES) +#define THREAD_SIZE_ORDER 0 #endif #define THREAD_SIZE 16384 diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 5c7e920e4861..6a7d5cd772e6 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -19,6 +19,7 @@ if VIRTUALIZATION config KVM bool "Kernel-based Virtual Machine (KVM) support" depends on OF + depends on !ARM64_16K_PAGES select MMU_NOTIFIER select PREEMPT_NOTIFIERS select ANON_INODES @@ -33,6 +34,8 @@ config KVM select HAVE_KVM_IRQFD ---help--- Support hosting virtualized guest machines. + We don't support KVM with 16K page tables yet, due to the multiple + levels of fake page tables. If unsure, say N. diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 91cb2eaac256..3a4b8b19978b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -30,7 +30,9 @@ #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K -#else +#elif defined(CONFIG_ARM64_16K_PAGES) +#define TCR_TG_FLAGS TCR_TG0_16K | TCR_TG1_16K +#else /* CONFIG_ARM64_4K_PAGES */ #define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K #endif -- cgit v1.2.3 From 215399392fe40f33880ea9de49a1ed8ee26edd10 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Mon, 19 Oct 2015 14:19:38 +0100 Subject: arm64: 36 bit VA 36bit VA lets us use 2 level page tables while limiting the available address space to 64GB. Cc: Will Deacon Cc: Steve Capper Signed-off-by: Suzuki K. 
Poulose Reviewed-by: Ard Biesheuvel Tested-by: Ard Biesheuvel Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 854166422c42..a346671d474e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -170,6 +170,7 @@ config FIX_EARLYCON_MEM config PGTABLE_LEVELS int + default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36 default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48 default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 @@ -391,6 +392,10 @@ choice space sizes. The level of translation table is determined by a combination of page size and virtual address space size. +config ARM64_VA_BITS_36 + bool "36-bit" + depends on ARM64_16K_PAGES + config ARM64_VA_BITS_39 bool "39-bit" depends on ARM64_4K_PAGES @@ -410,6 +415,7 @@ endchoice config ARM64_VA_BITS int + default 36 if ARM64_VA_BITS_36 default 39 if ARM64_VA_BITS_39 default 42 if ARM64_VA_BITS_42 default 47 if ARM64_VA_BITS_47 default 48 if ARM64_VA_BITS_48 @@ -480,7 +486,7 @@ config ARCH_WANT_GENERAL_HUGETLB def_bool y config ARCH_WANT_HUGE_PMD_SHARE - def_bool y if ARM64_4K_PAGES || ARM64_16K_PAGES + def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36) config HAVE_ARCH_TRANSPARENT_HUGEPAGE def_bool y -- cgit v1.2.3 From 56a3f30e029396948989b96716f27b87e3510e0f Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 20 Oct 2015 14:59:20 +0100 Subject: arm64: Make 36-bit VA depend on EXPERT Commit 215399392fe4 (arm64: 36 bit VA) introduced 36-bit VA support for the arm64 kernel when the 16KB page configuration is enabled. While this is a valid hardware configuration, it's not something we want to encourage since it reduces the memory (and I/O) range that the kernel can access. Make this depend on EXPERT to avoid complaints of Linux not mapping the whole RAM, especially on platforms following the ARM recommended memory map. Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a346671d474e..c62fb0393567 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -393,7 +393,7 @@ choice a combination of page size and virtual address space size. config ARM64_VA_BITS_36 - bool "36-bit" + bool "36-bit" if EXPERT depends on ARM64_16K_PAGES -- cgit v1.2.3 From f90df5e27d978c492c4d911476622a7413621213 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 26 Oct 2015 11:48:16 +0800 Subject: arm64: make Timer Interrupt Frequency selectable Allow the timer interrupt frequency to be selected from 100, 250, 300 and 1000 Hz. Choosing a frequency that suits the workload can improve performance. Cc: Will Deacon Signed-off-by: Kefeng Wang Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c62fb0393567..4d8a5b256f65 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -454,10 +454,7 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu. source kernel/Kconfig.preempt - -config HZ - int - default 100 +source kernel/Kconfig.hz config ARCH_HAS_HOLES_MEMORYMODEL def_bool y if SPARSEMEM -- cgit v1.2.3
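[Editorial note -- not part of any patch above.] The PGTABLE_LEVELS defaults added by the 16KB-page and 36-bit VA patches all follow from one piece of arithmetic: a page of PAGE_SIZE bytes holds PAGE_SIZE / 8 eight-byte descriptors, so each translation level resolves PAGE_SHIFT - 3 bits of virtual address, and the number of levels is the ceiling of (VA_BITS - PAGE_SHIFT) / (PAGE_SHIFT - 3). The small, self-contained C sketch below (illustrative only; the helper name is hypothetical, not a kernel function) reproduces the values selected in the Kconfig hunks:

	#include <stdio.h>

	/* levels = ceil((va_bits - page_shift) / (page_shift - 3)) */
	static int pgtable_levels(int va_bits, int page_shift)
	{
		int bits_per_level = page_shift - 3;	/* 9, 11 or 13 */

		return (va_bits - page_shift + bits_per_level - 1) / bits_per_level;
	}

	int main(void)
	{
		printf("16K pages, 36-bit VA: %d levels\n", pgtable_levels(36, 14));	/* 2 */
		printf("16K pages, 47-bit VA: %d levels\n", pgtable_levels(47, 14));	/* 3 */
		printf("16K pages, 48-bit VA: %d levels\n", pgtable_levels(48, 14));	/* 4 */
		printf(" 4K pages, 39-bit VA: %d levels\n", pgtable_levels(39, 12));	/* 3 */
		printf(" 4K pages, 48-bit VA: %d levels\n", pgtable_levels(48, 12));	/* 4 */
		printf("64K pages, 42-bit VA: %d levels\n", pgtable_levels(42, 16));	/* 2 */
		printf("64K pages, 48-bit VA: %d levels\n", pgtable_levels(48, 16));	/* 3 */
		return 0;
	}

This also suggests why ARCH_WANT_HUGE_PMD_SHARE excludes the 16K/36-bit combination: with only two translation levels there is no separate PUD level under which PMD tables could be shared (an inference from the arithmetic, not something stated in the patches).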