path: root/arch/arm/include
author		Linus Torvalds <torvalds@linux-foundation.org>	2020-12-22 13:34:27 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-22 13:34:27 -0800
commit		c45647f9f562b52915b43b6bb447827cebf511bd (patch)
tree		150ddfa8007d02088dcf3d4bb27204af58fb6658 /arch/arm/include
parent		d8355e740f419a081796e869bafdfc0756b0bf2a (diff)
parent		ecbbb88727aee7880527d4b320b4d06dde75d46d (diff)
download	linux-c45647f9f562b52915b43b6bb447827cebf511bd.tar.bz2
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:

 - Rework phys/virt translation
 - Add KASan support
 - Move DT out of linear map region
 - Use more PC-relative addressing in assembly
 - Remove FP emulation handling while in kernel mode
 - Link with '-z norelro'
 - remove old check for GCC <= 4.2 in ARM unwinder code
 - disable big endian if using clang's linker

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (46 commits)
  ARM: 9027/1: head.S: explicitly map DT even if it lives in the first physical section
  ARM: 9038/1: Link with '-z norelro'
  ARM: 9037/1: uncompress: Add OF_DT_MAGIC macro
  ARM: 9036/1: uncompress: Fix dbgadtb size parameter name
  ARM: 9035/1: uncompress: Add be32tocpu macro
  ARM: 9033/1: arm/smp: Drop the macro S(x,s)
  ARM: 9032/1: arm/mm: Convert PUD level pgtable helper macros into functions
  ARM: 9031/1: hyp-stub: remove unused .L__boot_cpu_mode_offset symbol
  ARM: 9044/1: vfp: use undef hook for VFP support detection
  ARM: 9034/1: __div64_32(): straighten up inline asm constraints
  ARM: 9030/1: entry: omit FP emulation for UND exceptions taken in kernel mode
  ARM: 9029/1: Make iwmmxt.S support Clang's integrated assembler
  ARM: 9028/1: disable KASAN in call stack capturing routines
  ARM: 9026/1: unwind: remove old check for GCC <= 4.2
  ARM: 9025/1: Kconfig: CPU_BIG_ENDIAN depends on !LD_IS_LLD
  ARM: 9024/1: Drop useless cast of "u64" to "long long"
  ARM: 9023/1: Spelling s/mmeory/memory/
  ARM: 9022/1: Change arch/arm/lib/mem*.S to use WEAK instead of .weak
  ARM: kvm: replace open coded VA->PA calculations with adr_l call
  ARM: head.S: use PC relative insn sequence to calculate PHYS_OFFSET
  ...
Diffstat (limited to 'arch/arm/include')
-rw-r--r--   arch/arm/include/asm/assembler.h        88
-rw-r--r--   arch/arm/include/asm/div64.h            21
-rw-r--r--   arch/arm/include/asm/elf.h               5
-rw-r--r--   arch/arm/include/asm/fixmap.h            2
-rw-r--r--   arch/arm/include/asm/kasan.h            33
-rw-r--r--   arch/arm/include/asm/kasan_def.h        81
-rw-r--r--   arch/arm/include/asm/memory.h           67
-rw-r--r--   arch/arm/include/asm/pgalloc.h           8
-rw-r--r--   arch/arm/include/asm/pgtable-2level.h   27
-rw-r--r--   arch/arm/include/asm/processor.h         2
-rw-r--r--   arch/arm/include/asm/prom.h              4
-rw-r--r--   arch/arm/include/asm/string.h           26
-rw-r--r--   arch/arm/include/asm/thread_info.h       8
-rw-r--r--   arch/arm/include/asm/uaccess-asm.h       2
14 files changed, 329 insertions, 45 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index feac2c8b86f2..6ed30421f697 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -259,7 +259,7 @@
*/
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
+ .long 9998b - . ;\
9997: instr ;\
.if . - 9997b == 2 ;\
nop ;\
@@ -270,7 +270,7 @@
.popsection
#define ALT_UP_B(label) \
.pushsection ".alt.smp.init", "a" ;\
- .long 9998b ;\
+ .long 9998b - . ;\
W(b) . + (label - 9998b) ;\
.popsection
#else
@@ -494,4 +494,88 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#define _ASM_NOKPROBE(entry)
#endif
+ .macro __adldst_l, op, reg, sym, tmp, c
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr\c \tmp, .La\@
+ .subsection 1
+ .align 2
+.La\@: .long \sym - .Lpc\@
+ .previous
+ .else
+ .ifnb \c
+ THUMB( ittt \c )
+ .endif
+ movw\c \tmp, #:lower16:\sym - .Lpc\@
+ movt\c \tmp, #:upper16:\sym - .Lpc\@
+ .endif
+
+#ifndef CONFIG_THUMB2_KERNEL
+ .set .Lpc\@, . + 8 // PC bias
+ .ifc \op, add
+ add\c \reg, \tmp, pc
+ .else
+ \op\c \reg, [pc, \tmp]
+ .endif
+#else
+.Lb\@: add\c \tmp, \tmp, pc
+ /*
+ * In Thumb-2 builds, the PC bias depends on whether we are currently
+ * emitting into a .arm or a .thumb section. The size of the add opcode
+ * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
+ * emitting in ARM mode, so let's use this to account for the bias.
+ */
+ .set .Lpc\@, . + (. - .Lb\@)
+
+ .ifnc \op, add
+ \op\c \reg, [\tmp]
+ .endif
+#endif
+ .endm
+
+ /*
+ * mov_l - move a constant value or [relocated] address into a register
+ */
+ .macro mov_l, dst:req, imm:req
+ .if __LINUX_ARM_ARCH__ < 7
+ ldr \dst, =\imm
+ .else
+ movw \dst, #:lower16:\imm
+ movt \dst, #:upper16:\imm
+ .endif
+ .endm
+
+ /*
+ * adr_l - adr pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro adr_l, dst:req, sym:req, cond
+ __adldst_l add, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * ldr_l - ldr <literal> pseudo-op with unlimited range
+ *
+ * @dst: destination register
+ * @sym: name of the symbol
+ * @cond: conditional opcode suffix
+ */
+ .macro ldr_l, dst:req, sym:req, cond
+ __adldst_l ldr, \dst, \sym, \dst, \cond
+ .endm
+
+ /*
+ * str_l - str <literal> pseudo-op with unlimited range
+ *
+ * @src: source register
+ * @sym: name of the symbol
+ * @tmp: mandatory scratch register
+ * @cond: conditional opcode suffix
+ */
+ .macro str_l, src:req, sym:req, tmp:req, cond
+ __adldst_l str, \src, \sym, \tmp, \cond
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
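[Editor's note] The mov_l/adr_l/ldr_l/str_l pseudo-instructions added above give assembly code position-independent, range-unlimited access to symbols, which is what later patches in this series use to replace open-coded VA->PA sequences in head.S and the KVM stubs. A minimal usage sketch (the symbol names are illustrative, not taken from this diff):

	mov_l	r4, 0xd0000000			@ materialise an arbitrary 32-bit constant
	adr_l	r0, example_buffer		@ r0 = runtime address of example_buffer
	ldr_l	r1, example_flag		@ r1 = 32-bit word stored at example_flag
	add	r1, r1, #1
	str_l	r1, example_flag, r2		@ store it back, r2 used as scratch
	adr_l	r3, example_table, eq		@ optional condition-code suffix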
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 898e9c78a7e7..595e538f5bfb 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -21,29 +21,20 @@
* assembly implementation with completely non standard calling convention
* for arguments and results (beware).
*/
-
-#ifdef __ARMEB__
-#define __xh "r0"
-#define __xl "r1"
-#else
-#define __xl "r0"
-#define __xh "r1"
-#endif
-
static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
register unsigned int __base asm("r4") = base;
register unsigned long long __n asm("r0") = *n;
register unsigned long long __res asm("r2");
- register unsigned int __rem asm(__xh);
- asm( __asmeq("%0", __xh)
+ unsigned int __rem;
+ asm( __asmeq("%0", "r0")
__asmeq("%1", "r2")
- __asmeq("%2", "r0")
- __asmeq("%3", "r4")
+ __asmeq("%2", "r4")
"bl __do_div64"
- : "=r" (__rem), "=r" (__res)
- : "r" (__n), "r" (__base)
+ : "+r" (__n), "=r" (__res)
+ : "r" (__base)
: "ip", "lr", "cc");
+ __rem = __n >> 32;
*n = __res;
return __rem;
}
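[Editor's note] The rewritten constraints make the 64-bit dividend a read-write operand in r0/r1 and pull the remainder out of its upper word after the call, so the endianness-dependent __xh/__xl register aliases are no longer needed. For reference, a minimal caller-side sketch — callers normally reach __div64_32() through do_div(), which divides its first argument in place and returns the remainder:

	#include <asm/div64.h>		/* do_div(), built on top of __div64_32() */

	static u32 example_ns_to_ms(u64 ns)
	{
		u32 rem = do_div(ns, 1000000);	/* ns now holds the quotient */

		(void)rem;			/* remainder in nanoseconds, unused here */
		return (u32)ns;			/* whole milliseconds */
	}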
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 61941f369861..b8102a6ddf16 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -51,6 +51,7 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_TARGET1 38
@@ -58,11 +59,15 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
+#define R_ARM_MOVW_PREL_NC 45
+#define R_ARM_MOVT_PREL 46
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
+#define R_ARM_THM_MOVW_PREL_NC 49
+#define R_ARM_THM_MOVT_PREL 50
/*
* These are used to set parameters in the core dumps.
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index c279a8a463a2..707068f852c2 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -2,7 +2,7 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
-#define FIXADDR_START 0xffc00000UL
+#define FIXADDR_START 0xffc80000UL
#define FIXADDR_END 0xfff00000UL
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
new file mode 100644
index 000000000000..303c35df3135
--- /dev/null
+++ b/arch/arm/include/asm/kasan.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan.h
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+
+#include <asm/kasan_def.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/*
+ * The compiler uses a shadow offset assuming that addresses start
+ * from 0. Kernel addresses don't start from 0, so shadow
+ * for kernel really starts from 'compiler's shadow offset' +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
+ */
+
+asmlinkage void kasan_early_init(void);
+extern void kasan_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kasan_def.h b/arch/arm/include/asm/kasan_def.h
new file mode 100644
index 000000000000..5739605aa7cf
--- /dev/null
+++ b/arch/arm/include/asm/kasan_def.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/arm/include/asm/kasan_def.h
+ *
+ * Copyright (c) 2018 Huawei Technologies Co., Ltd.
+ *
+ * Author: Abbott Liu <liuwenliang@huawei.com>
+ */
+
+#ifndef __ASM_KASAN_DEF_H
+#define __ASM_KASAN_DEF_H
+
+#ifdef CONFIG_KASAN
+
+/*
+ * Define KASAN_SHADOW_OFFSET,KASAN_SHADOW_START and KASAN_SHADOW_END for
+ * the Arm kernel address sanitizer. We are "stealing" lowmem (the 4GB
+ * addressable by a 32bit architecture) out of the virtual address
+ * space to use as shadow memory for KASan as follows:
+ *
+ * +----+ 0xffffffff
+ * | | \
+ * | | |-> Static kernel image (vmlinux) BSS and page table
+ * | |/
+ * +----+ PAGE_OFFSET
+ * | | \
+ * | | |-> Loadable kernel modules virtual address space area
+ * | |/
+ * +----+ MODULES_VADDR = KASAN_SHADOW_END
+ * | | \
+ * | | |-> The shadow area of kernel virtual address.
+ * | |/
+ * +----+-> TASK_SIZE (start of kernel space) = KASAN_SHADOW_START the
+ * | |\ shadow address of MODULES_VADDR
+ * | | |
+ * | | |
+ * | | |-> The user space area in lowmem. The kernel address
+ * | | | sanitizer do not use this space, nor does it map it.
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | |/
+ * ------ 0
+ *
+ * 1) KASAN_SHADOW_START
+ * This value begins with the MODULE_VADDR's shadow address. It is the
+ * start of kernel virtual space. Since we have modules to load, we need
+ * to cover also that area with shadow memory so we can find memory
+ * bugs in modules.
+ *
+ * 2) KASAN_SHADOW_END
+ * This value is the 0x100000000's shadow address: the mapping that would
+ * be after the end of the kernel memory at 0xffffffff. It is the end of
+ * kernel address sanitizer shadow area. It is also the start of the
+ * module area.
+ *
+ * 3) KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ *
+ * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * As you would expect, >> 3 is equal to dividing by 8, meaning each
+ * byte in the shadow memory covers 8 bytes of kernel memory, so one
+ * bit shadow memory per byte of kernel memory is used.
+ *
+ * The KASAN_SHADOW_OFFSET is provided in a Kconfig option depending
+ * on the VMSPLIT layout of the system: the kernel and userspace can
+ * split up lowmem in different ways according to needs, so we calculate
+ * the shadow offset depending on this.
+ */
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END ((UL(1) << (32 - KASAN_SHADOW_SCALE_SHIFT)) \
+ + KASAN_SHADOW_OFFSET)
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET)
+
+#endif
+#endif
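[Editor's note] To make the layout concrete, here is the arithmetic worked through for the default 3 GiB/1 GiB split (PAGE_OFFSET = 0xc0000000, MODULES_VADDR = 0xbf000000), for which the shadow offset works out to 0x9f000000 — an illustrative sketch, not code from the patch:

	#include <stdio.h>

	#define KASAN_SHADOW_SCALE_SHIFT	3
	#define KASAN_SHADOW_OFFSET		0x9f000000UL	/* assumed 3G/1G split value */

	int main(void)
	{
		unsigned long end   = (1UL << (32 - KASAN_SHADOW_SCALE_SHIFT))
				      + KASAN_SHADOW_OFFSET;		/* 0xbf000000 = MODULES_VADDR */
		unsigned long start = (end >> 3) + KASAN_SHADOW_OFFSET;	/* 0xb6e00000 = TASK_SIZE */
		unsigned long addr  = 0xc0000000UL;			/* PAGE_OFFSET */

		printf("shadow end    %#lx\n", end);
		printf("shadow start  %#lx\n", start);
		printf("shadow(%#lx) = %#lx\n", addr,
		       (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET);	/* 0xb7000000 */
		return 0;
	}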
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 99035b5891ef..2f841cb65c30 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,6 +18,7 @@
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
+#include <asm/kasan_def.h>
/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
@@ -28,7 +29,11 @@
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
+#ifndef CONFIG_KASAN
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#else
+#define TASK_SIZE (KASAN_SHADOW_START)
+#endif
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
@@ -67,6 +72,10 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
+#define FDT_FIXED_BASE UL(0xff800000)
+#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
+#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
+
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
@@ -107,6 +116,7 @@ extern unsigned long vectors_base;
#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
+#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
#endif /* !CONFIG_MMU */
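[Editor's note] With the DT no longer mapped through the linear map, FDT_VIRT_BASE() above places the blob in a fixed two-section window while keeping its offset within a section. A small sketch of that calculation (assuming the classic non-LPAE 1 MiB section size; the physical address is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define SECTION_SIZE	0x00100000UL	/* 1 MiB sections, non-LPAE */
	#define FDT_FIXED_BASE	0xff800000UL

	static void *fdt_virt_base(uintptr_t physbase)
	{
		return (void *)(FDT_FIXED_BASE | (physbase % SECTION_SIZE));
	}

	int main(void)
	{
		/* e.g. a DT blob handed over at physical address 0x4a00c000 */
		printf("%p\n", fdt_virt_base(0x4a00c000));	/* 0xff80c000 */
		return 0;
	}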
@@ -173,6 +183,7 @@ extern unsigned long vectors_base;
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
+#define __PV_BITS_23_16 0x810000
#define __PV_BITS_7_0 0x81
extern unsigned long __pv_phys_pfn_offset;
@@ -183,43 +194,65 @@ extern const void *__pv_table_begin, *__pv_table_end;
#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
-#define __pv_stub(from,to,instr,type) \
+#ifndef CONFIG_THUMB2_KERNEL
+#define __pv_stub(from,to,instr) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
+ "2: " instr " %0, %0, %3\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 1b - ., 2b - .\n" \
" .popsection\n" \
: "=r" (to) \
- : "r" (from), "I" (type))
+ : "r" (from), "I" (__PV_BITS_31_24), \
+ "I"(__PV_BITS_23_16))
-#define __pv_stub_mov_hi(t) \
- __asm__ volatile("@ __pv_stub_mov\n" \
- "1: mov %R0, %1\n" \
+#define __pv_add_carry_stub(x, y) \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " adds %Q0, %1, %R0, lsl #20\n" \
+ "1: mov %R0, %2\n" \
+ " adc %R0, %R0, #0\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 0b - ., 1b - .\n" \
" .popsection\n" \
- : "=r" (t) \
- : "I" (__PV_BITS_7_0))
+ : "=&r" (y) \
+ : "r" (x), "I" (__PV_BITS_7_0) \
+ : "cc")
+
+#else
+#define __pv_stub(from,to,instr) \
+ __asm__("@ __pv_stub\n" \
+ "0: movw %0, #0\n" \
+ " lsl %0, #21\n" \
+ " " instr " %0, %1, %0\n" \
+ " .pushsection .pv_table,\"a\"\n" \
+ " .long 0b - .\n" \
+ " .popsection\n" \
+ : "=&r" (to) \
+ : "r" (from))
#define __pv_add_carry_stub(x, y) \
- __asm__ volatile("@ __pv_add_carry_stub\n" \
- "1: adds %Q0, %1, %2\n" \
+ __asm__("@ __pv_add_carry_stub\n" \
+ "0: movw %R0, #0\n" \
+ " lsls %R0, #21\n" \
+ " adds %Q0, %1, %R0\n" \
+ "1: mvn %R0, #0\n" \
" adc %R0, %R0, #0\n" \
" .pushsection .pv_table,\"a\"\n" \
- " .long 1b\n" \
+ " .long 0b - ., 1b - .\n" \
" .popsection\n" \
- : "+r" (y) \
- : "r" (x), "I" (__PV_BITS_31_24) \
+ : "=&r" (y) \
+ : "r" (x) \
: "cc")
+#endif
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
phys_addr_t t;
if (sizeof(phys_addr_t) == 4) {
- __pv_stub(x, t, "add", __PV_BITS_31_24);
+ __pv_stub(x, t, "add");
} else {
- __pv_stub_mov_hi(t);
__pv_add_carry_stub(x, t);
}
return t;
@@ -235,7 +268,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
* assembler expression receives 32 bit argument
* in place where 'r' 32 bit operand is expected.
*/
- __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
+ __pv_stub((unsigned long) x, t, "sub");
return t;
}
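[Editor's note] The reworked stubs record place-relative .pv_table entries and have the translation offset patched directly into the instruction stream by the boot-time p2v fixup code, but the net effect is still a constant offset between kernel virtual and physical addresses. A conceptual C equivalent (the base addresses are illustrative, not from the patch):

	#include <stdint.h>

	#define EXAMPLE_PAGE_OFFSET	0xc0000000UL	/* illustrative virtual base */
	#define EXAMPLE_PHYS_OFFSET	0x80000000ULL	/* illustrative physical base */

	static inline uint64_t example_virt_to_phys(unsigned long x)
	{
		return (uint64_t)x - EXAMPLE_PAGE_OFFSET + EXAMPLE_PHYS_OFFSET;
	}

	static inline unsigned long example_phys_to_virt(uint64_t x)
	{
		return (unsigned long)(x - EXAMPLE_PHYS_OFFSET + EXAMPLE_PAGE_OFFSET);
	}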
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 15f4674715f8..fdee1f04f4f3 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -21,6 +21,7 @@
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#ifdef CONFIG_ARM_LPAE
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
@@ -28,14 +29,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#else /* !CONFIG_ARM_LPAE */
+#define PGD_SIZE (PAGE_SIZE << 2)
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
+#ifdef CONFIG_KASAN
+/* The KASan core unconditionally calls pud_populate() on all architectures */
+#define pud_populate(mm,pmd,pte) do { } while (0)
+#else
#define pud_populate(mm,pmd,pte) BUG()
-
+#endif
#endif /* CONFIG_ARM_LPAE */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index baf7d0204eb5..70fe69bdcce2 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -179,11 +179,28 @@
* the pud: the pud entry is never bad, always exists, and can't be set or
* cleared.
*/
-#define pud_none(pud) (0)
-#define pud_bad(pud) (0)
-#define pud_present(pud) (1)
-#define pud_clear(pudp) do { } while (0)
-#define set_pud(pud,pudp) do { } while (0)
+static inline int pud_none(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return 1;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+}
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b9241051e5cb..9e6b97286307 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -96,7 +96,7 @@ unsigned long get_wchan(struct task_struct *p);
#define __ALT_SMP_ASM(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
+ " .long 9998b - .\n" \
" " up "\n" \
" .popsection\n"
#else
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 1e36c40533c1..402e3f34c7ed 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -9,12 +9,12 @@
#ifdef CONFIG_OF
-extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
-static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
{
return NULL;
}
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index 111a1d8a41dd..6c607c68f3ad 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -5,6 +5,9 @@
/*
* We don't do inline string functions, since the
* optimised inline asm versions are not small.
+ *
+ * The __underscore versions of some functions are for KASan to be able
+ * to replace them with instrumented versions.
*/
#define __HAVE_ARCH_STRRCHR
@@ -15,15 +18,18 @@ extern char * strchr(const char * s, int c);
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMMOVE
extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMCHR
extern void * memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
#define __HAVE_ARCH_MEMSET32
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
@@ -39,4 +45,24 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8, v >> 32);
}
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * must use non-instrumented versions of the mem*
+ * functions named __memcpy() etc. All such kernel code has
+ * been tagged with KASAN_SANITIZE_file.o = n, which means
+ * that the address sanitization argument isn't passed to the
+ * compiler, and __SANITIZE_ADDRESS__ is not set. As a result
+ * these defines kick in.
+ */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+
#endif
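[Editor's note] In practice, a file that is excluded from instrumentation keeps calling memcpy()/memset() as usual and simply ends up in the uninstrumented __ variants through the defines above. A hedged sketch (the file and function names are made up):

	/* Built with KASAN_SANITIZE_early_foo.o := n in the Makefile, so
	 * __SANITIZE_ADDRESS__ is not defined for this translation unit. */
	#include <linux/string.h>

	void early_foo_copy(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);	/* expands to __memcpy(dst, src, len) */
	}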
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index eb7ce2747eb0..70d4cbc49ae1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -13,7 +13,15 @@
#include <asm/fpstate.h>
#include <asm/page.h>
+#ifdef CONFIG_KASAN
+/*
+ * KASan uses a lot of extra stack space so the thread size order needs to
+ * be increased.
+ */
+#define THREAD_SIZE_ORDER 2
+#else
#define THREAD_SIZE_ORDER 1
+#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP (THREAD_SIZE - 8)
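[Editor's note] With 4 KiB pages the order bump doubles the kernel stack:

	THREAD_SIZE = PAGE_SIZE << THREAD_SIZE_ORDER
	  without KASAN: 4096 << 1 =  8 KiB
	  with KASAN:    4096 << 2 = 16 KiB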
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
index 907571fd05c6..e6eb7a2aaf1e 100644
--- a/arch/arm/include/asm/uaccess-asm.h
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -85,7 +85,7 @@
*/
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
ldr \tmp1, [\tsk, #TI_ADDR_LIMIT]
- mov \tmp2, #TASK_SIZE
+ ldr \tmp2, =TASK_SIZE
str \tmp2, [\tsk, #TI_ADDR_LIMIT]
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
DACR( str \tmp0, [sp, #SVC_DACR])