author     Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 12:39:07 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 12:39:07 -0800
commit     01e9d22638f387b5413163d1030169b6478c09c5 (patch)
tree       f36991611954663429f2ed36ec19145dd0c9ec4f /arch
parent     541d284be0fcca3d3990e6dd89b091aec66849c6 (diff)
parent     6660800fb7fd0f66faecb3c550fe59709220ade5 (diff)
download   linux-01e9d22638f387b5413163d1030169b6478c09c5.tar.bz2
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:

 - UEFI boot and runtime services support for ARM from Ard Biesheuvel and
   Roy Franz.

 - DT compatibility with old atags booting protocol for Nokia N900 devices
   from Ivaylo Dimitrov.

 - PSCI firmware interface using new arm-smc calling convention from Jens
   Wiklander.

 - Runtime patching for udiv/sdiv instructions for ARMv7 CPUs that support
   these instructions from Nicolas Pitre.

 - L2x0 cache updates from Dirk B and Linus Walleij.

 - Randconfig fixes from Arnd Bergmann.

 - ARMv7M (nommu) updates from Ezequiel Garcia

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (34 commits)
  ARM: 8481/2: drivers: psci: replace psci firmware calls
  ARM: 8480/2: arm64: add implementation for arm-smccc
  ARM: 8479/2: add implementation for arm-smccc
  ARM: 8478/2: arm/arm64: add arm-smccc
  ARM: 8494/1: mm: Enable PXN when running non-LPAE kernel on LPAE processor
  ARM: 8496/1: OMAP: RX51: save ATAGS data in the early boot stage
  ARM: 8495/1: ATAGS: move save_atags() to arch/arm/include/asm/setup.h
  ARM: 8452/3: PJ4: make coprocessor access sequences buildable in Thumb2 mode
  ARM: 8482/1: l2x0: make it possible to disable outer sync from DT
  ARM: 8488/1: Make IPI_CPU_BACKTRACE a "non-secure" SGI
  ARM: 8487/1: Remove IPI_CALL_FUNC_SINGLE
  ARM: 8485/1: cpuidle: remove cpu parameter from the cpuidle_ops suspend hook
  ARM: 8484/1: Documentation: l2c2x0: Mention separate controllers explicitly
  ARM: 8483/1: Documentation: l2c: Rename l2cc to l2c2x0
  ARM: 8477/1: runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()
  ARM: 8476/1: VDSO: use PTR_ERR_OR_ZERO for vma check
  ARM: 8453/2: proc-v7.S: don't locate temporary stack space in .text section
  ARM: add UEFI stub support
  ARM: wire up UEFI init and runtime support
  ARM: only consider memblocks with NOMAP cleared for linear mapping
  ...
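As context for the arm-smccc items above: the series adds arm_smccc_smc()/arm_smccc_hvc() as the common entry points for the SMC Calling Convention, and the PSCI firmware interface is converted to use them (see the smccc-call.S files in the diff below). A minimal sketch of a caller, assuming a made-up function ID (EXAMPLE_FN_ID is not a real firmware call):

    #include <linux/arm-smccc.h>

    /* Hypothetical SMC function ID, for illustration only. */
    #define EXAMPLE_FN_ID  0x82000001

    static int example_query_firmware(void)
    {
            struct arm_smccc_res res;

            /* a0 is the function ID, a1..a7 are arguments; results land in res.a0..a3 */
            arm_smccc_smc(EXAMPLE_FN_ID, 0, 0, 0, 0, 0, 0, 0, &res);

            return (int)res.a0;
    }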
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 50
-rw-r--r--  arch/arm/boot/compressed/Makefile | 4
-rw-r--r--  arch/arm/boot/compressed/efi-header.S | 130
-rw-r--r--  arch/arm/boot/compressed/head.S | 54
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.S | 7
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/bug.h | 5
-rw-r--r--  arch/arm/include/asm/cpuidle.h | 2
-rw-r--r--  arch/arm/include/asm/efi.h | 83
-rw-r--r--  arch/arm/include/asm/fixmap.h | 29
-rw-r--r--  arch/arm/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm/include/asm/mach/map.h | 2
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 2
-rw-r--r--  arch/arm/include/asm/psci.h | 2
-rw-r--r--  arch/arm/include/asm/setup.h | 6
-rw-r--r--  arch/arm/kernel/Makefile | 5
-rw-r--r--  arch/arm/kernel/armksyms.c | 6
-rw-r--r--  arch/arm/kernel/atags.h | 6
-rw-r--r--  arch/arm/kernel/cpuidle.c | 2
-rw-r--r--  arch/arm/kernel/efi.c | 38
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 2
-rw-r--r--  arch/arm/kernel/pj4-cp0.c | 4
-rw-r--r--  arch/arm/kernel/psci-call.S | 31
-rw-r--r--  arch/arm/kernel/setup.c | 77
-rw-r--r--  arch/arm/kernel/smccc-call.S | 62
-rw-r--r--  arch/arm/kernel/smp.c | 17
-rw-r--r--  arch/arm/kernel/vdso.c | 2
-rw-r--r--  arch/arm/lib/lib1funcs.S | 8
-rw-r--r--  arch/arm/mach-omap2/board-generic.c | 12
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 33
-rw-r--r--  arch/arm/mm/cache-uniphier.c | 13
-rw-r--r--  arch/arm/mm/init.c | 5
-rw-r--r--  arch/arm/mm/ioremap.c | 9
-rw-r--r--  arch/arm/mm/mmu.c | 130
-rw-r--r--  arch/arm/mm/proc-v7.S | 23
-rw-r--r--  arch/arm/mm/proc-v7m.S | 14
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 4
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 5
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 3
-rw-r--r--  arch/arm64/kernel/psci-call.S | 28
-rw-r--r--  arch/arm64/kernel/smccc-call.S | 43
42 files changed, 786 insertions(+), 176 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 688dc7b0d951..b0e7ff176028 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -20,6 +20,7 @@ config ARM
select GENERIC_ALLOCATOR
select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@@ -33,10 +34,11 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
- select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARM_SMCCC if CPU_V7
select HAVE_BPF_JIT
select HAVE_CC_STACKPROTECTOR
select HAVE_CONTEXT_TRACKING
@@ -45,7 +47,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if MMU
- select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32
+ select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
@@ -802,6 +804,7 @@ config ARCH_VIRT
bool "Dummy Virtual Machine" if ARCH_MULTI_V7
select ARM_AMBA
select ARM_GIC
+ select ARM_GIC_V2M if PCI_MSI
select ARM_GIC_V3
select ARM_PSCI
select HAVE_ARM_ARCH_TIMER
@@ -1425,7 +1428,7 @@ config BIG_LITTLE
config BL_SWITCHER
bool "big.LITTLE switcher support"
- depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
select ARM_CPU_SUSPEND
select CPU_PM
help
@@ -1484,7 +1487,7 @@ config HOTPLUG_CPU
config ARM_PSCI
bool "Support for the ARM Power State Coordination Interface (PSCI)"
- depends on CPU_V7
+ depends on HAVE_ARM_SMCCC
select ARM_PSCI_FW
help
Say Y here if you want Linux to communicate with system firmware
@@ -1607,6 +1610,24 @@ config THUMB2_AVOID_R_ARM_THM_JUMP11
config ARM_ASM_UNIFIED
bool
+config ARM_PATCH_IDIV
+ bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()"
+ depends on CPU_32v7 && !XIP_KERNEL
+ default y
+ help
+ The ARM compiler inserts calls to __aeabi_idiv() and
+ __aeabi_uidiv() when it needs to perform division on signed
+ and unsigned integers. Some v7 CPUs have support for the sdiv
+ and udiv instructions that can be used to implement those
+ functions.
+
+ Enabling this option allows the kernel to modify itself to
+ replace the first two instructions of these library functions
+ with the sdiv or udiv plus "bx lr" instructions when the CPU
+ it is running on supports them. Typically this will be faster
+ and less power intensive than running the original library
+ code to do integer division.
+
config AEABI
bool "Use the ARM EABI to compile the kernel"
help
@@ -2043,6 +2064,25 @@ config AUTO_ZRELADDR
0xf8000000. This assumes the zImage being placed in the first 128MB
from start of memory.
+config EFI_STUB
+ bool
+
+config EFI
+ bool "UEFI runtime support"
+ depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ select EFI_STUB
+ select EFI_ARMSTUB
+ select EFI_RUNTIME_WRAPPERS
+ ---help---
+ This option provides support for runtime services provided
+ by UEFI firmware (such as non-volatile variables, realtime
+ clock, and platform reset). A UEFI stub is also provided to
+ allow the kernel to be booted as an EFI application. This
+ is only useful for kernels that may run on systems that have
+ UEFI firmware.
+
endmenu
menu "CPU Power Management"
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 3f9a9ebc77c3..4c23a68a0917 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -167,9 +167,11 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
false; \
fi
+efi-obj-$(CONFIG_EFI_STUB) := $(objtree)/drivers/firmware/efi/libstub/lib.a
+
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
- $(bswapsdi2) FORCE
+ $(bswapsdi2) $(efi-obj-y) FORCE
@$(check_for_multiple_zreladdr)
$(call if_changed,ld)
@$(check_for_bad_syms)
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
new file mode 100644
index 000000000000..9d5dc4fda3c1
--- /dev/null
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2013-2015 Linaro Ltd
+ * Authors: Roy Franz <roy.franz@linaro.org>
+ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ .macro __nop
+#ifdef CONFIG_EFI_STUB
+ @ This is almost but not quite a NOP, since it does clobber the
+ @ condition flags. But it is the best we can do for EFI, since
+ @ PE/COFF expects the magic string "MZ" at offset 0, while the
+ @ ARM/Linux boot protocol expects an executable instruction
+ @ there.
+ .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
+#else
+ mov r0, r0
+#endif
+ .endm
+
+ .macro __EFI_HEADER
+#ifdef CONFIG_EFI_STUB
+ b __efi_start
+
+ .set start_offset, __efi_start - start
+ .org start + 0x3c
+ @
+ @ The PE header can be anywhere in the file, but for
+ @ simplicity we keep it together with the MSDOS header
+ @ The offset to the PE/COFF header needs to be at offset
+ @ 0x3C in the MSDOS header.
+ @ The only 2 fields of the MSDOS header that are used are this
+ @ PE/COFF offset, and the "MZ" bytes at offset 0x0.
+ @
+ .long pe_header - start @ Offset to the PE header.
+
+pe_header:
+ .ascii "PE\0\0"
+
+coff_header:
+ .short 0x01c2 @ ARM or Thumb
+ .short 2 @ nr_sections
+ .long 0 @ TimeDateStamp
+ .long 0 @ PointerToSymbolTable
+ .long 1 @ NumberOfSymbols
+ .short section_table - optional_header
+ @ SizeOfOptionalHeader
+ .short 0x306 @ Characteristics.
+ @ IMAGE_FILE_32BIT_MACHINE |
+ @ IMAGE_FILE_DEBUG_STRIPPED |
+ @ IMAGE_FILE_EXECUTABLE_IMAGE |
+ @ IMAGE_FILE_LINE_NUMS_STRIPPED
+
+optional_header:
+ .short 0x10b @ PE32 format
+ .byte 0x02 @ MajorLinkerVersion
+ .byte 0x14 @ MinorLinkerVersion
+ .long _end - __efi_start @ SizeOfCode
+ .long 0 @ SizeOfInitializedData
+ .long 0 @ SizeOfUninitializedData
+ .long efi_stub_entry - start @ AddressOfEntryPoint
+ .long start_offset @ BaseOfCode
+ .long 0 @ data
+
+extra_header_fields:
+ .long 0 @ ImageBase
+ .long 0x200 @ SectionAlignment
+ .long 0x200 @ FileAlignment
+ .short 0 @ MajorOperatingSystemVersion
+ .short 0 @ MinorOperatingSystemVersion
+ .short 0 @ MajorImageVersion
+ .short 0 @ MinorImageVersion
+ .short 0 @ MajorSubsystemVersion
+ .short 0 @ MinorSubsystemVersion
+ .long 0 @ Win32VersionValue
+
+ .long _end - start @ SizeOfImage
+ .long start_offset @ SizeOfHeaders
+ .long 0 @ CheckSum
+ .short 0xa @ Subsystem (EFI application)
+ .short 0 @ DllCharacteristics
+ .long 0 @ SizeOfStackReserve
+ .long 0 @ SizeOfStackCommit
+ .long 0 @ SizeOfHeapReserve
+ .long 0 @ SizeOfHeapCommit
+ .long 0 @ LoaderFlags
+ .long 0x6 @ NumberOfRvaAndSizes
+
+ .quad 0 @ ExportTable
+ .quad 0 @ ImportTable
+ .quad 0 @ ResourceTable
+ .quad 0 @ ExceptionTable
+ .quad 0 @ CertificationTable
+ .quad 0 @ BaseRelocationTable
+
+section_table:
+ @
+ @ The EFI application loader requires a relocation section
+ @ because EFI applications must be relocatable. This is a
+ @ dummy section as far as we are concerned.
+ @
+ .ascii ".reloc\0\0"
+ .long 0 @ VirtualSize
+ .long 0 @ VirtualAddress
+ .long 0 @ SizeOfRawData
+ .long 0 @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long 0x42100040 @ Characteristics
+
+ .ascii ".text\0\0\0"
+ .long _end - __efi_start @ VirtualSize
+ .long __efi_start @ VirtualAddress
+ .long _edata - __efi_start @ SizeOfRawData
+ .long __efi_start @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long 0xe0500020 @ Characteristics
+
+ .align 9
+__efi_start:
+#endif
+ .endm
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 06e983f59980..af11c2f8f3b7 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -12,6 +12,8 @@
#include <asm/assembler.h>
#include <asm/v7m.h>
+#include "efi-header.S"
+
AR_CLASS( .arch armv7-a )
M_CLASS( .arch armv7-m )
@@ -126,7 +128,7 @@
start:
.type start,#function
.rept 7
- mov r0, r0
+ __nop
.endr
ARM( mov r0, r0 )
ARM( b 1f )
@@ -139,7 +141,8 @@ start:
.word 0x04030201 @ endianness flag
THUMB( .thumb )
-1:
+1: __EFI_HEADER
+
ARM_BE8( setend be ) @ go BE8 if compiled for BE8
AR_CLASS( mrs r9, cpsr )
#ifdef CONFIG_ARM_VIRT_EXT
@@ -1353,6 +1356,53 @@ __enter_kernel:
reloc_code_end:
+#ifdef CONFIG_EFI_STUB
+ .align 2
+_start: .long start - .
+
+ENTRY(efi_stub_entry)
+ @ allocate space on stack for passing current zImage address
+ @ and for the EFI stub to return of new entry point of
+ @ zImage, as EFI stub may copy the kernel. Pointer address
+ @ is passed in r2. r0 and r1 are passed through from the
+ @ EFI firmware to efi_entry
+ adr ip, _start
+ ldr r3, [ip]
+ add r3, r3, ip
+ stmfd sp!, {r3, lr}
+ mov r2, sp @ pass zImage address in r2
+ bl efi_entry
+
+ @ Check for error return from EFI stub. r0 has FDT address
+ @ or error code.
+ cmn r0, #1
+ beq efi_load_fail
+
+ @ Preserve return value of efi_entry() in r4
+ mov r4, r0
+ bl cache_clean_flush
+ bl cache_off
+
+ @ Set parameters for booting zImage according to boot protocol
+ @ put FDT address in r2, it was returned by efi_entry()
+ @ r1 is the machine type, and r0 needs to be 0
+ mov r0, #0
+ mov r1, #0xFFFFFFFF
+ mov r2, r4
+
+ @ Branch to (possibly) relocated zImage that is in [sp]
+ ldr lr, [sp]
+ ldr ip, =start_offset
+ add lr, lr, ip
+ mov pc, lr @ no mode switch
+
+efi_load_fail:
+ @ Return EFI_LOAD_ERROR to EFI firmware on error.
+ ldr r0, =0x80000001
+ ldmfd sp!, {ip, pc}
+ENDPROC(efi_stub_entry)
+#endif
+
.align
.section ".stack", "aw", %nobits
.L_user_stack: .space 4096
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 2b60b843ac5e..81c493156ce8 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -48,6 +48,13 @@ SECTIONS
*(.rodata)
*(.rodata.*)
}
+ .data : {
+ /*
+ * The EFI stub always executes from RAM, and runs strictly before the
+ * decompressor, so we can make an exception for its r/w data, and keep it
+ */
+ *(.data.efistub)
+ }
.piggydata : {
*(.piggydata)
}
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index bd425302c97a..16da6380eb85 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -3,6 +3,7 @@
generic-y += bitsperlong.h
generic-y += cputime.h
generic-y += current.h
+generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index e7335a92144e..4e6e88a6b2f4 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -5,8 +5,6 @@
#include <linux/types.h>
#include <asm/opcodes.h>
-#ifdef CONFIG_BUG
-
/*
* Use a suitable undefined instruction to use for ARM/Thumb2 bug handling.
* We need to be careful not to conflict with those used by other modules and
@@ -47,7 +45,7 @@ do { \
unreachable(); \
} while (0)
-#else /* not CONFIG_DEBUG_BUGVERBOSE */
+#else
#define __BUG(__file, __line, __value) \
do { \
@@ -57,7 +55,6 @@ do { \
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define HAVE_ARCH_BUG
-#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0f8424924902..3848259bebf8 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -30,7 +30,7 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct device_node;
struct cpuidle_ops {
- int (*suspend)(int cpu, unsigned long arg);
+ int (*suspend)(unsigned long arg);
int (*init)(struct device_node *, int cpu);
};
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
new file mode 100644
index 000000000000..e0eea72deb87
--- /dev/null
+++ b/arch/arm/include/asm/efi.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_EFI_H
+#define __ASM_ARM_EFI_H
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/early_ioremap.h>
+#include <asm/fixmap.h>
+#include <asm/highmem.h>
+#include <asm/mach/map.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_EFI
+void efi_init(void);
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
+#define efi_call_virt(f, ...) \
+({ \
+ efi_##f##_t *__f; \
+ efi_status_t __s; \
+ \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
+ __s = __f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
+ __s; \
+})
+
+#define __efi_call_virt(f, ...) \
+({ \
+ efi_##f##_t *__f; \
+ \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
+ __f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
+})
+
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+ check_and_switch_context(mm, NULL);
+}
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
+#else
+#define efi_init()
+#endif /* CONFIG_EFI */
+
+/* arch specific definitions used by the stub code */
+
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+
+/*
+ * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
+ * so we will reserve that amount of memory. We have no easy way to tell what
+ * the actual size of code + data the uncompressed kernel will use.
+ * If this is insufficient, the decompressor will relocate itself out of the
+ * way before performing the decompression.
+ */
+#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
+
+/*
+ * The kernel zImage should preferably be located between 32 MB and 128 MB
+ * from the base of DRAM. The min address leaves space for a maximal size
+ * uncompressed image, and the max address is due to how the zImage decompressor
+ * picks a destination address.
+ */
+#define ZIMAGE_OFFSET_LIMIT SZ_128M
+#define MIN_ZIMAGE_OFFSET MAX_UNCOMP_KERNEL_SIZE
+#define MAX_FDT_OFFSET ZIMAGE_OFFSET_LIMIT
+
+#endif /* _ASM_ARM_EFI_H */
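For reference, the efi_call_virt() wrapper defined above is the intended way to reach a runtime service once efi_virtmap_load()/efi_virtmap_unload() are in place. A sketch of a caller; the helper function is hypothetical, while get_time and efi_time_t are the standard UEFI GetTime() service and its argument type:

    #include <linux/efi.h>
    #include <asm/efi.h>

    /* Hypothetical helper: read the RTC through the UEFI GetTime() service. */
    static efi_status_t example_read_uefi_time(efi_time_t *tm)
    {
            /*
             * efi_call_virt() installs the EFI page tables via efi_virtmap_load(),
             * calls efi.systab->runtime->get_time, then restores the previous
             * mapping in efi_virtmap_unload().
             */
            return efi_call_virt(get_time, tm, NULL);
    }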
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 58cfe9f1a687..5c17d2dec777 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -19,20 +19,47 @@ enum fixed_addresses {
FIX_TEXT_POKE0,
FIX_TEXT_POKE1,
- __end_of_fixed_addresses
+ __end_of_fixmap_region,
+
+ /*
+ * Share the kmap() region with early_ioremap(): this is guaranteed
+ * not to clash since early_ioremap() is only available before
+ * paging_init(), and kmap() only after.
+ */
+#define NR_FIX_BTMAPS 32
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ __end_of_early_ioremap_region
};
+static const enum fixed_addresses __end_of_fixed_addresses =
+ __end_of_fixmap_region > __end_of_early_ioremap_region ?
+ __end_of_fixmap_region : __end_of_early_ioremap_region;
+
#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+#define FIXMAP_PAGE_RO (FIXMAP_PAGE_NORMAL | L_PTE_RDONLY)
/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
+#define __early_set_fixmap __set_fixmap
+
+#ifdef CONFIG_MMU
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
void __init early_fixmap_init(void);
#include <asm-generic/fixmap.h>
+#else
+
+static inline void early_fixmap_init(void) { }
+
+#endif
#endif
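The comment in the fixmap hunk above explains why early_ioremap() can share the kmap() window before paging_init(); the API the new FIX_BTMAP slots back is the generic one. A sketch, assuming a made-up register address supplied by the caller:

    #include <linux/init.h>
    #include <linux/io.h>
    #include <linux/sizes.h>
    #include <asm/early_ioremap.h>

    /* Hypothetical early-boot peek at a device register, before paging_init(). */
    static u32 __init example_early_read_reg(phys_addr_t phys)
    {
            void __iomem *base;
            u32 val;

            base = early_ioremap(phys, SZ_4K);      /* backed by a FIX_BTMAP slot */
            if (!base)
                    return 0;
            val = readl(base);
            early_iounmap(base, SZ_4K);
            return val;
    }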
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index fe3ea776dc34..3d7351c844aa 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 8
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index f98c7f32c9c8..9b7c328fb207 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -42,6 +42,8 @@ enum {
extern void iotable_init(struct map_desc *, int);
extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
void *caller);
+extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+ bool ng);
#ifdef CONFIG_DEBUG_LL
extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 9b32f76bb0dd..432ce8176498 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -26,7 +26,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index 68ee3ce17b82..b4c6d99364f1 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -16,7 +16,7 @@
extern struct smp_operations psci_smp_ops;
-#ifdef CONFIG_ARM_PSCI
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
bool psci_smp_available(void);
#else
static inline bool psci_smp_available(void) { return false; }
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index e0adb9f1bf94..3613d7e9fc40 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -25,4 +25,10 @@ extern int arm_add_memory(u64 start, u64 size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
+#ifdef CONFIG_ATAGS_PROC
+extern void save_atags(const struct tag *tags);
+#else
+static inline void save_atags(const struct tag *tags) { }
+#endif
+
#endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index af9e59bf3831..f729085ece28 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -73,10 +73,10 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
perf_event_v7.o
-CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_VDSO) += vdso.o
+obj-$(CONFIG_EFI) += efi.o
ifneq ($(CONFIG_ARCH_EBSA110),y)
obj-y += io.o
@@ -88,8 +88,9 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y += psci-call.o
obj-$(CONFIG_SMP) += psci_smp.o
endif
+obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o
+
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f89811fb9a55..7e45f69a0ddc 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -16,6 +16,7 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/arm-smccc.h>
#include <asm/checksum.h>
#include <asm/ftrace.h>
@@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
EXPORT_SYMBOL(__pv_phys_pfn_offset);
EXPORT_SYMBOL(__pv_offset);
#endif
+
+#ifdef CONFIG_HAVE_ARM_SMCCC
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
+#endif
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
index ec4164da6e30..edfa2268c127 100644
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -1,9 +1,3 @@
-#ifdef CONFIG_ATAGS_PROC
-extern void save_atags(struct tag *tags);
-#else
-static inline void save_atags(struct tag *tags) { }
-#endif
-
void convert_to_tag_list(struct tag *tags);
#ifdef CONFIG_ATAGS
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 318da33465f4..703926e7007b 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -56,7 +56,7 @@ int arm_cpuidle_suspend(int index)
int cpu = smp_processor_id();
if (cpuidle_ops[cpu].suspend)
- ret = cpuidle_ops[cpu].suspend(cpu, index);
+ ret = cpuidle_ops[cpu].suspend(index);
return ret;
}
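With the cpuidle_ops change above, a platform's suspend hook no longer receives the CPU number; only the state index is passed. A sketch of an ops table using the new prototype; the example callbacks and the "example" method name are hypothetical, while CPUIDLE_METHOD_OF_DECLARE() is the existing registration macro:

    #include <linux/of.h>
    #include <asm/cpuidle.h>

    static int example_cpuidle_init(struct device_node *np, int cpu)
    {
            return 0;       /* per-CPU setup would go here */
    }

    static int example_cpuidle_suspend(unsigned long arg)
    {
            /* 'arg' is the idle state index; the CPU number is no longer passed */
            return 0;
    }

    static const struct cpuidle_ops example_cpuidle_ops = {
            .init    = example_cpuidle_init,
            .suspend = example_cpuidle_suspend,
    };

    CPUIDLE_METHOD_OF_DECLARE(example, "example", &example_cpuidle_ops);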
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
new file mode 100644
index 000000000000..ff8a9d8acfac
--- /dev/null
+++ b/arch/arm/kernel/efi.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/mach/map.h>
+#include <asm/mmu_context.h>
+
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+{
+ struct map_desc desc = {
+ .virtual = md->virt_addr,
+ .pfn = __phys_to_pfn(md->phys_addr),
+ .length = md->num_pages * EFI_PAGE_SIZE,
+ };
+
+ /*
+ * Order is important here: memory regions may have all of the
+ * bits below set (and usually do), so we check them in order of
+ * preference.
+ */
+ if (md->attribute & EFI_MEMORY_WB)
+ desc.type = MT_MEMORY_RWX;
+ else if (md->attribute & EFI_MEMORY_WT)
+ desc.type = MT_MEMORY_RWX_NONCACHED;
+ else if (md->attribute & EFI_MEMORY_WC)
+ desc.type = MT_DEVICE_WC;
+ else
+ desc.type = MT_DEVICE;
+
+ create_mapping_late(mm, &desc, true);
+ return 0;
+}
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index b6c8bb9315e7..907534f97053 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -88,7 +88,7 @@ __pendsv_entry:
@ execute the pending work, including reschedule
get_thread_info tsk
mov why, #0
- b ret_to_user
+ b ret_to_user_from_irq
ENDPROC(__pendsv_entry)
/*
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
index 8153e36b2491..7c9248b74d3f 100644
--- a/arch/arm/kernel/pj4-cp0.c
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -66,9 +66,13 @@ static void __init pj4_cp_access_write(u32 value)
__asm__ __volatile__ (
"mcr p15, 0, %1, c1, c0, 2\n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+ "isb\n\t"
+#else
"mrc p15, 0, %0, c1, c0, 2\n\t"
"mov %0, %0\n\t"
"sub pc, pc, #4\n\t"
+#endif
: "=r" (temp) : "r" (value));
}
diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
deleted file mode 100644
index a78e9e1e206d..000000000000
--- a/arch/arm/kernel/psci-call.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2015 ARM Limited
- *
- * Author: Mark Rutland <mark.rutland@arm.com>
- */
-
-#include <linux/linkage.h>
-
-#include <asm/opcodes-sec.h>
-#include <asm/opcodes-virt.h>
-
-/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-ENTRY(__invoke_psci_fn_hvc)
- __HVC(0)
- bx lr
-ENDPROC(__invoke_psci_fn_hvc)
-
-/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-ENTRY(__invoke_psci_fn_smc)
- __SMC(0)
- bx lr
-ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 20edd349d379..7d0cba6f1cc5 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -7,6 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
@@ -37,7 +38,9 @@
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/efi.h>
#include <asm/elf.h>
+#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
@@ -375,6 +378,72 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+#ifdef CONFIG_ARM_PATCH_IDIV
+
+static inline u32 __attribute_const__ sdiv_instruction(void)
+{
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ /* "sdiv r0, r0, r1" */
+ u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
+ return __opcode_to_mem_thumb32(insn);
+ }
+
+ /* "sdiv r0, r0, r1" */
+ return __opcode_to_mem_arm(0xe710f110);
+}
+
+static inline u32 __attribute_const__ udiv_instruction(void)
+{
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ /* "udiv r0, r0, r1" */
+ u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
+ return __opcode_to_mem_thumb32(insn);
+ }
+
+ /* "udiv r0, r0, r1" */
+ return __opcode_to_mem_arm(0xe730f110);
+}
+
+static inline u32 __attribute_const__ bx_lr_instruction(void)
+{
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ /* "bx lr; nop" */
+ u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
+ return __opcode_to_mem_thumb32(insn);
+ }
+
+ /* "bx lr" */
+ return __opcode_to_mem_arm(0xe12fff1e);
+}
+
+static void __init patch_aeabi_idiv(void)
+{
+ extern void __aeabi_uidiv(void);
+ extern void __aeabi_idiv(void);
+ uintptr_t fn_addr;
+ unsigned int mask;
+
+ mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
+ if (!(elf_hwcap & mask))
+ return;
+
+ pr_info("CPU: div instructions available: patching division code\n");
+
+ fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
+ ((u32 *)fn_addr)[0] = udiv_instruction();
+ ((u32 *)fn_addr)[1] = bx_lr_instruction();
+ flush_icache_range(fn_addr, fn_addr + 8);
+
+ fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
+ ((u32 *)fn_addr)[0] = sdiv_instruction();
+ ((u32 *)fn_addr)[1] = bx_lr_instruction();
+ flush_icache_range(fn_addr, fn_addr + 8);
+}
+
+#else
+static inline void patch_aeabi_idiv(void) { }
+#endif
+
static void __init cpuid_init_hwcaps(void)
{
int block;
@@ -642,6 +711,7 @@ static void __init setup_processor(void)
elf_hwcap = list->elf_hwcap;
cpuid_init_hwcaps();
+ patch_aeabi_idiv();
#ifndef CONFIG_ARM_THUMB
elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
@@ -956,8 +1026,8 @@ void __init setup_arch(char **cmdline_p)
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
- if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
- early_fixmap_init();
+ early_fixmap_init();
+ early_ioremap_init();
parse_early_param();
@@ -965,9 +1035,12 @@ void __init setup_arch(char **cmdline_p)
early_paging_init(mdesc);
#endif
setup_dma_zone(mdesc);
+ efi_init();
sanity_check_meminfo();
arm_memblock_init(mdesc);
+ early_ioremap_reset();
+
paging_init(mdesc);
request_standard_resources(mdesc);
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
new file mode 100644
index 000000000000..2e48b674aab1
--- /dev/null
+++ b/arch/arm/kernel/smccc-call.S
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/linkage.h>
+
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+#include <asm/unwind.h>
+
+ /*
+ * Wrap c macros in asm macros to delay expansion until after the
+ * SMCCC asm macro is expanded.
+ */
+ .macro SMCCC_SMC
+ __SMC(0)
+ .endm
+
+ .macro SMCCC_HVC
+ __HVC(0)
+ .endm
+
+ .macro SMCCC instr
+UNWIND( .fnstart)
+ mov r12, sp
+ push {r4-r7}
+UNWIND( .save {r4-r7})
+ ldm r12, {r4-r7}
+ \instr
+ pop {r4-r7}
+ ldr r12, [sp, #(4 * 4)]
+ stm r12, {r0-r3}
+ bx lr
+UNWIND( .fnend)
+ .endm
+
+/*
+ * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_smc)
+ SMCCC SMCCC_SMC
+ENDPROC(arm_smccc_smc)
+
+/*
+ * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_hvc)
+ SMCCC SMCCC_HVC
+ENDPROC(arm_smccc_hvc)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b26361355dae..37312f6749f3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -69,11 +69,15 @@ enum ipi_msg_type {
IPI_TIMER,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
- IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
- IPI_CPU_BACKTRACE = 15,
+ IPI_CPU_BACKTRACE,
+ /*
+ * SGI8-15 can be reserved by secure firmware, and thus may
+ * not be usable by the kernel. Please keep the above limited
+ * to at most 8 entries.
+ */
};
static DECLARE_COMPLETION(cpu_running);
@@ -475,7 +479,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
S(IPI_COMPLETION, "completion interrupts"),
@@ -525,7 +528,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
void arch_send_call_function_single_ipi(int cpu)
{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_IRQ_WORK
@@ -620,12 +623,6 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
- case IPI_CALL_FUNC_SINGLE:
- irq_enter();
- generic_smp_call_function_single_interrupt();
- irq_exit();
- break;
-
case IPI_CPU_STOP:
irq_enter();
ipi_cpu_stop(cpu);
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 54a5aeab988d..994e971a8538 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -224,7 +224,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
VM_READ | VM_MAYREAD,
&vdso_data_mapping);
- return IS_ERR(vma) ? PTR_ERR(vma) : 0;
+ return PTR_ERR_OR_ZERO(vma);
}
/* assumes mmap_sem is write-locked */
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index af2267f6a529..9397b2e532af 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -205,6 +205,10 @@ Boston, MA 02111-1307, USA. */
.endm
+#ifdef CONFIG_ARM_PATCH_IDIV
+ .align 3
+#endif
+
ENTRY(__udivsi3)
ENTRY(__aeabi_uidiv)
UNWIND(.fnstart)
@@ -253,6 +257,10 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__umodsi3)
+#ifdef CONFIG_ARM_PATCH_IDIV
+ .align 3
+#endif
+
ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
UNWIND(.fnstart)
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 04a56cc04dfa..809827265fb3 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
+#include <asm/setup.h>
#include <asm/mach/arch.h>
#include "common.h"
@@ -76,8 +77,17 @@ static const char *const n900_boards_compat[] __initconst = {
NULL,
};
+/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
+ * save them while the data is still not overwritten
+ */
+static void __init rx51_reserve(void)
+{
+ save_atags((const struct tag *)(PAGE_OFFSET + 0x100));
+ omap_reserve();
+}
+
DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
- .reserve = omap_reserve,
+ .reserve = rx51_reserve,
.map_io = omap3_map_io,
.init_early = omap3430_init_early,
.init_machine = omap_generic_init,
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 493692d838c6..9f9d54271aad 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -790,7 +790,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
};
static int __init __l2c_init(const struct l2c_init_data *data,
- u32 aux_val, u32 aux_mask, u32 cache_id)
+ u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
{
struct outer_cache_fns fns;
unsigned way_size_bits, ways;
@@ -866,6 +866,10 @@ static int __init __l2c_init(const struct l2c_init_data *data,
fns.configure = outer_cache.configure;
if (data->fixup)
data->fixup(l2x0_base, cache_id, &fns);
+ if (nosync) {
+ pr_info("L2C: disabling outer sync\n");
+ fns.sync = NULL;
+ }
/*
* Check if l2x0 controller is already enabled. If we are booting
@@ -925,7 +929,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (data->save)
data->save(l2x0_base);
- __l2c_init(data, aux_val, aux_mask, cache_id);
+ __l2c_init(data, aux_val, aux_mask, cache_id, false);
}
#ifdef CONFIG_OF
@@ -1060,6 +1064,18 @@ static void __init l2x0_of_parse(const struct device_node *np,
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
}
+ if (of_property_read_bool(np, "arm,parity-enable")) {
+ mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+ val |= L2C_AUX_CTRL_PARITY_ENABLE;
+ } else if (of_property_read_bool(np, "arm,parity-disable")) {
+ mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+ }
+
+ if (of_property_read_bool(np, "arm,shared-override")) {
+ mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+ val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+ }
+
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
if (ret)
return;
@@ -1176,6 +1192,14 @@ static void __init l2c310_of_parse(const struct device_node *np,
*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
}
+ if (of_property_read_bool(np, "arm,parity-enable")) {
+ *aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
+ *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+ } else if (of_property_read_bool(np, "arm,parity-disable")) {
+ *aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+ *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+ }
+
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
@@ -1704,6 +1728,7 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
struct resource res;
u32 cache_id, old_aux;
u32 cache_level = 2;
+ bool nosync = false;
np = of_find_matching_node(NULL, l2x0_ids);
if (!np)
@@ -1742,6 +1767,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
if (cache_level != 2)
pr_err("L2C: device tree specifies invalid cache level\n");
+ nosync = of_property_read_bool(np, "arm,outer-sync-disable");
+
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
@@ -1756,6 +1783,6 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
- return __l2c_init(data, aux_val, aux_mask, cache_id);
+ return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index 0502ba17a3ab..a6fa7b73fbe0 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -377,17 +377,6 @@ static const struct of_device_id uniphier_cache_match[] __initconst = {
{ /* sentinel */ }
};
-static struct device_node * __init uniphier_cache_get_next_level_node(
- struct device_node *np)
-{
- u32 phandle;
-
- if (of_property_read_u32(np, "next-level-cache", &phandle))
- return NULL;
-
- return of_find_node_by_phandle(phandle);
-}
-
static int __init __uniphier_cache_init(struct device_node *np,
unsigned int *cache_level)
{
@@ -491,7 +480,7 @@ static int __init __uniphier_cache_init(struct device_node *np,
* next level cache fails because we want to continue with available
* cache levels.
*/
- next_np = uniphier_cache_get_next_level_node(np);
+ next_np = of_find_next_cache_node(np);
if (next_np) {
(*cache_level)++;
ret = __uniphier_cache_init(next_np, cache_level);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7f8cd1b3557f..49bd08178008 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -192,7 +192,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
- return memblock_is_memory(__pfn_to_phys(pfn));
+ return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
@@ -433,6 +433,9 @@ static void __init free_highpages(void)
if (end <= max_low)
continue;
+ if (memblock_is_nomap(mem))
+ continue;
+
/* Truncate partial highmem entries */
if (start < max_low)
start = max_low;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0c81056c1dd7..66a978d05958 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -30,6 +30,7 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
+#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -469,3 +470,11 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
#endif
+
+/*
+ * Must be called after early_fixmap_init
+ */
+void __init early_ioremap_init(void)
+{
+ early_ioremap_setup();
+}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4867f5daf82c..a87f6cc3fa2b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -390,7 +390,7 @@ void __init early_fixmap_init(void)
* The early fixmap range spans multiple pmds, for which
* we are not prepared:
*/
- BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+ BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
!= FIXADDR_TOP >> PMD_SHIFT);
pmd = fixmap_pmd(FIXADDR_TOP);
@@ -572,7 +572,7 @@ static void __init build_mem_type_table(void)
* in the Short-descriptor translation table format descriptors.
*/
if (cpu_arch == CPU_ARCH_ARMv7 &&
- (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+ (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
user_pmd_table |= PMD_PXNTABLE;
}
#endif
@@ -724,30 +724,49 @@ static void __init *early_alloc(unsigned long sz)
return early_alloc_aligned(sz, sz);
}
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static void *__init late_alloc(unsigned long sz)
+{
+ void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
+
+ BUG_ON(!ptr);
+ return ptr;
+}
+
+static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
+ unsigned long prot,
+ void *(*alloc)(unsigned long sz))
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+ pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
__pmd_populate(pmd, __pa(pte), prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
+ unsigned long prot)
+{
+ return pte_alloc(pmd, addr, prot, early_alloc);
+}
+
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
- const struct mem_type *type)
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz),
+ bool ng)
{
- pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+ pte_t *pte = pte_alloc(pmd, addr, type->prot_l1, alloc);
do {
- set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+ set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
+ ng ? PTE_EXT_NG : 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type, bool ng)
{
pmd_t *p = pmd;
@@ -765,7 +784,7 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
pmd++;
#endif
do {
- *pmd = __pmd(phys | type->prot_sect);
+ *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);
@@ -774,7 +793,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz), bool ng)
{
pmd_t *pmd = pmd_offset(pud, addr);
unsigned long next;
@@ -792,10 +812,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
*/
if (type->prot_sect &&
((addr | next | phys) & ~SECTION_MASK) == 0) {
- __map_init_section(pmd, addr, next, phys, type);
+ __map_init_section(pmd, addr, next, phys, type, ng);
} else {
alloc_init_pte(pmd, addr, next,
- __phys_to_pfn(phys), type);
+ __phys_to_pfn(phys), type, alloc, ng);
}
phys += next - addr;
@@ -805,21 +825,24 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz), bool ng)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_pmd(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
#ifndef CONFIG_ARM_LPAE
-static void __init create_36bit_mapping(struct map_desc *md,
- const struct mem_type *type)
+static void __init create_36bit_mapping(struct mm_struct *mm,
+ struct map_desc *md,
+ const struct mem_type *type,
+ bool ng)
{
unsigned long addr, length, end;
phys_addr_t phys;
@@ -859,7 +882,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
*/
phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset(mm, addr);
end = addr + length;
do {
pud_t *pud = pud_offset(pgd, addr);
@@ -867,7 +890,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
int i;
for (i = 0; i < 16; i++)
- *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+ *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
+ (ng ? PMD_SECT_nG : 0));
addr += SUPERSECTION_SIZE;
phys += SUPERSECTION_SIZE;
@@ -876,33 +900,15 @@ static void __init create_36bit_mapping(struct map_desc *md,
}
#endif /* !CONFIG_ARM_LPAE */
-/*
- * Create the page directory entries and any necessary
- * page tables for the mapping specified by `md'. We
- * are able to cope here with varying sizes and address
- * offsets, and we take full advantage of sections and
- * supersections.
- */
-static void __init create_mapping(struct map_desc *md)
+static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
+ void *(*alloc)(unsigned long sz),
+ bool ng)
{
unsigned long addr, length, end;
phys_addr_t phys;
const struct mem_type *type;
pgd_t *pgd;
- if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
- pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
- return;
- }
-
- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
- (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
- pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
- }
-
type = &mem_types[md->type];
#ifndef CONFIG_ARM_LPAE
@@ -910,7 +916,7 @@ static void __init create_mapping(struct map_desc *md)
* Catch 36-bit addresses
*/
if (md->pfn >= 0x100000) {
- create_36bit_mapping(md, type);
+ create_36bit_mapping(mm, md, type, ng);
return;
}
#endif
@@ -925,12 +931,12 @@ static void __init create_mapping(struct map_desc *md)
return;
}
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset(mm, addr);
end = addr + length;
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type);
+ alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
phys += next - addr;
addr = next;
@@ -938,6 +944,43 @@ static void __init create_mapping(struct map_desc *md)
}
/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'. We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections and
+ * supersections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+ if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+ pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ return;
+ }
+
+ if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ }
+
+ __create_mapping(&init_mm, md, early_alloc, false);
+}
+
+void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+ bool ng)
+{
+#ifdef CONFIG_ARM_LPAE
+ pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+ if (WARN_ON(!pud))
+ return;
+ pmd_alloc(mm, pud, 0);
+#endif
+ __create_mapping(mm, md, late_alloc, ng);
+}
+
+/*
* Create the architecture specific mappings
*/
void __init iotable_init(struct map_desc *io_desc, int nr)
@@ -1392,6 +1435,9 @@ static void __init map_lowmem(void)
phys_addr_t end = start + reg->size;
struct map_desc map;
+ if (memblock_is_nomap(reg))
+ continue;
+
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 8e1ea433c3f1..0f92d575a304 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -274,10 +274,12 @@ __v7_ca15mp_setup:
__v7_b15mp_setup:
__v7_ca17mp_setup:
mov r10, #0
-1: adr r12, __v7_setup_stack @ the local stack
- stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
+1: adr r0, __v7_setup_stack_ptr
+ ldr r12, [r0]
+ add r12, r12, r0 @ the local stack
+ stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6
bl v7_invalidate_l1
- ldmia r12, {r0-r5, lr}
+ ldmia r12, {r1-r6, lr}
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
@@ -415,10 +417,12 @@ __v7_pj4b_setup:
#endif /* CONFIG_CPU_PJ4B */
__v7_setup:
- adr r12, __v7_setup_stack @ the local stack
- stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
+ adr r0, __v7_setup_stack_ptr
+ ldr r12, [r0]
+ add r12, r12, r0 @ the local stack
+ stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6
bl v7_invalidate_l1
- ldmia r12, {r0-r5, lr}
+ ldmia r12, {r1-r6, lr}
__v7_setup_cont:
and r0, r9, #0xff000000 @ ARM?
@@ -480,11 +484,16 @@ __errata_finish:
orr r0, r0, r6 @ set them
THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
ret lr @ return to head.S:__ret
+
+ .align 2
+__v7_setup_stack_ptr:
+ .word __v7_setup_stack - .
ENDPROC(__v7_setup)
+ .bss
.align 2
__v7_setup_stack:
- .space 4 * 7 @ 12 registers
+ .space 4 * 7 @ 7 registers
__INITDATA
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 67d9209077c6..7229d8d0be1a 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -12,6 +12,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/memory.h>
#include <asm/v7m.h>
#include "proc-macros.S"
@@ -97,19 +98,19 @@ __v7m_setup:
mov r5, #0x00800000
str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority
- @ SVC to run the kernel in this mode
+ @ SVC to switch to handler mode. Notice that this requires sp to
+ @ point to writeable memory because the processor saves
+ @ some registers to the stack.
badr r1, 1f
ldr r5, [r12, #11 * 4] @ read the SVC vector entry
str r1, [r12, #11 * 4] @ write the temporary SVC vector entry
mov r6, lr @ save LR
- mov r7, sp @ save SP
- ldr sp, =__v7m_setup_stack_top
+ ldr sp, =init_thread_union + THREAD_START_SP
cpsie i
svc #0
1: cpsid i
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
- mov sp, r7 @ restore SP
@ Special-purpose control register
mov r1, #1
@@ -123,11 +124,6 @@ __v7m_setup:
ret lr
ENDPROC(__v7m_setup)
- .align 2
-__v7m_setup_stack:
- .space 4 * 8 @ 8 registers
-__v7m_setup_stack_top:
-
define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ffa3c549a4ba..bf82ffe895bf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -93,6 +93,7 @@ config ARM64
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select HAVE_CONTEXT_TRACKING
+ select HAVE_ARM_SMCCC
help
ARM 64-bit (AArch64) Linux support.
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 474691f8b13a..27bf1e5180a1 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -14,10 +14,10 @@ CFLAGS_REMOVE_return_address.o = -pg
arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
+ hyp-stub.o psci.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
- smp.o smp_spin_table.o topology.o
+ smp.o smp_spin_table.o topology.o smccc-call.o
extra-$(CONFIG_EFI) := efi-entry.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 3b6d8cc9dfe0..678f30b05a45 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -26,6 +26,7 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/arm-smccc.h>
#include <asm/checksum.h>
@@ -68,3 +69,7 @@ EXPORT_SYMBOL(test_and_change_bit);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
+
+ /* arm-smccc */
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 25de8b244961..bb493d44445f 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -28,6 +28,7 @@
#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
#include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
int main(void)
{
@@ -161,5 +162,7 @@ int main(void)
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
+ DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
+ DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
return 0;
}
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
deleted file mode 100644
index cf83e61cd3b5..000000000000
--- a/arch/arm64/kernel/psci-call.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2015 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#include <linux/linkage.h>
-
-/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
-ENTRY(__invoke_psci_fn_hvc)
- hvc #0
- ret
-ENDPROC(__invoke_psci_fn_hvc)
-
-/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
-ENTRY(__invoke_psci_fn_smc)
- smc #0
- ret
-ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
new file mode 100644
index 000000000000..ae0496fa4235
--- /dev/null
+++ b/arch/arm64/kernel/smccc-call.S
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+ .macro SMCCC instr
+ .cfi_startproc
+ \instr #0
+ ldr x4, [sp]
+ stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
+ stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
+ ret
+ .cfi_endproc
+ .endm
+
+/*
+ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_smc)
+ SMCCC smc
+ENDPROC(arm_smccc_smc)
+
+/*
+ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ * unsigned long a3, unsigned long a4, unsigned long a5,
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_hvc)
+ SMCCC hvc
+ENDPROC(arm_smccc_hvc)