From 1baa82f48030f38d1895301f1ec93acbcb3d15db Mon Sep 17 00:00:00 2001 From: Peter Crosthwaite Date: Mon, 2 Mar 2015 19:19:14 +0000 Subject: arm64: Implement cpu_relax as yield ARM64 has the yield nop hint which has the intended semantics of cpu_relax. Implement. The immediate application is ARM CPU emulators. An emulator can take advantage of the yield hint to de-prioritise an emulated CPU in favor of other emulation tasks. QEMU A64 SMP emulation has yield awareness, and sees a significant boot time performance increase with this change. Signed-off-by: Peter Crosthwaite Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/processor.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 20e9591a60cf..d2c37a1df0eb 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -127,7 +127,11 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); -#define cpu_relax() barrier() +static inline void cpu_relax(void) +{ + asm volatile("yield" ::: "memory"); +} + #define cpu_relax_lowlatency() cpu_relax() /* Thread switching */ -- cgit v1.2.3 From 137650aad96c9594683445e41afa8ac5a2097520 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Mar 2015 16:14:34 +0000 Subject: arm64: apply alternatives for !SMP kernels Currently we only perform alternative patching for kernels built with CONFIG_SMP, as we call apply_alternatives_all() in smp.c, which is only built for CONFIG_SMP. Thus !SMP kernels may not have necessary alternatives patched in. This patch ensures that we call apply_alternatives_all() once all CPUs are booted, even for !SMP kernels, by having the smp_init_cpus() stub call this for !SMP kernels via up_late_init. A new wrapper, do_post_cpus_up_work, is added so we can hook other calls here later (e.g. boot mode logging). 
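For reference, the resulting call paths can be condensed from the hunks below: both the SMP and !SMP boot flows now funnel through the new wrapper (bodies trimmed to the relevant calls).

void __init do_post_cpus_up_work(void)
{
	apply_alternatives_all();
}

/* SMP kernels: called from smp.c once all CPUs are up */
void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	do_post_cpus_up_work();
}

/* !SMP kernels: CONFIG_UP_LATE_INIT routes the same work through up_late_init() */
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}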
Cc: Andre Przywara Cc: Catalin Marinas Fixes: e039ee4ee3fcf174 ("arm64: add alternative runtime patching") Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 4 ++++ arch/arm64/include/asm/smp_plat.h | 2 ++ arch/arm64/kernel/setup.c | 12 ++++++++++++ arch/arm64/kernel/smp.c | 2 +- 4 files changed, 19 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1b8e97331ffb..0d46debbc9a7 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -470,6 +470,10 @@ config HOTPLUG_CPU source kernel/Kconfig.preempt +config UP_LATE_INIT + def_bool y + depends on !SMP + config HZ int default 100 diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index 59e282311b58..8dcd61e32176 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void) extern u64 __cpu_logical_map[NR_CPUS]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] +void __init do_post_cpus_up_work(void); + #endif /* __ASM_SMP_PLAT_H */ diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index e8420f635bd4..781f4697dc26 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -207,6 +207,18 @@ static void __init smp_build_mpidr_hash(void) } #endif +void __init do_post_cpus_up_work(void) +{ + apply_alternatives_all(); +} + +#ifdef CONFIG_UP_LATE_INIT +void __init up_late_init(void) +{ + do_post_cpus_up_work(); +} +#endif /* CONFIG_UP_LATE_INIT */ + static void __init setup_processor(void) { struct cpu_info *cpu_info; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 328b8ce4b007..4257369341e4 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -309,7 +309,7 @@ void cpu_die(void) void __init smp_cpus_done(unsigned int max_cpus) { pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); - apply_alternatives_all(); + do_post_cpus_up_work(); } void __init smp_prepare_boot_cpu(void) -- cgit v1.2.3 From 424a383824faecce246691694fd5546859dac511 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Mar 2015 16:14:36 +0000 Subject: arm64: fix hyp mode mismatch detection Commit 828e9834e9a5b7e6 ("arm64: head: create a new function for setting the boot_cpu_mode flag") added BOOT_CPU_MODE_EL1, a nonzero value replacing uses of zero. However it failed to update __boot_cpu_mode appropriately. A CPU booted at EL2 writes BOOT_CPU_MODE_EL2 to __boot_cpu_mode[0], and a CPU booted at EL1 writes BOOT_CPU_MODE_EL1 to __boot_cpu_mode[1]. Later is_hyp_mode_mismatched() determines there to be a mismatch if __boot_cpu_mode[0] != __boot_cpu_mode[1]. If all CPUs are booted at EL1, __boot_cpu_mode[0] will be set to BOOT_CPU_MODE_EL1, but __boot_cpu_mode[1] will retain its initial value of zero, and is_hyp_mode_mismatched will erroneously determine that the boot modes are mismatched. This hasn't been a problem so far, but later patches which will make use of is_hyp_mode_mismatched() expect it to work correctly. This patch initialises __boot_cpu_mode[1] to BOOT_CPU_MODE_EL1, fixing the erroneous mismatch detection when all CPUs are booted at EL1. 
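For context, the mismatch check referred to above amounts to comparing the two flag slots (shown here in simplified form; the real definitions live in asm/virt.h):

extern u32 __boot_cpu_mode[2];

static inline bool is_hyp_mode_mismatched(void)
{
	/*
	 * With __boot_cpu_mode[1] left at 0, an all-EL1 boot compares
	 * BOOT_CPU_MODE_EL1 != 0 and wrongly reports a mismatch.
	 */
	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}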
Cc: Catalin Marinas Cc: Marc Zyngier Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 07f930540f4a..d17649d39392 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -588,7 +588,7 @@ ENDPROC(set_cpu_boot_mode_flag) .align L1_CACHE_SHIFT ENTRY(__boot_cpu_mode) .long BOOT_CPU_MODE_EL2 - .long 0 + .long BOOT_CPU_MODE_EL1 .popsection #ifdef CONFIG_SMP -- cgit v1.2.3 From 667f3fd3950c123fd62d3b15d9db80926e75f1f0 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Mar 2015 16:14:37 +0000 Subject: arm64: log CPU boot modes We currently don't log the boot mode for arm64 as we do for arm, and without KVM the user is provided with no indication as to which mode(s) CPUs were booted in, which can seriously hinder debugging in some cases. Add logging to the boot path once all CPUs are up. Where CPUs are mismatched in violation of the boot protocol, WARN and set a taint (as we do for CPU other CPU feature mismatches) given that the firmware/bootloader is buggy and should be fixed. Cc: Catalin Marinas Cc: Marc Zyngier Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/setup.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 781f4697dc26..14808947bf46 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -62,6 +62,7 @@ #include #include #include +#include unsigned int processor_id; EXPORT_SYMBOL(processor_id); @@ -207,8 +208,20 @@ static void __init smp_build_mpidr_hash(void) } #endif +static void __init hyp_mode_check(void) +{ + if (is_hyp_mode_available()) + pr_info("CPU: All CPU(s) started at EL2\n"); + else if (is_hyp_mode_mismatched()) + WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC, + "CPU: CPUs started in inconsistent modes"); + else + pr_info("CPU: All CPU(s) started at EL1\n"); +} + void __init do_post_cpus_up_work(void) { + hyp_mode_check(); apply_alternatives_all(); } -- cgit v1.2.3 From ad08fd494bf00c03ae372e0bbd9cefa37bf608d6 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Mon, 16 Mar 2015 09:30:39 +0000 Subject: arm64: Adjust EFI libstub object include logic Commit f4f75ad5 ("efi: efistub: Convert into static library") introduced a static library for EFI stub, libstub. The EFI libstub directory is referenced by the kernel build system via a obj subdirectory rule in: drivers/firmware/efi/Makefile Unfortunately, arm64 also references the EFI libstub via: libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/ If we're unlucky, the kernel build system can enter libstub via two simultaneous threads resulting in build failures such as: fixdep: error opening depfile: drivers/firmware/efi/libstub/.efi-stub-helper.o.d: No such file or directory scripts/Makefile.build:257: recipe for target 'drivers/firmware/efi/libstub/efi-stub-helper.o' failed make[1]: *** [drivers/firmware/efi/libstub/efi-stub-helper.o] Error 2 Makefile:939: recipe for target 'drivers/firmware/efi/libstub' failed make: *** [drivers/firmware/efi/libstub] Error 2 make: *** Waiting for unfinished jobs.... This patch adjusts the arm64 Makefile to reference the compiled library explicitly (as is currently done in x86), rather than the directory. 
Fixes: f4f75ad5 efi: efistub: Convert into static library Signed-off-by: Steve Capper Signed-off-by: Will Deacon --- arch/arm64/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 69ceedc982a5..4d2a925998f9 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/ core-$(CONFIG_XEN) += arch/arm64/xen/ core-$(CONFIG_CRYPTO) += arch/arm64/crypto/ libs-y := arch/arm64/lib/ $(libs-y) -libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/ +core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a # Default target when executing plain make KBUILD_IMAGE := Image.gz -- cgit v1.2.3 From 18ccb0cab49ef7868eaf9504f257e1a84683dbbd Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Mon, 16 Mar 2015 16:32:22 +0000 Subject: arm64: fix implementation of mmap2 compat syscall The arm mmap2 syscall takes the offset in units of 4K, thus with 64K pages the offset needs to be scaled to units of pages. Signed-off-by: Andreas Schwab Signed-off-by: Alexander Graf [will: removed redundant lr parameter, localised PAGE_SHIFT #if check] Signed-off-by: Will Deacon --- arch/arm64/include/asm/unistd32.h | 2 +- arch/arm64/kernel/entry32.S | 18 ++++++++++++++++++ arch/arm64/kernel/sys32.c | 1 + 3 files changed, 20 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 27224426e0bf..cef934a90f17 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -406,7 +406,7 @@ __SYSCALL(__NR_vfork, sys_vfork) #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */ #define __NR_mmap2 192 -__SYSCALL(__NR_mmap2, sys_mmap_pgoff) +__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper) #define __NR_truncate64 193 __SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper) #define __NR_ftruncate64 194 diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S index 9a8f6ae2530e..bd9bfaa9269b 100644 --- a/arch/arm64/kernel/entry32.S +++ b/arch/arm64/kernel/entry32.S @@ -19,9 +19,12 @@ */ #include +#include #include #include +#include +#include /* * System call wrappers for the AArch32 compatibility layer. @@ -53,6 +56,21 @@ ENTRY(compat_sys_fstatfs64_wrapper) b compat_sys_fstatfs64 ENDPROC(compat_sys_fstatfs64_wrapper) +/* + * Note: off_4k (w5) is always in units of 4K. If we can't do the + * requested offset because it is not page-aligned, we return -EINVAL. 
+ */ +ENTRY(compat_sys_mmap2_wrapper) +#if PAGE_SHIFT > 12 + tst w5, #~PAGE_MASK >> 12 + b.ne 1f + lsr w5, w5, #PAGE_SHIFT - 12 +#endif + b sys_mmap_pgoff +1: mov x0, #-EINVAL + ret +ENDPROC(compat_sys_mmap2_wrapper) + /* * Wrappers for AArch32 syscalls that either take 64-bit parameters * in registers or that take 32-bit parameters which require sign diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c index 2d5ab3c90b82..a40b1343b819 100644 --- a/arch/arm64/kernel/sys32.c +++ b/arch/arm64/kernel/sys32.c @@ -37,6 +37,7 @@ asmlinkage long compat_sys_readahead_wrapper(void); asmlinkage long compat_sys_fadvise64_64_wrapper(void); asmlinkage long compat_sys_sync_file_range2_wrapper(void); asmlinkage long compat_sys_fallocate_wrapper(void); +asmlinkage long compat_sys_mmap2_wrapper(void); #undef __SYSCALL #define __SYSCALL(nr, sym) [nr] = sym, -- cgit v1.2.3 From a8fcd8b192bb15debe1db276e073c6ba9a3fc953 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 16 Mar 2015 16:32:23 +0000 Subject: arm64: Enable CONFIG_COMPAT also for 64k page size With binutils 2.25 the default alignment for 32bit arm sections changed to have everything 64k aligned. Armv7 binaries built with this binutils version run successfully on an arm64 system. Since effectively there is now the chance to run armv7 code on arm64 even with 64k page size, it doesn't make sense to block people from enabling CONFIG_COMPAT on those configurations. Signed-off-by: Alexander Graf Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0d46debbc9a7..c102d92aad40 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -674,7 +674,7 @@ source "fs/Kconfig.binfmt" config COMPAT bool "Kernel support for 32-bit EL0" - depends on !ARM64_64K_PAGES + depends on !ARM64_64K_PAGES || EXPERT select COMPAT_BINFMT_ELF select HAVE_UID16 select OLD_SIGSUSPEND3 @@ -685,6 +685,10 @@ config COMPAT the user helper functions, VFP support and the ptrace interface are handled appropriately by the kernel. + If you also enabled CONFIG_ARM64_64K_PAGES, please be aware that you + will only be able to execute AArch32 binaries that were compiled with + 64k aligned segments. + If you want to execute 32-bit userspace applications, say Y. config SYSVIPC_COMPAT -- cgit v1.2.3 From e6a2e1b6c24a3993ffbb69a05dda202d2830ad90 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 1 Mar 2015 10:14:39 +0000 Subject: arm64: mm: unexport set_memory_ro and set_memory_rw This effectively unexports set_memory_ro and set_memory_rw functions from commit 11d91a770f1f ("arm64: Add CONFIG_DEBUG_SET_MODULE_RONX support"). No module user of those is in mainline kernel and we explicitly do not want modules to use these functions, as they i.e. RO-protect eBPF (interpreted and JIT'ed) images from malicious modifications/bugs. Outside of eBPF scope, I believe also other set_memory_* functions should be unexported on arm64 due to non-existant mainline module user. Laura mentioned that they have some uses for modules doing set_memory_*, but none that are in mainline and it's unclear if they would ever get there. 
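To illustrate the kind of in-kernel (non-module) user the message refers to, a caller write-protecting a finalised JIT image could look like the sketch below. The helper names are hypothetical; only the set_memory_ro()/set_memory_rw() signatures are taken from the code being patched.

/* Hypothetical sketch, not part of the patch */
static void jit_image_lock_ro(void *image, int npages)
{
	set_memory_ro((unsigned long)image, npages);
}

static void jit_image_unlock(void *image, int npages)
{
	set_memory_rw((unsigned long)image, npages);
}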
Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: Laura Abbott Signed-off-by: Will Deacon --- arch/arm64/mm/pageattr.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 1d3ec3ddd84b..e47ed1c5dce1 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -73,7 +73,6 @@ int set_memory_ro(unsigned long addr, int numpages) __pgprot(PTE_RDONLY), __pgprot(PTE_WRITE)); } -EXPORT_SYMBOL_GPL(set_memory_ro); int set_memory_rw(unsigned long addr, int numpages) { @@ -81,7 +80,6 @@ int set_memory_rw(unsigned long addr, int numpages) __pgprot(PTE_WRITE), __pgprot(PTE_RDONLY)); } -EXPORT_SYMBOL_GPL(set_memory_rw); int set_memory_nx(unsigned long addr, int numpages) { -- cgit v1.2.3 From 19fc577579c86fec6e2523baeb457b02e939796f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 4 Mar 2015 13:27:34 +0000 Subject: arm64: fixmap: make FIX_TEXT_POKE0 permanent The FIX_TEXT_POKE0 is currently at the end of the temporary fixmap slots, despite the fact that it can be used at any point during runtime (e.g. for poking the text of loaded modules), and thus should be a permanent fixmap slot (as is the case on arm and x86). This patch moves FIX_TEXT_POKE0 into the set of permanent fixmap slots. Cc: Catalin Marinas Cc: Kees Cook Acked-by: Ard Biesheuvel Acked-by: Laura Abbott Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/fixmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index defa0ff98250..926495686554 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -33,6 +33,7 @@ enum fixed_addresses { FIX_HOLE, FIX_EARLYCON_MEM_BASE, + FIX_TEXT_POKE0, __end_of_permanent_fixed_addresses, /* @@ -49,7 +50,6 @@ enum fixed_addresses { FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, - FIX_TEXT_POKE0, __end_of_fixed_addresses }; -- cgit v1.2.3 From b63dbef93f91d56cb4385fdd8d1765201d451136 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 4 Mar 2015 13:27:35 +0000 Subject: arm64: fixmap: check idx is definitely valid Fixmap indices are in the interval (FIX_HOLE, __end_of_fixed_addresses), but in __set_fixmap we only check idx <= __end_of_fixed_addresses, and therefore indices <= FIX_HOLE are erroneously accepted. If called with such an idx, __set_fixmap may corrupt page tables outside of the fixmap region. This patch ensures that we validate the idx against both endpoints of the interval. 
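Spelled out as a predicate, the accepted interval is the open range (FIX_HOLE, __end_of_fixed_addresses); the helper below is purely illustrative, since the patch open-codes the same condition in a BUG_ON():

/* Illustrative only -- equivalent to the negated BUG_ON() condition */
static bool fixmap_idx_valid(enum fixed_addresses idx)
{
	return idx > FIX_HOLE && idx < __end_of_fixed_addresses;
}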
Cc: Catalin Marinas Cc: Kees Cook Acked-by: Ard Biesheuvel Acked-by: Laura Abbott Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/mm/mmu.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c6daaf6c6f97..c9267acb699c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -627,10 +627,7 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long addr = __fix_to_virt(idx); pte_t *pte; - if (idx >= __end_of_fixed_addresses) { - BUG(); - return; - } + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); pte = fixmap_pte(addr); -- cgit v1.2.3 From 4a97abd44329bf7b9c57f020224da5f823c9c9ea Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Mar 2015 18:05:13 +0000 Subject: arm64/crypto: issue aese/aesmc instructions in pairs This changes the AES core transform implementations to issue aese/aesmc (and aesd/aesimc) in pairs. This enables a micro-architectural optimization in recent Cortex-A5x cores that improves performance by 50-90%. Measured performance in cycles per byte (Cortex-A57): CBC enc CBC dec CTR before 3.64 1.34 1.32 after 1.95 0.85 0.93 Note that this results in a ~5% performance decrease for older cores. Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/crypto/aes-ce-ccm-core.S | 12 ++++++------ arch/arm64/crypto/aes-ce.S | 10 +++------- 2 files changed, 9 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index 432e4841cd81..a2a7fbcacc14 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -101,19 +101,19 @@ ENTRY(ce_aes_ccm_final) 0: mov v4.16b, v3.16b 1: ld1 {v5.2d}, [x2], #16 /* load next round key */ aese v0.16b, v4.16b - aese v1.16b, v4.16b aesmc v0.16b, v0.16b + aese v1.16b, v4.16b aesmc v1.16b, v1.16b 2: ld1 {v3.2d}, [x2], #16 /* load next round key */ aese v0.16b, v5.16b - aese v1.16b, v5.16b aesmc v0.16b, v0.16b + aese v1.16b, v5.16b aesmc v1.16b, v1.16b 3: ld1 {v4.2d}, [x2], #16 /* load next round key */ subs w3, w3, #3 aese v0.16b, v3.16b - aese v1.16b, v3.16b aesmc v0.16b, v0.16b + aese v1.16b, v3.16b aesmc v1.16b, v1.16b bpl 1b aese v0.16b, v4.16b @@ -146,19 +146,19 @@ ENDPROC(ce_aes_ccm_final) ld1 {v5.2d}, [x10], #16 /* load 2nd round key */ 2: /* inner loop: 3 rounds, 2x interleaved */ aese v0.16b, v4.16b - aese v1.16b, v4.16b aesmc v0.16b, v0.16b + aese v1.16b, v4.16b aesmc v1.16b, v1.16b 3: ld1 {v3.2d}, [x10], #16 /* load next round key */ aese v0.16b, v5.16b - aese v1.16b, v5.16b aesmc v0.16b, v0.16b + aese v1.16b, v5.16b aesmc v1.16b, v1.16b 4: ld1 {v4.2d}, [x10], #16 /* load next round key */ subs w7, w7, #3 aese v0.16b, v3.16b - aese v1.16b, v3.16b aesmc v0.16b, v0.16b + aese v1.16b, v3.16b aesmc v1.16b, v1.16b ld1 {v5.2d}, [x10], #16 /* load next round key */ bpl 2b diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S index 685a18f731eb..78f3cfe92c08 100644 --- a/arch/arm64/crypto/aes-ce.S +++ b/arch/arm64/crypto/aes-ce.S @@ -45,18 +45,14 @@ .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3 aes\de \i0\().16b, \k\().16b - .ifnb \i1 - aes\de \i1\().16b, \k\().16b - .ifnb \i3 - aes\de \i2\().16b, \k\().16b - aes\de \i3\().16b, \k\().16b - .endif - .endif aes\mc \i0\().16b, \i0\().16b .ifnb \i1 + aes\de \i1\().16b, \k\().16b aes\mc \i1\().16b, \i1\().16b .ifnb \i3 + aes\de \i2\().16b, \k\().16b aes\mc \i2\().16b, \i2\().16b + aes\de \i3\().16b, \k\().16b aes\mc \i3\().16b, 
\i3\().16b .endif .endif -- cgit v1.2.3 From 06f75a1f6200042aa36ad40afb44dd72107b25d6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 19 Mar 2015 16:42:26 +0000 Subject: ARM, arm64: kvm: get rid of the bounce page The HYP init bounce page is a runtime construct that ensures that the HYP init code does not cross a page boundary. However, this is something we can do perfectly well at build time, by aligning the code appropriately. For arm64, we just align to 4 KB, and enforce that the code size is less than 4 KB, regardless of the chosen page size. For ARM, the whole code is less than 256 bytes, so we tweak the linker script to align at a power of 2 upper bound of the code size Note that this also fixes a benign off-by-one error in the original bounce page code, where a bounce page would be allocated unnecessarily if the code was exactly 1 page in size. On ARM, it also fixes an issue with very large kernels reported by Arnd Bergmann, where stub sections with linker emitted veneers could erroneously trigger the size/alignment ASSERT() in the linker script. Tested-by: Marc Zyngier Reviewed-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm/kernel/vmlinux.lds.S | 18 +++++++++++++++--- arch/arm/kvm/init.S | 3 +++ arch/arm/kvm/mmu.c | 42 +++++------------------------------------ arch/arm64/kernel/vmlinux.lds.S | 17 +++++++++++------ 4 files changed, 34 insertions(+), 46 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index b31aa73e8076..ba65f1217310 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -23,11 +23,20 @@ VMLINUX_SYMBOL(__idmap_text_start) = .; \ *(.idmap.text) \ VMLINUX_SYMBOL(__idmap_text_end) = .; \ - . = ALIGN(32); \ + . = ALIGN(1 << LOG2CEIL(__hyp_idmap_size)); \ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ *(.hyp.idmap.text) \ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; +/* + * If the HYP idmap .text section is populated, it needs to be positioned + * such that it will not cross a page boundary in the final output image. + * So align it to the section size rounded up to the next power of 2. + * If __hyp_idmap_size is undefined, the section will be empty so define + * it as 0 in that case. + */ +PROVIDE(__hyp_idmap_size = 0); + #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) #define ARM_CPU_KEEP(x) x @@ -346,8 +355,11 @@ SECTIONS */ ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") + /* - * The HYP init code can't be more than a page long. + * The HYP init code can't be more than a page long, + * and should not cross a page boundary. * The above comment applies as well. 
*/ -ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big") +ASSERT((__hyp_idmap_text_start & ~PAGE_MASK) + __hyp_idmap_size <= PAGE_SIZE, + "HYP init code too big or misaligned") diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 3988e72d16ff..11fb1d56f449 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -157,3 +157,6 @@ target: @ We're now in the trampoline code, switch page tables __kvm_hyp_init_end: .popsection + + .global __hyp_idmap_size + .set __hyp_idmap_size, __kvm_hyp_init_end - __kvm_hyp_init diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859bc3e11..42a24d6b003b 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -37,7 +37,6 @@ static pgd_t *boot_hyp_pgd; static pgd_t *hyp_pgd; static DEFINE_MUTEX(kvm_hyp_pgd_mutex); -static void *init_bounce_page; static unsigned long hyp_idmap_start; static unsigned long hyp_idmap_end; static phys_addr_t hyp_idmap_vector; @@ -405,9 +404,6 @@ void free_boot_hyp_pgd(void) if (hyp_pgd) unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); - free_page((unsigned long)init_bounce_page); - init_bounce_page = NULL; - mutex_unlock(&kvm_hyp_pgd_mutex); } @@ -1498,39 +1494,11 @@ int kvm_mmu_init(void) hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); - if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) { - /* - * Our init code is crossing a page boundary. Allocate - * a bounce page, copy the code over and use that. - */ - size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; - phys_addr_t phys_base; - - init_bounce_page = (void *)__get_free_page(GFP_KERNEL); - if (!init_bounce_page) { - kvm_err("Couldn't allocate HYP init bounce page\n"); - err = -ENOMEM; - goto out; - } - - memcpy(init_bounce_page, __hyp_idmap_text_start, len); - /* - * Warning: the code we just copied to the bounce page - * must be flushed to the point of coherency. - * Otherwise, the data may be sitting in L2, and HYP - * mode won't be able to observe it as it runs with - * caches off at that point. - */ - kvm_flush_dcache_to_poc(init_bounce_page, len); - - phys_base = kvm_virt_to_phys(init_bounce_page); - hyp_idmap_vector += phys_base - hyp_idmap_start; - hyp_idmap_start = phys_base; - hyp_idmap_end = phys_base + len; - - kvm_info("Using HYP init bounce page @%lx\n", - (unsigned long)phys_base); - } + /* + * We rely on the linker script to ensure at build time that the HYP + * init code does not cross a page boundary. + */ + BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 5d9d2dca530d..a2c29865c3fe 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -23,10 +23,14 @@ jiffies = jiffies_64; #define HYPERVISOR_TEXT \ /* \ - * Force the alignment to be compatible with \ - * the vectors requirements \ + * Align to 4 KB so that \ + * a) the HYP vector table is at its minimum \ + * alignment of 2048 bytes \ + * b) the HYP init code will not cross a page \ + * boundary if its size does not exceed \ + * 4 KB (see related ASSERT() below) \ */ \ - . = ALIGN(2048); \ + . 
= ALIGN(SZ_4K); \ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ *(.hyp.idmap.text) \ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ @@ -163,10 +167,11 @@ SECTIONS } /* - * The HYP init code can't be more than a page long. + * The HYP init code can't be more than a page long, + * and should not cross a page boundary. */ -ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), - "HYP init code too big") +ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, + "HYP init code too big or misaligned") /* * If padding is applied before .head.text, virt<->phys conversions will fail. -- cgit v1.2.3 From 8fff105e13041e49b82f92eef034f363a6b1c071 Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Tue, 17 Mar 2015 18:14:59 +0000 Subject: arm64: perf: reject groups spanning multiple HW PMUs The perf core implicitly rejects events spanning multiple HW PMUs, as in these cases the event->ctx will differ. However this validation is performed after pmu::event_init() is called in perf_init_event(), and thus pmu::event_init() may be called with a group leader from a different HW PMU. The ARM64 PMU driver does not take this fact into account, and when validating groups assumes that it can call to_arm_pmu(event->pmu) for any HW event. When the event in question is from another HW PMU this is wrong, and results in dereferencing garbage. This patch updates the ARM64 PMU driver to first test for and reject events from other PMUs, moving the to_arm_pmu and related logic after this test. Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with a CCI PMU present: Bad mode in Synchronous Abort handler detected, code 0x86000006 -- IABT (current EL) CPU: 0 PID: 1371 Comm: perf_fuzzer Not tainted 3.19.0+ #249 Hardware name: V2F-1XV7 Cortex-A53x2 SMM (DT) task: ffffffc07c73a280 ti: ffffffc07b0a0000 task.ti: ffffffc07b0a0000 PC is at 0x0 LR is at validate_event+0x90/0xa8 pc : [<0000000000000000>] lr : [] pstate: 00000145 sp : ffffffc07b0a3ba0 [< (null)>] (null) [] armpmu_event_init+0x174/0x3cc [] perf_try_init_event+0x34/0x70 [] perf_init_event+0xe0/0x10c [] perf_event_alloc+0x288/0x358 [] SyS_perf_event_open+0x464/0x98c Code: bad PC value Also cleans up the code to use the arm_pmu only when we know that we are dealing with an arm pmu event. Cc: Will Deacon Acked-by: Mark Rutland Acked-by: Peter Ziljstra (Intel) Signed-off-by: Suzuki K. Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 25a5308744b1..68a74151fa6c 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -322,22 +322,31 @@ out: } static int -validate_event(struct pmu_hw_events *hw_events, - struct perf_event *event) +validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, + struct perf_event *event) { - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct arm_pmu *armpmu; struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; if (is_software_event(event)) return 1; + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). 
+ */ + if (event->pmu != pmu) + return 0; + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; + armpmu = to_arm_pmu(event->pmu); return armpmu->get_event_idx(hw_events, &fake_event) >= 0; } @@ -355,15 +364,15 @@ validate_group(struct perf_event *event) memset(fake_used_mask, 0, sizeof(fake_used_mask)); fake_pmu.used_mask = fake_used_mask; - if (!validate_event(&fake_pmu, leader)) + if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(&fake_pmu, sibling)) + if (!validate_event(event->pmu, &fake_pmu, sibling)) return -EINVAL; } - if (!validate_event(&fake_pmu, event)) + if (!validate_event(event->pmu, &fake_pmu, event)) return -EINVAL; return 0; -- cgit v1.2.3 From 62aa9655b8a8eaa4cf86d091d977482da8489ed4 Mon Sep 17 00:00:00 2001 From: Ganapatrao Kulkarni Date: Wed, 18 Mar 2015 11:01:18 +0000 Subject: arm64: kconfig: increase NR_CPUS range to 2-4096. Raise the maximum CPU limit to 4096 in preparation for upcoming platforms with large core counts. Acked-by: Arnd Bergmann Signed-off-by: Ganapatrao Kulkarni Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c102d92aad40..02f67a9d7426 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -455,8 +455,8 @@ config SCHED_SMT places. If unsure say N here. config NR_CPUS - int "Maximum number of CPUs (2-64)" - range 2 64 + int "Maximum number of CPUs (2-4096)" + range 2 4096 depends on SMP # These have to remain sorted largest to smallest default "64" -- cgit v1.2.3 From a591ede4cd1cac02d3398a9ad332bd0bba460efe Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 18 Mar 2015 14:55:20 +0000 Subject: arm64: Get rid of struct cpu_table struct cpu_table is an artifact left from the (very) early days of the arm64 port, and its only real use is to allow the most beautiful "AArch64 Processor" string to be displayed at boot time. Really? Yes, really. Let's get rid of it. In order to avoid another BogoMips-gate, the aforementioned string is preserved. Acked-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/cputable.h | 30 ---------------- arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/asm-offsets.c | 4 --- arch/arm64/kernel/cputable.c | 33 ----------------- arch/arm64/kernel/head.S | 76 +++------------------------------------ arch/arm64/kernel/setup.c | 16 ++------- 6 files changed, 8 insertions(+), 153 deletions(-) delete mode 100644 arch/arm64/include/asm/cputable.h delete mode 100644 arch/arm64/kernel/cputable.c (limited to 'arch') diff --git a/arch/arm64/include/asm/cputable.h b/arch/arm64/include/asm/cputable.h deleted file mode 100644 index e3bd983d3661..000000000000 --- a/arch/arm64/include/asm/cputable.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * arch/arm64/include/asm/cputable.h - * - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef __ASM_CPUTABLE_H -#define __ASM_CPUTABLE_H - -struct cpu_info { - unsigned int cpu_id_val; - unsigned int cpu_id_mask; - const char *cpu_name; - unsigned long (*cpu_setup)(void); -}; - -extern struct cpu_info *lookup_processor_type(unsigned int); - -#endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 5ee07eee80c2..d5e70747c7a2 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -12,7 +12,7 @@ CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg # Object file lists. -arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ +arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index f7fa65d4c352..14dd3d1afa57 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -71,9 +70,6 @@ int main(void) BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); - DEFINE(CPU_INFO_SZ, sizeof(struct cpu_info)); - DEFINE(CPU_INFO_SETUP, offsetof(struct cpu_info, cpu_setup)); - BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c deleted file mode 100644 index fd3993cb060f..000000000000 --- a/arch/arm64/kernel/cputable.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * arch/arm64/kernel/cputable.c - * - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include - -#include - -extern unsigned long __cpu_setup(void); - -struct cpu_info cpu_table[] = { - { - .cpu_id_val = 0x000f0000, - .cpu_id_mask = 0x000f0000, - .cpu_name = "AArch64 Processor", - .cpu_setup = __cpu_setup, - }, - { /* Empty */ }, -}; diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index d17649d39392..ebb9e630230a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -245,22 +245,12 @@ ENTRY(stext) bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl set_cpu_boot_mode_flag mrs x22, midr_el1 // x22=cpuid - mov x0, x22 - bl lookup_processor_type - mov x23, x0 // x23=current cpu_table - /* - * __error_p may end up out of range for cbz if text areas are - * aligned up to section sizes. - */ - cbnz x23, 1f // invalid processor (x23=0)? 
- b __error_p -1: + bl __vet_fdt bl __create_page_tables // x25=TTBR0, x26=TTBR1 /* - * The following calls CPU specific code in a position independent - * manner. See arch/arm64/mm/proc.S for details. x23 = base of - * cpu_info structure selected by lookup_processor_type above. + * The following calls CPU setup code, see arch/arm64/mm/proc.S for + * details. * On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. */ @@ -268,9 +258,7 @@ ENTRY(stext) // MMU has been enabled adrp lr, __enable_mmu // return (PIC) address add lr, lr, #:lo12:__enable_mmu - ldr x12, [x23, #CPU_INFO_SETUP] - add x12, x12, x28 // __virt_to_phys - br x12 // initialise processor + b __cpu_setup // initialise processor ENDPROC(stext) /* @@ -634,15 +622,9 @@ ENTRY(secondary_startup) * Common entry point for secondary CPUs. */ mrs x22, midr_el1 // x22=cpuid - mov x0, x22 - bl lookup_processor_type - mov x23, x0 // x23=current cpu_table - cbz x23, __error_p // invalid processor (x23=0)? pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 - ldr x12, [x23, #CPU_INFO_SETUP] - add x12, x12, x28 // __virt_to_phys - blr x12 // initialise processor + bl __cpu_setup // initialise processor ldr x21, =secondary_data ldr x27, =__secondary_switched // address to jump to after enabling the MMU @@ -708,51 +690,3 @@ ENDPROC(__calc_phys_offset) .align 3 1: .quad . .quad PAGE_OFFSET - -/* - * Exception handling. Something went wrong and we can't proceed. We ought to - * tell the user, but since we don't have any guarantee that we're even - * running on the right architecture, we do virtually nothing. - */ -__error_p: -ENDPROC(__error_p) - -__error: -1: nop - b 1b -ENDPROC(__error) - -/* - * This function gets the processor ID in w0 and searches the cpu_table[] for - * a match. It returns a pointer to the struct cpu_info it found. The - * cpu_table[] must end with an empty (all zeros) structure. - * - * This routine can be called via C code and it needs to work with the MMU - * both disabled and enabled (the offset is calculated automatically). - */ -ENTRY(lookup_processor_type) - adr x1, __lookup_processor_type_data - ldp x2, x3, [x1] - sub x1, x1, x2 // get offset between VA and PA - add x3, x3, x1 // convert VA to PA -1: - ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask - cbz w5, 2f // end of list? - and w6, w6, w0 - cmp w5, w6 - b.eq 3f - add x3, x3, #CPU_INFO_SZ - b 1b -2: - mov x3, #0 // unknown processor -3: - mov x0, x3 - ret -ENDPROC(lookup_processor_type) - - .align 3 - .type __lookup_processor_type_data, %object -__lookup_processor_type_data: - .quad . - .quad cpu_table - .size __lookup_processor_type_data, . 
- __lookup_processor_type_data diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 14808947bf46..3852405d70b5 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -84,7 +83,6 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); -static const char *cpu_name; phys_addr_t __fdt_pointer __initdata; /* @@ -234,22 +232,12 @@ void __init up_late_init(void) static void __init setup_processor(void) { - struct cpu_info *cpu_info; u64 features, block; u32 cwg; int cls; - cpu_info = lookup_processor_type(read_cpuid_id()); - if (!cpu_info) { - printk("CPU configuration botched (ID %08x), unable to continue.\n", - read_cpuid_id()); - while (1); - } - - cpu_name = cpu_info->cpu_name; - - printk("CPU: %s [%08x] revision %d\n", - cpu_name, read_cpuid_id(), read_cpuid_id() & 15); + printk("CPU: AArch64 Processor [%08x] revision %d\n", + read_cpuid_id(), read_cpuid_id() & 15); sprintf(init_utsname()->machine, ELF_PLATFORM); elf_hwcap = 0; -- cgit v1.2.3 From b784a5d97d0af4835dd0125a3e0e5d0fd48128d6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 4 Mar 2015 19:45:38 +0100 Subject: arm64: add macros for common adrp usages The adrp instruction is mostly used in combination with either an add, a ldr or a str instruction with the low bits of the referenced symbol in the 12-bit immediate of the followup instruction. Introduce the macros adr_l, ldr_l and str_l that encapsulate these common patterns. Tested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 48 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 750bac4e637e..144b64ad96c3 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -159,4 +159,52 @@ lr .req x30 // link register orr \rd, \lbits, \hbits, lsl #32 .endm +/* + * Pseudo-ops for PC-relative adr/ldr/str , where + * is within the range +/- 4 GB of the PC. + */ + /* + * @dst: destination register (64 bit wide) + * @sym: name of the symbol + * @tmp: optional scratch register to be used if == sp, which + * is not allowed in an adrp instruction + */ + .macro adr_l, dst, sym, tmp= + .ifb \tmp + adrp \dst, \sym + add \dst, \dst, :lo12:\sym + .else + adrp \tmp, \sym + add \dst, \tmp, :lo12:\sym + .endif + .endm + + /* + * @dst: destination register (32 or 64 bit wide) + * @sym: name of the symbol + * @tmp: optional 64-bit scratch register to be used if is a + * 32-bit wide register, in which case it cannot be used to hold + * the address + */ + .macro ldr_l, dst, sym, tmp= + .ifb \tmp + adrp \dst, \sym + ldr \dst, [\dst, :lo12:\sym] + .else + adrp \tmp, \sym + ldr \dst, [\tmp, :lo12:\sym] + .endif + .endm + + /* + * @src: source register (32 or 64 bit wide) + * @sym: name of the symbol + * @tmp: mandatory 64-bit scratch register to calculate the address + * while needs to be preserved. 
+ */ + .macro str_l, src, sym, tmp + adrp \tmp, \sym + str \src, [\tmp, :lo12:\sym] + .endm + #endif /* __ASM_ASSEMBLER_H */ -- cgit v1.2.3 From a44ef51799109dccba751240e84ca2da937a88ed Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 4 Mar 2015 11:49:32 +0100 Subject: arm64: remove processor_id The global processor_id is assigned the MIDR_EL1 value of the boot CPU in the early init code, but is never referenced afterwards. As the relevance of the MIDR_EL1 value of the boot CPU is debatable anyway, especially under big.LITTLE, let's remove it before anyone starts using it. Tested-by: Mark Rutland Reviewed-by: Mark Rutland Reviewed-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 7 +------ arch/arm64/kernel/setup.c | 3 --- 2 files changed, 1 insertion(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index ebb9e630230a..88f14a77eac0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -244,7 +244,6 @@ ENTRY(stext) bl el2_setup // Drop to EL1, w20=cpu_boot_mode bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl set_cpu_boot_mode_flag - mrs x22, midr_el1 // x22=cpuid bl __vet_fdt bl __create_page_tables // x25=TTBR0, x26=TTBR1 @@ -427,7 +426,6 @@ __switch_data: .quad __mmap_switched .quad __bss_start // x6 .quad __bss_stop // x7 - .quad processor_id // x4 .quad __fdt_pointer // x5 .quad memstart_addr // x6 .quad init_thread_union + THREAD_START_SP // sp @@ -445,11 +443,10 @@ __mmap_switched: str xzr, [x6], #8 // Clear BSS b 1b 2: - ldp x4, x5, [x3], #16 + ldr x5, [x3], #8 ldr x6, [x3], #8 ldr x16, [x3] mov sp, x16 - str x22, [x4] // Save processor ID str x21, [x5] // Save FDT pointer str x24, [x6] // Save PHYS_OFFSET mov x29, #0 @@ -621,8 +618,6 @@ ENTRY(secondary_startup) /* * Common entry point for secondary CPUs. */ - mrs x22, midr_el1 // x22=cpuid - pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 bl __cpu_setup // initialise processor diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 3852405d70b5..1783b38cf4c0 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -63,9 +63,6 @@ #include #include -unsigned int processor_id; -EXPORT_SYMBOL(processor_id); - unsigned long elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); -- cgit v1.2.3 From a871d354f795c4960543fb44c9b59af63367d6cf Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 4 Mar 2015 11:51:48 +0100 Subject: arm64: remove __switch_data object from head.S This removes the confusing __switch_data object from head.S, and replaces it with standard PC-relative references to the various symbols it encapsulates. Reviewed-by: Catalin Marinas Tested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 29 ++++++++--------------------- 1 file changed, 8 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 88f14a77eac0..42ff10967dcc 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -253,7 +253,7 @@ ENTRY(stext) * On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. 
*/ - ldr x27, __switch_data // address to jump to after + ldr x27, =__mmap_switched // address to jump to after // MMU has been enabled adrp lr, __enable_mmu // return (PIC) address add lr, lr, #:lo12:__enable_mmu @@ -420,35 +420,22 @@ __create_page_tables: ENDPROC(__create_page_tables) .ltorg - .align 3 - .type __switch_data, %object -__switch_data: - .quad __mmap_switched - .quad __bss_start // x6 - .quad __bss_stop // x7 - .quad __fdt_pointer // x5 - .quad memstart_addr // x6 - .quad init_thread_union + THREAD_START_SP // sp - /* - * The following fragment of code is executed with the MMU on in MMU mode, and - * uses absolute addresses; this is not position independent. + * The following fragment of code is executed with the MMU enabled. */ + .set initial_sp, init_thread_union + THREAD_START_SP __mmap_switched: - adr x3, __switch_data + 8 + adr_l x6, __bss_start + adr_l x7, __bss_stop - ldp x6, x7, [x3], #16 1: cmp x6, x7 b.hs 2f str xzr, [x6], #8 // Clear BSS b 1b 2: - ldr x5, [x3], #8 - ldr x6, [x3], #8 - ldr x16, [x3] - mov sp, x16 - str x21, [x5] // Save FDT pointer - str x24, [x6] // Save PHYS_OFFSET + adr_l sp, initial_sp, x4 + str_l x21, __fdt_pointer, x5 // Save FDT pointer + str_l x24, memstart_addr, x6 // Save PHYS_OFFSET mov x29, #0 b start_kernel ENDPROC(__mmap_switched) -- cgit v1.2.3 From b1c98297fe0c6e2899ede03fc3b831f36e19fb76 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 10 Mar 2015 15:00:03 +0100 Subject: arm64: use PC-relative reference for secondary_holding_pen_release Replace the confusing virtual/physical address arithmetic with a simple PC-relative reference. Tested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 42ff10967dcc..818213186dac 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -564,10 +564,6 @@ ENTRY(__boot_cpu_mode) .popsection #ifdef CONFIG_SMP - .align 3 -1: .quad . - .quad secondary_holding_pen_release - /* * This provides a "holding pen" for platforms to hold all secondary * cores are held until we're ready for them to initialise. @@ -579,10 +575,7 @@ ENTRY(secondary_holding_pen) mrs x0, mpidr_el1 ldr x1, =MPIDR_HWID_BITMASK and x0, x0, x1 - adr x1, 1b - ldp x2, x3, [x1] - sub x1, x1, x2 - add x3, x3, x1 + adr_l x3, secondary_holding_pen_release pen: ldr x4, [x3] cmp x4, x0 b.eq secondary_startup -- cgit v1.2.3 From 8b0a95753a34b5c8b2e483e0e5b1d67761e32c5f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Mar 2015 08:59:53 +0100 Subject: arm64: merge __enable_mmu and __turn_mmu_on Enabling of the MMU is split into two functions, with an align and a branch in the middle. On arm64, the entire kernel Image is ID mapped so this is really not necessary, and we can just merge it into a single function. Also replaces an open coded adrp/add reference to __enable_mmu pair with adr_l. 
Tested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 33 +++++++-------------------------- 1 file changed, 7 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 818213186dac..750403c62928 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -255,8 +255,7 @@ ENTRY(stext) */ ldr x27, =__mmap_switched // address to jump to after // MMU has been enabled - adrp lr, __enable_mmu // return (PIC) address - add lr, lr, #:lo12:__enable_mmu + adr_l lr, __enable_mmu // return (PIC) address b __cpu_setup // initialise processor ENDPROC(stext) @@ -615,11 +614,12 @@ ENDPROC(__secondary_switched) #endif /* CONFIG_SMP */ /* - * Setup common bits before finally enabling the MMU. Essentially this is just - * loading the page table pointer and vector base registers. + * Enable the MMU. * - * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on - * the MMU. + * x0 = SCTLR_EL1 value for turning on the MMU. + * x27 = *virtual* address to jump to upon completion + * + * other registers depend on the function called upon completion */ __enable_mmu: ldr x5, =vectors @@ -627,29 +627,10 @@ __enable_mmu: msr ttbr0_el1, x25 // load TTBR0 msr ttbr1_el1, x26 // load TTBR1 isb - b __turn_mmu_on -ENDPROC(__enable_mmu) - -/* - * Enable the MMU. This completely changes the structure of the visible memory - * space. You will not be able to trace execution through this. - * - * x0 = system control register - * x27 = *virtual* address to jump to upon completion - * - * other registers depend on the function called upon completion - * - * We align the entire function to the smallest power of two larger than it to - * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET - * close to the end of a 512MB or 1GB block we might require an additional - * table to map the entire function. - */ - .align 4 -__turn_mmu_on: msr sctlr_el1, x0 isb br x27 -ENDPROC(__turn_mmu_on) +ENDPROC(__enable_mmu) /* * Calculate the start of physical memory. -- cgit v1.2.3 From 6f4d57fa7021efbf135cfa068d56bc5035edffa1 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Mar 2015 09:14:29 +0100 Subject: arm64: remove __calc_phys_offset This removes the function __calc_phys_offset and all open coded virtual to physical address translations using the offset kept in x28. Instead, just use absolute or PC-relative symbol references as appropriate when referring to virtual or physical addresses, respectively. 
Tested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 47 +++++++++++------------------------------------ 1 file changed, 11 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 750403c62928..f5ac337f9598 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -36,7 +36,7 @@ #include #include -#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) +#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) #if (TEXT_OFFSET & 0xfff) != 0 #error TEXT_OFFSET must be at least 4KB aligned @@ -46,13 +46,6 @@ #error TEXT_OFFSET must be less than 2MB #endif - .macro pgtbl, ttb0, ttb1, virt_to_phys - ldr \ttb1, =swapper_pg_dir - ldr \ttb0, =idmap_pg_dir - add \ttb1, \ttb1, \virt_to_phys - add \ttb0, \ttb0, \virt_to_phys - .endm - #ifdef CONFIG_ARM64_64K_PAGES #define BLOCK_SHIFT PAGE_SHIFT #define BLOCK_SIZE PAGE_SIZE @@ -63,7 +56,7 @@ #define TABLE_SHIFT PUD_SHIFT #endif -#define KERNEL_START KERNEL_RAM_VADDR +#define KERNEL_START _text #define KERNEL_END _end /* @@ -242,7 +235,7 @@ section_table: ENTRY(stext) mov x21, x0 // x21=FDT bl el2_setup // Drop to EL1, w20=cpu_boot_mode - bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET + adrp x24, __PHYS_OFFSET bl set_cpu_boot_mode_flag bl __vet_fdt @@ -342,7 +335,8 @@ ENDPROC(__vet_fdt) * - pgd entry for fixed mappings (TTBR1) */ __create_page_tables: - pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses + adrp x25, idmap_pg_dir + adrp x26, swapper_pg_dir mov x27, lr /* @@ -371,12 +365,10 @@ __create_page_tables: * Create the identity mapping. */ mov x0, x25 // idmap_pg_dir - ldr x3, =KERNEL_START - add x3, x3, x28 // __pa(KERNEL_START) + adrp x3, KERNEL_START // __pa(KERNEL_START) create_pgd_entry x0, x3, x5, x6 - ldr x6, =KERNEL_END mov x5, x3 // __pa(KERNEL_START) - add x6, x6, x28 // __pa(KERNEL_END) + adr_l x6, KERNEL_END // __pa(KERNEL_END) create_block_map x0, x7, x3, x5, x6 /* @@ -385,7 +377,7 @@ __create_page_tables: mov x0, x26 // swapper_pg_dir mov x5, #PAGE_OFFSET create_pgd_entry x0, x5, x3, x6 - ldr x6, =KERNEL_END + ldr x6, =KERNEL_END // __va(KERNEL_END) mov x3, x24 // phys offset create_block_map x0, x7, x3, x5, x6 @@ -537,8 +529,7 @@ ENDPROC(el2_setup) * in x20. See arch/arm64/include/asm/virt.h for more info. */ ENTRY(set_cpu_boot_mode_flag) - ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode - add x1, x1, x28 + adr_l x1, __boot_cpu_mode cmp w20, #BOOT_CPU_MODE_EL2 b.ne 1f add x1, x1, #4 @@ -569,7 +560,6 @@ ENTRY(__boot_cpu_mode) */ ENTRY(secondary_holding_pen) bl el2_setup // Drop to EL1, w20=cpu_boot_mode - bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 ldr x1, =MPIDR_HWID_BITMASK @@ -588,7 +578,6 @@ ENDPROC(secondary_holding_pen) */ ENTRY(secondary_entry) bl el2_setup // Drop to EL1 - bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl set_cpu_boot_mode_flag b secondary_startup ENDPROC(secondary_entry) @@ -597,7 +586,8 @@ ENTRY(secondary_startup) /* * Common entry point for secondary CPUs. */ - pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 + adrp x25, idmap_pg_dir + adrp x26, swapper_pg_dir bl __cpu_setup // initialise processor ldr x21, =secondary_data @@ -631,18 +621,3 @@ __enable_mmu: isb br x27 ENDPROC(__enable_mmu) - -/* - * Calculate the start of physical memory. 
- */ -__calc_phys_offset: - adr x0, 1f - ldp x1, x2, [x0] - sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET - add x24, x2, x28 // x24 = PHYS_OFFSET - ret -ENDPROC(__calc_phys_offset) - - .align 3 -1: .quad . - .quad PAGE_OFFSET -- cgit v1.2.3 From da9c177de88679c2948dc9a5e2325b0dff4677b9 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Mar 2015 10:55:12 +0100 Subject: arm64: enforce x1|x2|x3 == 0 upon kernel entry as per boot protocol According to the arm64 boot protocol, registers x1 to x3 should be zero upon kernel entry, and non-zero values are reserved for future use. This future use is going to be problematic if we never enforce the current rules, so start enforcing them now, by emitting a warning if non-zero values are detected. Acked-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 19 ++++++++++++++++++- arch/arm64/kernel/setup.c | 11 +++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index f5ac337f9598..1fdf42041f42 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -233,7 +233,7 @@ section_table: #endif ENTRY(stext) - mov x21, x0 // x21=FDT + bl preserve_boot_args bl el2_setup // Drop to EL1, w20=cpu_boot_mode adrp x24, __PHYS_OFFSET bl set_cpu_boot_mode_flag @@ -252,6 +252,23 @@ ENTRY(stext) b __cpu_setup // initialise processor ENDPROC(stext) +/* + * Preserve the arguments passed by the bootloader in x0 .. x3 + */ +preserve_boot_args: + mov x21, x0 // x21=FDT + + adr_l x0, boot_args // record the contents of + stp x21, x1, [x0] // x0 .. x3 at kernel entry + stp x2, x3, [x0, #16] + + dmb sy // needed before dc ivac with + // MMU off + + add x1, x0, #0x20 // 4 x 8 bytes + b __inval_cache_range // tail call +ENDPROC(preserve_boot_args) + /* * Determine validity of the x21 FDT pointer. * The dtb must be 8-byte aligned and live in the first 512M of memory. diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 1783b38cf4c0..51ef97274b52 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -115,6 +115,11 @@ void __init early_print(const char *str, ...) printk("%s", buf); } +/* + * The recorded values of x0 .. x3 upon kernel entry. + */ +u64 __cacheline_aligned boot_args[4]; + void __init smp_setup_processor_id(void) { u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; @@ -412,6 +417,12 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif #endif + if (boot_args[1] || boot_args[2] || boot_args[3]) { + pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n" + "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n" + "This indicates a broken bootloader or old kernel\n", + boot_args[1], boot_args[2], boot_args[3]); + } } static int __init arm64_device_init(void) -- cgit v1.2.3 From ce47fbb7c8956742a6de06b9706b1c6236339f51 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Mar 2015 19:19:40 +0000 Subject: arm64: proc: remove unused cpu_get_pgd macro cpu_get_pgd isn't used anywhere and is Probably Not What You Want. Remove it before anybody decides to use it. 
Signed-off-by: Will Deacon --- arch/arm64/include/asm/proc-fns.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84f8fb2..4d9ede7b6361 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -41,15 +41,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) -#define cpu_get_pgd() \ -({ \ - unsigned long pg; \ - asm("mrs %0, ttbr0_el1\n" \ - : "=r" (pg)); \ - pg &= ~0xffff000000003ffful; \ - (pgd_t *)phys_to_virt(pg); \ -}) - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* __ASM_PROCFNS_H */ -- cgit v1.2.3 From e60a1fec44a2fe2c85ac406a5c1161ca2957a4fa Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 23 Mar 2015 10:52:57 +0000 Subject: ARM: kvm: implement replacement for ld's LOG2CEIL() Commit 06f75a1f6200 ("ARM, arm64: kvm: get rid of the bounce page") uses ld's builtin function LOG2CEIL() to align the KVM init code to a log2 upper bound of its size. However, this function turns out to be a fairly recent addition to binutils, which breaks the build for older toolchains. So instead, implement a replacement LOG2_ROUNDUP() using the C preprocessor. Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm/kernel/vmlinux.lds.S | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index ba65f1217310..2d760df0d57d 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -11,7 +11,27 @@ #ifdef CONFIG_ARM_KERNMEM_PERMS #include #endif - + +/* + * Poor man's version of LOG2CEIL(), which is + * not available in binutils before v2.24. + */ +#define LOG2_ROUNDUP(size) ( \ + __LOG2_ROUNDUP(size, 2) \ + __LOG2_ROUNDUP(size, 3) \ + __LOG2_ROUNDUP(size, 4) \ + __LOG2_ROUNDUP(size, 5) \ + __LOG2_ROUNDUP(size, 6) \ + __LOG2_ROUNDUP(size, 7) \ + __LOG2_ROUNDUP(size, 8) \ + __LOG2_ROUNDUP(size, 9) \ + __LOG2_ROUNDUP(size, 10) \ + __LOG2_ROUNDUP(size, 11) \ + 12) + +#define __LOG2_ROUNDUP(size, order) \ + (size) <= (1 << order) ? order : + #define PROC_INFO \ . = ALIGN(4); \ VMLINUX_SYMBOL(__proc_info_begin) = .; \ @@ -23,7 +43,7 @@ VMLINUX_SYMBOL(__idmap_text_start) = .; \ *(.idmap.text) \ VMLINUX_SYMBOL(__idmap_text_end) = .; \ - . = ALIGN(1 << LOG2CEIL(__hyp_idmap_size)); \ + . = ALIGN(1 << LOG2_ROUNDUP(__hyp_idmap_size)); \ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ *(.hyp.idmap.text) \ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; -- cgit v1.2.3 From dd006da21646f1c86f0242eb8f527d093303127a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 19 Mar 2015 16:42:27 +0000 Subject: arm64: mm: increase VA range of identity map The page size and the number of translation levels, and hence the supported virtual address range, are build-time configurables on arm64 whose optimal values are use case dependent. However, in the current implementation, if the system's RAM is located at a very high offset, the virtual address range needs to reflect that merely because the identity mapping, which is only used to enable or disable the MMU, requires the extended virtual range to map the physical memory at an equal virtual offset. 
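To make the problem concrete, here is a standalone sketch of the reachability check implied above; the 4K-page, VA_BITS == 39 configuration and the 544 GiB RAM offset are assumptions chosen for the example, not values taken from the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The default ID map can only cover physical addresses below 1 << VA_BITS. */
static bool idmap_reaches(uint64_t phys_end, unsigned int va_bits)
{
	return phys_end <= (UINT64_C(1) << va_bits);
}

int main(void)
{
	/* 39-bit VAs give a 512 GiB range; RAM ending at 544 GiB is out of reach. */
	printf("%d\n", idmap_reaches(UINT64_C(0x8800000000), 39));	/* prints 0 */
	return 0;
}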
This patch relaxes that requirement, by increasing the number of translation levels for the identity mapping only, and only when actually needed, i.e., when system RAM's offset is found to be out of reach at runtime. Tested-by: Laura Abbott Reviewed-by: Catalin Marinas Tested-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu_context.h | 43 ++++++++++++++++++++++++++++++++++ arch/arm64/include/asm/page.h | 6 +++-- arch/arm64/include/asm/pgtable-hwdef.h | 7 +++++- arch/arm64/kernel/head.S | 37 +++++++++++++++++++++++++++++ arch/arm64/kernel/smp.c | 1 + arch/arm64/mm/mmu.c | 7 +++++- arch/arm64/mm/proc-macros.S | 10 ++++++++ arch/arm64/mm/proc.S | 3 +++ 8 files changed, 110 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index a9eee33dfa62..ecf2d060036b 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void) : "r" (ttbr)); } +/* + * TCR.T0SZ value to use when the ID map is active. Usually equals + * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in + * physical memory, in which case it will be smaller. + */ +extern u64 idmap_t0sz; + +static inline bool __cpu_uses_extended_idmap(void) +{ + return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) && + unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS))); +} + +static inline void __cpu_set_tcr_t0sz(u64 t0sz) +{ + unsigned long tcr; + + if (__cpu_uses_extended_idmap()) + asm volatile ( + " mrs %0, tcr_el1 ;" + " bfi %0, %1, %2, %3 ;" + " msr tcr_el1, %0 ;" + " isb" + : "=&r" (tcr) + : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); +} + +/* + * Set TCR.T0SZ to the value appropriate for activating the identity map. + */ +static inline void cpu_set_idmap_tcr_t0sz(void) +{ + __cpu_set_tcr_t0sz(idmap_t0sz); +} + +/* + * Set TCR.T0SZ to its default value (based on VA_BITS) + */ +static inline void cpu_set_default_tcr_t0sz(void) +{ + __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)); +} + static inline void switch_new_context(struct mm_struct *mm) { unsigned long flags; diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 22b16232bd60..3d02b1869eb8 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -33,7 +33,9 @@ * image. Both require pgd, pud (4 levels only) and pmd tables to (section) * map the kernel. With the 64K page configuration, swapper and idmap need to * map to pte level. The swapper also maps the FDT (see __create_page_tables - * for more information). + * for more information). Note that the number of ID map translation levels + * could be increased on the fly if system RAM is out of reach for the default + * VA range, so 3 pages are reserved in all cases. */ #ifdef CONFIG_ARM64_64K_PAGES #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS) @@ -42,7 +44,7 @@ #endif #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) -#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE) +#define IDMAP_DIR_SIZE (3 * PAGE_SIZE) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 5f930cc9ea83..847e864202cc 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -143,7 +143,12 @@ /* * TCR flags. 
*/ -#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0)) +#define TCR_T0SZ_OFFSET 0 +#define TCR_T1SZ_OFFSET 16 +#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET) +#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) +#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) +#define TCR_TxSZ_WIDTH 6 #define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24)) #define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24)) #define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24)) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 1fdf42041f42..51c9811e683c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -383,6 +383,43 @@ __create_page_tables: */ mov x0, x25 // idmap_pg_dir adrp x3, KERNEL_START // __pa(KERNEL_START) + +#ifndef CONFIG_ARM64_VA_BITS_48 +#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) +#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT)) + + /* + * If VA_BITS < 48, it may be too small to allow for an ID mapping to be + * created that covers system RAM if that is located sufficiently high + * in the physical address space. So for the ID map, use an extended + * virtual range in that case, by configuring an additional translation + * level. + * First, we have to verify our assumption that the current value of + * VA_BITS was chosen such that all translation levels are fully + * utilised, and that lowering T0SZ will always result in an additional + * translation level to be configured. + */ +#if VA_BITS != EXTRA_SHIFT +#error "Mismatch between VA_BITS and page size/number of translation levels" +#endif + + /* + * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the + * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), + * this number conveniently equals the number of leading zeroes in + * the physical address of KERNEL_END. + */ + adrp x5, KERNEL_END + clz x5, x5 + cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? + b.ge 1f // .. then skip additional level + + str_l x5, idmap_t0sz, x6 + + create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 +1: +#endif + create_pgd_entry x0, x3, x5, x6 mov x5, x3 // __pa(KERNEL_START) adr_l x6, KERNEL_END // __pa(KERNEL_END) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 4257369341e4..ffe8e1b814e0 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void) */ cpu_set_reserved_ttbr0(); flush_tlb_all(); + cpu_set_default_tcr_t0sz(); preempt_disable(); trace_hardirqs_off(); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c9267acb699c..428aaf86c95b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -40,6 +40,8 @@ #include "mm.h" +u64 idmap_t0sz = TCR_T0SZ(VA_BITS); + /* * Empty_zero_page is a special page that is used for zero-initialized data * and COW. 
@@ -454,6 +456,7 @@ void __init paging_init(void) */ cpu_set_reserved_ttbr0(); flush_tlb_all(); + cpu_set_default_tcr_t0sz(); } /* @@ -461,8 +464,10 @@ void __init paging_init(void) */ void setup_mm_for_reboot(void) { - cpu_switch_mm(idmap_pg_dir, &init_mm); + cpu_set_reserved_ttbr0(); flush_tlb_all(); + cpu_set_idmap_tcr_t0sz(); + cpu_switch_mm(idmap_pg_dir, &init_mm); } /* diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S index 005d29e2977d..4c4d93c4bf65 100644 --- a/arch/arm64/mm/proc-macros.S +++ b/arch/arm64/mm/proc-macros.S @@ -52,3 +52,13 @@ mov \reg, #4 // bytes per word lsl \reg, \reg, \tmp // actual cache line size .endm + +/* + * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map + */ + .macro tcr_set_idmap_t0sz, valreg, tmpreg +#ifndef CONFIG_ARM64_VA_BITS_48 + ldr_l \tmpreg, idmap_t0sz + bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH +#endif + .endm diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 28eebfb6af76..cdd754e19b9b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -156,6 +156,7 @@ ENTRY(cpu_do_resume) msr cpacr_el1, x6 msr ttbr0_el1, x1 msr ttbr1_el1, x7 + tcr_set_idmap_t0sz x8, x7 msr tcr_el1, x8 msr vbar_el1, x9 msr mdscr_el1, x10 @@ -233,6 +234,8 @@ ENTRY(__cpu_setup) */ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 + tcr_set_idmap_t0sz x10, x9 + /* * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in * TCR_EL1. -- cgit v1.2.3 From e4c5a6851058386c9e109ad529717a23173918bc Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 19 Mar 2015 16:42:28 +0000 Subject: arm64: KVM: use ID map with increased VA range if required This patch modifies the HYP init code so it can deal with system RAM residing at an offset which exceeds the reach of VA_BITS. Like for EL1, this involves configuring an additional level of translation for the ID map. However, in case of EL2, this implies that all translations use the extra level, as we cannot seamlessly switch between translation tables with different numbers of translation levels. So add an extra translation table at the root level. Since the ID map and the runtime HYP map are guaranteed not to overlap, they can share this root level, and we can essentially merge these two tables into one. 
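The EL1 side of this scheme, added in the previous patch, picks idmap_t0sz from the leading zeroes of the highest physical address the ID map has to cover, clamped to the default derived from VA_BITS. A rough standalone model of that calculation follows; the function name is invented and VA_BITS is passed in explicitly for the example:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the head.S logic: T0SZ = clz(__pa(KERNEL_END)), never larger than
 * the default 64 - VA_BITS (a larger T0SZ would mean a smaller VA range).
 */
static unsigned int idmap_t0sz_for(uint64_t kernel_end_phys, unsigned int va_bits)
{
	unsigned int t0sz = __builtin_clzll(kernel_end_phys);	/* caller must ensure != 0 */

	if (t0sz > 64 - va_bits)
		t0sz = 64 - va_bits;	/* default range already covers the kernel */
	return t0sz;
}

int main(void)
{
	/* Kernel image ending just above 544 GiB with 39-bit VAs: T0SZ drops from 25 to 24. */
	printf("%u\n", idmap_t0sz_for(UINT64_C(0x8840000000), 39));
	return 0;
}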
Tested-by: Marc Zyngier Reviewed-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm/include/asm/kvm_mmu.h | 10 ++++++++++ arch/arm/kvm/mmu.c | 27 +++++++++++++++++++++++++-- arch/arm64/include/asm/kvm_mmu.h | 33 +++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp-init.S | 25 +++++++++++++++++++++++++ 4 files changed, 93 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index bf0fe99e8ca9..018760675b56 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -270,6 +270,16 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); +static inline bool __kvm_cpu_uses_extended_idmap(void) +{ + return false; +} + +static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd, + pgd_t *hyp_pgd, + pgd_t *merged_hyp_pgd, + unsigned long hyp_idmap_start) { } + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 42a24d6b003b..69c2b4ce6160 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -35,6 +35,7 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; static pgd_t *boot_hyp_pgd; static pgd_t *hyp_pgd; +static pgd_t *merged_hyp_pgd; static DEFINE_MUTEX(kvm_hyp_pgd_mutex); static unsigned long hyp_idmap_start; @@ -434,6 +435,11 @@ void free_hyp_pgds(void) free_pages((unsigned long)hyp_pgd, hyp_pgd_order); hyp_pgd = NULL; } + if (merged_hyp_pgd) { + clear_page(merged_hyp_pgd); + free_page((unsigned long)merged_hyp_pgd); + merged_hyp_pgd = NULL; + } mutex_unlock(&kvm_hyp_pgd_mutex); } @@ -1473,12 +1479,18 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) phys_addr_t kvm_mmu_get_httbr(void) { - return virt_to_phys(hyp_pgd); + if (__kvm_cpu_uses_extended_idmap()) + return virt_to_phys(merged_hyp_pgd); + else + return virt_to_phys(hyp_pgd); } phys_addr_t kvm_mmu_get_boot_httbr(void) { - return virt_to_phys(boot_hyp_pgd); + if (__kvm_cpu_uses_extended_idmap()) + return virt_to_phys(merged_hyp_pgd); + else + return virt_to_phys(boot_hyp_pgd); } phys_addr_t kvm_get_idmap_vector(void) @@ -1521,6 +1533,17 @@ int kvm_mmu_init(void) goto out; } + if (__kvm_cpu_uses_extended_idmap()) { + merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + if (!merged_hyp_pgd) { + kvm_err("Failed to allocate extra HYP pgd\n"); + goto out; + } + __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd, + hyp_idmap_start); + return 0; + } + /* Map the very same page at the trampoline VA */ err = __create_hyp_mappings(boot_hyp_pgd, TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..edfe6864bc28 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -68,6 +68,8 @@ #include #include #include +#include +#include #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) @@ -305,5 +307,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); +static inline bool __kvm_cpu_uses_extended_idmap(void) +{ + return __cpu_uses_extended_idmap(); +} + +static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd, + pgd_t *hyp_pgd, + pgd_t *merged_hyp_pgd, + unsigned long hyp_idmap_start) +{ + int idmap_idx; + + /* + * Use the first entry to 
access the HYP mappings. It is + * guaranteed to be free, otherwise we wouldn't use an + * extended idmap. + */ + VM_BUG_ON(pgd_val(merged_hyp_pgd[0])); + merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE); + + /* + * Create another extended level entry that points to the boot HYP map, + * which contains an ID mapping of the HYP init code. We essentially + * merge the boot and runtime HYP maps by doing so, but they don't + * overlap anyway, so this is fine. + */ + idmap_idx = hyp_idmap_start >> VA_BITS; + VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx])); + merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE); +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index c3191168a994..178ba2248a98 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -20,6 +20,7 @@ #include #include #include +#include .text .pushsection .hyp.idmap.text, "ax" @@ -65,6 +66,25 @@ __do_hyp_init: and x4, x4, x5 ldr x5, =TCR_EL2_FLAGS orr x4, x4, x5 + +#ifndef CONFIG_ARM64_VA_BITS_48 + /* + * If we are running with VA_BITS < 48, we may be running with an extra + * level of translation in the ID map. This is only the case if system + * RAM is out of range for the currently configured page size and number + * of translation levels, in which case we will also need the extra + * level for the HYP ID map, or we won't be able to enable the EL2 MMU. + * + * However, at EL2, there is only one TTBR register, and we can't switch + * between translation tables *and* update TCR_EL2.T0SZ at the same + * time. Bottom line: we need the extra level in *both* our translation + * tables. + * + * So use the same T0SZ value we use for the ID map. + */ + ldr_l x5, idmap_t0sz + bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH +#endif msr tcr_el2, x4 ldr x4, =VTCR_EL2_FLAGS @@ -91,6 +111,10 @@ __do_hyp_init: msr sctlr_el2, x4 isb + /* Skip the trampoline dance if we merged the boot and runtime PGDs */ + cmp x0, x1 + b.eq merged + /* MMU is now enabled. Get ready for the trampoline dance */ ldr x4, =TRAMPOLINE_VA adr x5, target @@ -105,6 +129,7 @@ target: /* We're now in the trampoline code, switch page tables */ tlbi alle2 dsb sy +merged: /* Set the stack and new vectors */ kern_hyp_va x2 mov sp, x2 -- cgit v1.2.3 From 91d57155dc5ab4b311624b7ee570339b6af19ad5 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 24 Mar 2015 13:50:27 +0000 Subject: arm64: head.S: ensure visibility of page tables After writing the page tables, we use __inval_cache_range to invalidate any stale cache entries. Strongly Ordered memory accesses are not ordered w.r.t. cache maintenance instructions, and hence explicit memory barriers are required to provide this ordering. However, __inval_cache_range was written to be used on Normal Cacheable memory once the MMU and caches are on, and does not have any barriers prior to the DC instructions. This patch adds a DMB between the page tables being written and the corresponding cachelines being invalidated, ensuring that the invalidation makes the new data visible to subsequent cacheable accesses. A barrier is not required before the prior invalidate as we do not access the page table memory area prior to this, and earlier barriers in preserve_boot_args and set_cpu_boot_mode_flag ensures ordering w.r.t. any stores performed prior to entering Linux. 
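As a sketch of the overall sequence this fix relies on (kernel-context illustration only; the helper name is invented, and the real code does this directly in assembly, with __inval_cache_range providing the invalidation and the completion DSB):

/*
 * Illustrative only: publish *p, written while SCTLR_EL1.{C,M} were clear, so
 * that a later cacheable read cannot hit a stale line.
 */
static inline void publish_early_store(void *p)
{
	asm volatile(
	"	dmb	sy\n"		/* order the prior store before the cache op */
	"	dc	ivac, %0\n"	/* invalidate any stale line for this address */
	"	dsb	sy\n"		/* wait for the invalidation to complete */
	: : "r" (p) : "memory");
}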
Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Fixes: c218bca74eeafa2f ("arm64: Relax the kernel cache requirements for boot") Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 51c9811e683c..bbc474cd0ca8 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -458,6 +458,7 @@ __create_page_tables: */ mov x0, x25 add x1, x26, #SWAPPER_DIR_SIZE + dmb sy bl __inval_cache_range mov lr, x27 -- cgit v1.2.3 From d5efd9cc9cf2e422d064c912c7d5d985f52c1b2c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Mar 2015 11:54:10 +0000 Subject: arm64: pmu: add support for interrupt-affinity property Historically, the PMU devicetree bindings have expected SPIs to be listed in order of *logical* CPU number. This is problematic for bootloaders, especially when the boot CPU (logical ID 0) isn't listed first in the devicetree. This patch adds a new optional property, interrupt-affinity, to the PMU node which allows the interrupt affinity to be described using a list of phandles to CPU nodes, with each entry in the list corresponding to the SPI at the same index in the interrupts property. Cc: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/pmu.h | 1 + arch/arm64/kernel/perf_event.c | 57 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 54 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h index e6f087806aaf..b7710a59672c 100644 --- a/arch/arm64/include/asm/pmu.h +++ b/arch/arm64/include/asm/pmu.h @@ -44,6 +44,7 @@ struct pmu_hw_events { struct arm_pmu { struct pmu pmu; cpumask_t active_irqs; + int *irq_affinity; const char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); void (*enable)(struct hw_perf_event *evt, int idx); diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 68a74151fa6c..195991dadc37 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -25,8 +25,10 @@ #include #include #include +#include #include #include +#include #include #include @@ -405,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu) free_percpu_irq(irq, &cpu_hw_events); } else { for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) + int cpu = i; + + if (armpmu->irq_affinity) + cpu = armpmu->irq_affinity[i]; + + if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); if (irq > 0) @@ -459,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); } else { for (i = 0; i < irqs; ++i) { + int cpu = i; + err = 0; irq = platform_get_irq(pmu_device, i); if (irq <= 0) continue; + if (armpmu->irq_affinity) + cpu = armpmu->irq_affinity[i]; + /* * If we have a single PMU interrupt that we can't shift, * assume that we're running on a uniprocessor machine and * continue. Otherwise, continue without this interrupt.
*/ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); + irq, cpu); continue; } @@ -485,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) return err; } - cpumask_set_cpu(i, &armpmu->active_irqs); + cpumask_set_cpu(cpu, &armpmu->active_irqs); } } @@ -1298,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = { static int armpmu_device_probe(struct platform_device *pdev) { + int i, *irqs; + if (!cpu_pmu) return -ENODEV; + irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + + for (i = 0; i < pdev->num_resources; ++i) { + struct device_node *dn; + int cpu; + + dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", + i); + if (!dn) { + pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", + of_node_full_name(dn), i); + break; + } + + for_each_possible_cpu(cpu) + if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) + break; + + of_node_put(dn); + if (cpu >= nr_cpu_ids) { + pr_warn("Failed to find logical CPU for %s\n", + dn->name); + break; + } + + irqs[i] = cpu; + } + + if (i == pdev->num_resources) + cpu_pmu->irq_affinity = irqs; + else + kfree(irqs); + cpu_pmu->plat_device = pdev; return 0; } -- cgit v1.2.3 From 0c20856c260236b96f54c452d38dbe1348ed34d2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 24 Mar 2015 15:10:21 +0000 Subject: arm64: head.S: ensure idmap_t0sz is visible We write idmap_t0sz with SCTLR_EL1.{C,M} clear, but we only have the guarantee that the kernel Image is clean, not invalidated in the caches, and therefore we might read a stale value once the MMU is enabled. This patch ensures we invalidate the corresponding cacheline after the write as we do for all other data written before we set SCTLR_EL1.{C,M}, guaranteeing that the value will be visible later. We rely on the DSBs in __create_page_tables to complete the maintenance. Signed-off-by: Mark Rutland CC: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index bbc474cd0ca8..19f915e8f6e0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -414,7 +414,10 @@ __create_page_tables: cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? b.ge 1f // .. then skip additional level - str_l x5, idmap_t0sz, x6 + adr_l x6, idmap_t0sz + str x5, [x6] + dmb sy + dc ivac, x6 // Invalidate potentially stale cache line create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 1: -- cgit v1.2.3 From 12eb3e833961bfe532b763a6e4e817ec87f48bc7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 24 Mar 2015 17:48:07 +0000 Subject: ARM: kvm: assert on HYP section boundaries not actual code size Using ASSERT() with an expression that involves a symbol that is only supplied through a PROVIDE() definition in the linker script itself is apparently not supported by some older versions of binutils. So instead, rewrite the expression so that only the section boundaries __hyp_idmap_text_start and __hyp_idmap_text_end are used. Note that this reverts the fix in 06f75a1f6200 ("ARM, arm64: kvm: get rid of the bounce page") for the ASSERT() being triggered erroneously when unrelated linker emitted veneers happen to end up in the HYP idmap region.
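Both the old and the new ASSERT() encode the same invariant, namely that the HYP idmap text never crosses a page boundary; written out in C the equivalence is easier to see (standalone sketch, with a 4 KiB page size assumed for the example). The new form refers only to the section boundary symbols, which is what lets it work on binutils versions that cannot evaluate PROVIDE()d symbols inside an ASSERT().

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE	UINT64_C(4096)

/* Old form: (start & ~PAGE_MASK) + size <= PAGE_SIZE, with size == end - start.
 * New form: end - (start & PAGE_MASK) <= PAGE_SIZE. Both reject a region that
 * straddles a page boundary.
 */
static bool fits_in_one_page(uint64_t start, uint64_t end)
{
	uint64_t page_mask = ~(EXAMPLE_PAGE_SIZE - 1);

	return end - (start & page_mask) <= EXAMPLE_PAGE_SIZE;
}
/* e.g. fits_in_one_page(0x1800, 0x2400) == false: the region crosses 0x2000. */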
Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 2d760df0d57d..808398ec024e 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -381,5 +381,5 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") * and should not cross a page boundary. * The above comment applies as well. */ -ASSERT((__hyp_idmap_text_start & ~PAGE_MASK) + __hyp_idmap_size <= PAGE_SIZE, +ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, "HYP init code too big or misaligned") -- cgit v1.2.3 From a9fea8b388ed5838fe0744970e67f7019d420824 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 27 Mar 2015 10:18:01 +0000 Subject: ARM: kvm: round HYP section to page size instead of log2 upper bound Older binutils do not support expressions involving the values of external symbols so just round up the HYP region to the page size. Tested-by: Simon Horman Signed-off-by: Ard Biesheuvel [will: when will this ever end?!] Signed-off-by: Will Deacon --- arch/arm/kernel/vmlinux.lds.S | 31 +------------------------------ arch/arm/kvm/init.S | 3 --- 2 files changed, 1 insertion(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 808398ec024e..f2db429ea75d 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -12,26 +12,6 @@ #include #endif -/* - * Poor man's version of LOG2CEIL(), which is - * not available in binutils before v2.24. - */ -#define LOG2_ROUNDUP(size) ( \ - __LOG2_ROUNDUP(size, 2) \ - __LOG2_ROUNDUP(size, 3) \ - __LOG2_ROUNDUP(size, 4) \ - __LOG2_ROUNDUP(size, 5) \ - __LOG2_ROUNDUP(size, 6) \ - __LOG2_ROUNDUP(size, 7) \ - __LOG2_ROUNDUP(size, 8) \ - __LOG2_ROUNDUP(size, 9) \ - __LOG2_ROUNDUP(size, 10) \ - __LOG2_ROUNDUP(size, 11) \ - 12) - -#define __LOG2_ROUNDUP(size, order) \ - (size) <= (1 << order) ? order : - #define PROC_INFO \ . = ALIGN(4); \ VMLINUX_SYMBOL(__proc_info_begin) = .; \ @@ -43,20 +23,11 @@ VMLINUX_SYMBOL(__idmap_text_start) = .; \ *(.idmap.text) \ VMLINUX_SYMBOL(__idmap_text_end) = .; \ - . = ALIGN(1 << LOG2_ROUNDUP(__hyp_idmap_size)); \ + . = ALIGN(PAGE_SIZE); \ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ *(.hyp.idmap.text) \ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; -/* - * If the HYP idmap .text section is populated, it needs to be positioned - * such that it will not cross a page boundary in the final output image. - * So align it to the section size rounded up to the next power of 2. - * If __hyp_idmap_size is undefined, the section will be empty so define - * it as 0 in that case. 
- */ -PROVIDE(__hyp_idmap_size = 0); - #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) #define ARM_CPU_KEEP(x) x diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 11fb1d56f449..3988e72d16ff 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -157,6 +157,3 @@ target: @ We're now in the trampoline code, switch page tables __kvm_hyp_init_end: .popsection - - .global __hyp_idmap_size - .set __hyp_idmap_size, __kvm_hyp_init_end - __kvm_hyp_init -- cgit v1.2.3 From 0978fb25f86b7595821cee6955679250d47c6438 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Mar 2015 13:09:21 +0000 Subject: arm64: insn: Add aarch64_insn_decode_immediate Patching an instruction sometimes requires extracting the immediate field from this instruction. To facilitate this, and avoid potential duplication of code, add aarch64_insn_decode_immediate as the reciprocal to aarch64_insn_encode_immediate. Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 1 + arch/arm64/kernel/insn.c | 81 ++++++++++++++++++++++++++++++++++--------- 2 files changed, 66 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index d2f49423c5dc..f81b328d9cf4 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -285,6 +285,7 @@ bool aarch64_insn_is_nop(u32 insn); int aarch64_insn_read(void *addr, u32 *insnp); int aarch64_insn_write(void *addr, u32 insn); enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); +u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn); u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, u32 insn, u64 imm); u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index c8eca88f12e6..924902083e47 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -265,23 +265,13 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) return aarch64_insn_patch_text_sync(addrs, insns, cnt); } -u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, - u32 insn, u64 imm) +static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, + u32 *maskp, int *shiftp) { - u32 immlo, immhi, lomask, himask, mask; + u32 mask; int shift; switch (type) { - case AARCH64_INSN_IMM_ADR: - lomask = 0x3; - himask = 0x7ffff; - immlo = imm & lomask; - imm >>= 2; - immhi = imm & himask; - imm = (immlo << 24) | (immhi); - mask = (lomask << 24) | (himask); - shift = 5; - break; case AARCH64_INSN_IMM_26: mask = BIT(26) - 1; shift = 0; @@ -320,9 +310,68 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, shift = 16; break; default: - pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", - type); - return 0; + return -EINVAL; + } + + *maskp = mask; + *shiftp = shift; + + return 0; +} + +#define ADR_IMM_HILOSPLIT 2 +#define ADR_IMM_SIZE SZ_2M +#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_LOSHIFT 29 +#define ADR_IMM_HISHIFT 5 + +u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn) +{ + u32 immlo, immhi, mask; + int shift; + + switch (type) { + case AARCH64_INSN_IMM_ADR: + shift = 0; + immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK; + immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; + insn = (immhi << 
ADR_IMM_HILOSPLIT) | immlo; + mask = ADR_IMM_SIZE - 1; + break; + default: + if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { + pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n", + type); + return 0; + } + } + + return (insn >> shift) & mask; +} + +u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm) +{ + u32 immlo, immhi, mask; + int shift; + + switch (type) { + case AARCH64_INSN_IMM_ADR: + shift = 0; + immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; + imm >>= ADR_IMM_HILOSPLIT; + immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; + imm = immlo | immhi; + mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | + (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); + break; + default: + if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { + pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", + type); + return 0; + } } /* Update the immediate field. */ -- cgit v1.2.3 From fef7f2b2010381c795ae43743ad31931cc58f5ad Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Mar 2015 13:09:22 +0000 Subject: arm64: alternative: Allow immediate branch as alternative instruction Since all immediate branches are PC-relative on Aarch64, these instructions cannot be used as an alternative with the simplistic approach we currently have (the immediate has been computed from the .altinstr_replacement section, and end-up being completely off if we insert it directly). This patch handles the b and bl instructions in a different way, using the insn framework to recompute the immediate, and generate the right displacement. Reviewed-by: Andre Przywara Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/kernel/alternative.c | 55 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index ad7821d64a1d..21033bba9390 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -24,6 +24,7 @@ #include #include #include +#include #include extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; @@ -33,6 +34,48 @@ struct alt_region { struct alt_instr *end; }; +/* + * Decode the imm field of a b/bl instruction, and return the byte + * offset as a signed value (so it can be used when computing a new + * branch target). + */ +static s32 get_branch_offset(u32 insn) +{ + s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); + + /* sign-extend the immediate before turning it into a byte offset */ + return (imm << 6) >> 4; +} + +static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr) +{ + u32 insn; + + aarch64_insn_read(altinsnptr, &insn); + + /* Stop the world on instructions we don't support... */ + BUG_ON(aarch64_insn_is_cbz(insn)); + BUG_ON(aarch64_insn_is_cbnz(insn)); + BUG_ON(aarch64_insn_is_bcond(insn)); + /* ... and there is probably more. 
*/ + + if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) { + enum aarch64_insn_branch_type type; + unsigned long target; + + if (aarch64_insn_is_b(insn)) + type = AARCH64_INSN_BRANCH_NOLINK; + else + type = AARCH64_INSN_BRANCH_LINK; + + target = (unsigned long)altinsnptr + get_branch_offset(insn); + insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr, + target, type); + } + + return insn; +} + static int __apply_alternatives(void *alt_region) { struct alt_instr *alt; @@ -40,16 +83,24 @@ static int __apply_alternatives(void *alt_region) u8 *origptr, *replptr; for (alt = region->begin; alt < region->end; alt++) { + u32 insn; + int i; + if (!cpus_have_cap(alt->cpufeature)) continue; - BUG_ON(alt->alt_len > alt->orig_len); + BUG_ON(alt->alt_len != alt->orig_len); pr_info_once("patching kernel code\n"); origptr = (u8 *)&alt->orig_offset + alt->orig_offset; replptr = (u8 *)&alt->alt_offset + alt->alt_offset; - memcpy(origptr, replptr, alt->alt_len); + + for (i = 0; i < alt->alt_len; i += sizeof(insn)) { + insn = get_alt_insn(origptr + i, replptr + i); + aarch64_insn_write(origptr + i, insn); + } + flush_icache_range((uintptr_t)origptr, (uintptr_t)(origptr + alt->alt_len)); } -- cgit v1.2.3 From 359b706473b47da3c93bd99fd10d798fe411ab67 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Mar 2015 13:09:23 +0000 Subject: arm64: Extract feature parsing code from cpu_errata.c As we detect more architectural features at runtime, it makes sense to reuse the existing framework whilst avoiding to call a feature an erratum... This patch extract the core capability parsing, moves it into a new file (cpufeature.c), and let the CPU errata detection code use it. Reviewed-by: Andre Przywara Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 15 ++++++++++++ arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/cpu_errata.c | 36 ++++------------------------ arch/arm64/kernel/cpufeature.c | 47 +++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 1 + 5 files changed, 68 insertions(+), 33 deletions(-) create mode 100644 arch/arm64/kernel/cpufeature.c (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index b6c16d5f622f..6ae35d160464 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -28,6 +28,18 @@ #ifndef __ASSEMBLY__ +struct arm64_cpu_capabilities { + const char *desc; + u16 capability; + bool (*matches)(const struct arm64_cpu_capabilities *); + union { + struct { /* To be used for erratum handling only */ + u32 midr_model; + u32 midr_range_min, midr_range_max; + }; + }; +}; + extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); static inline bool cpu_have_feature(unsigned int num) @@ -51,7 +63,10 @@ static inline void cpus_set_cap(unsigned int num) __set_bit(num, cpu_hwcaps); } +void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, + const char *info); void check_local_cpu_errata(void); +void check_local_cpu_features(void); bool cpu_supports_mixed_endian_el0(void); bool system_supports_mixed_endian_el0(void); diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d5e70747c7a2..b12e15b80516 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -17,7 +17,7 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ - 
alternative.o cacheinfo.o + cpufeature.o alternative.o cacheinfo.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o entry32.o \ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index fa62637e63a8..a66f4fa4d541 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -16,8 +16,6 @@ * along with this program. If not, see . */ -#define pr_fmt(fmt) "alternatives: " fmt - #include #include #include @@ -26,27 +24,11 @@ #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) -/* - * Add a struct or another datatype to the union below if you need - * different means to detect an affected CPU. - */ -struct arm64_cpu_capabilities { - const char *desc; - u16 capability; - bool (*is_affected)(struct arm64_cpu_capabilities *); - union { - struct { - u32 midr_model; - u32 midr_range_min, midr_range_max; - }; - }; -}; - #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ MIDR_ARCHITECTURE_MASK) static bool __maybe_unused -is_affected_midr_range(struct arm64_cpu_capabilities *entry) +is_affected_midr_range(const struct arm64_cpu_capabilities *entry) { u32 midr = read_cpuid_id(); @@ -59,12 +41,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry) } #define MIDR_RANGE(model, min, max) \ - .is_affected = is_affected_midr_range, \ + .matches = is_affected_midr_range, \ .midr_model = model, \ .midr_range_min = min, \ .midr_range_max = max -struct arm64_cpu_capabilities arm64_errata[] = { +const struct arm64_cpu_capabilities arm64_errata[] = { #if defined(CONFIG_ARM64_ERRATUM_826319) || \ defined(CONFIG_ARM64_ERRATUM_827319) || \ defined(CONFIG_ARM64_ERRATUM_824069) @@ -97,15 +79,5 @@ struct arm64_cpu_capabilities arm64_errata[] = { void check_local_cpu_errata(void) { - struct arm64_cpu_capabilities *cpus = arm64_errata; - int i; - - for (i = 0; cpus[i].desc; i++) { - if (!cpus[i].is_affected(&cpus[i])) - continue; - - if (!cpus_have_cap(cpus[i].capability)) - pr_info("enabling workaround for %s\n", cpus[i].desc); - cpus_set_cap(cpus[i].capability); - } + check_cpu_capabilities(arm64_errata, "enabling workaround for"); } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c new file mode 100644 index 000000000000..3d9967e43d89 --- /dev/null +++ b/arch/arm64/kernel/cpufeature.c @@ -0,0 +1,47 @@ +/* + * Contains CPU feature definitions + * + * Copyright (C) 2015 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#define pr_fmt(fmt) "alternatives: " fmt + +#include +#include +#include + +static const struct arm64_cpu_capabilities arm64_features[] = { + {}, +}; + +void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, + const char *info) +{ + int i; + + for (i = 0; caps[i].desc; i++) { + if (!caps[i].matches(&caps[i])) + continue; + + if (!cpus_have_cap(caps[i].capability)) + pr_info("%s %s\n", info, caps[i].desc); + cpus_set_cap(caps[i].capability); + } +} + +void check_local_cpu_features(void) +{ + check_cpu_capabilities(arm64_features, "detected feature"); +} diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 929855691dae..75d5a867e7fb 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -236,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) cpuinfo_detect_icache_policy(info); check_local_cpu_errata(); + check_local_cpu_features(); update_cpu_features(info); } -- cgit v1.2.3 From 475bfd3d67fa8f7e9cbc08a943b5e8cb6f70044d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 30 Mar 2015 11:29:35 +0100 Subject: arm64: defconfig: updates for 4.1 Enable a few useful options in our defconfig: - New platform support (exynos7, seattle, tegra132) - SKY2 (ethernet in newer revisions of Juno) - Xgene reboot support - Virtio-pci for kvmtool and qemu - EFIVAR_FS (previously selected as a module) - NFSv4 Signed-off-by: Will Deacon --- arch/arm64/configs/defconfig | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index be1f12a5a5f0..e07896c819ef 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -31,8 +31,12 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_EXYNOS7=y CONFIG_ARCH_FSL_LS2085A=y CONFIG_ARCH_MEDIATEK=y +CONFIG_ARCH_SEATTLE=y +CONFIG_ARCH_TEGRA=y +CONFIG_ARCH_TEGRA_132_SOC=y CONFIG_ARCH_THUNDER=y CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_XGENE=y @@ -62,6 +66,7 @@ CONFIG_BPF_JIT=y # CONFIG_WIRELESS is not set CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y +# CONFIG_TEGRA_AHB is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y @@ -81,6 +86,7 @@ CONFIG_NETDEVICES=y CONFIG_TUN=y CONFIG_VIRTIO_NET=y CONFIG_NET_XGENE=y +CONFIG_SKY2=y CONFIG_SMC91X=y CONFIG_SMSC911X=y # CONFIG_WLAN is not set @@ -100,6 +106,8 @@ CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_GPIO_PL061=y CONFIG_GPIO_XGENE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y # CONFIG_HWMON is not set CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y @@ -112,10 +120,10 @@ CONFIG_LOGO=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_ISP1760_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y +CONFIG_USB_ISP1760=y CONFIG_USB_ULPI=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y @@ -125,6 +133,7 @@ CONFIG_MMC_SPI=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_EFI=y CONFIG_RTC_DRV_XGENE=y +CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_MMIO=y # CONFIG_IOMMU_SUPPORT is not set @@ -143,8 +152,10 @@ CONFIG_CUSE=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y +CONFIG_EFIVAR_FS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_9P_FS=y CONFIG_NLS_CODEPAGE_437=y @@ -159,7 +170,6 @@ CONFIG_LOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set -CONFIG_KEYS=y 
CONFIG_SECURITY=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_ARM64_CRYPTO=y -- cgit v1.2.3 From cc3979b54d5f1d5b5059b404892888c304d28080 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 31 Mar 2015 00:46:00 +0100 Subject: arm64: Use bool function return values of true/false not 1/0 Use the normal return values for bool functions Signed-off-by: Joe Perches Signed-off-by: Will Deacon --- arch/arm64/include/asm/dma-mapping.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 6932bb57dba0..9437e3dc5833 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -97,7 +97,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask) static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) - return 0; + return false; return addr + size - 1 <= *dev->dma_mask; } -- cgit v1.2.3 From 905e8c5dcaa147163672b06fe9dcb5abaacbc711 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 23 Mar 2015 19:07:02 +0000 Subject: arm64: errata: add workaround for cortex-a53 erratum #845719 When running a compat (AArch32) userspace on Cortex-A53, a load at EL0 from a virtual address that matches the bottom 32 bits of the virtual address used by a recent load at (AArch64) EL1 might return incorrect data. This patch works around the issue by writing to the contextidr_el1 register on the exception return path when returning to a 32-bit task. This workaround is patched in at runtime based on the MIDR value of the processor. Reviewed-by: Marc Zyngier Tested-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 21 +++++++++++++++++++++ arch/arm64/include/asm/cpufeature.h | 3 ++- arch/arm64/kernel/cpu_errata.c | 8 ++++++++ arch/arm64/kernel/entry.S | 20 ++++++++++++++++++++ 4 files changed, 51 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 02f67a9d7426..23d51be7d60a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -361,6 +361,27 @@ config ARM64_ERRATUM_832075 If unsure, say Y. +config ARM64_ERRATUM_845719 + bool "Cortex-A53: 845719: a load might read incorrect data" + depends on COMPAT + default y + help + This option adds an alternative code sequence to work around ARM + erratum 845719 on Cortex-A53 parts up to r0p4. + + When running a compat (AArch32) userspace on an affected Cortex-A53 + part, a load at EL0 from a virtual address that matches the bottom 32 + bits of the virtual address used by a recent load at (AArch64) EL1 + might return incorrect data. + + The workaround is to write the contextidr_el1 register on exception + return to a 32-bit task. + Please note that this does not necessarily enable the workaround, + as it depends on the alternative framework, which will only patch + the kernel if an affected CPU is detected. + + If unsure, say Y. 
+ endmenu diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 6ae35d160464..82cb9f98ba1a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -23,8 +23,9 @@ #define ARM64_WORKAROUND_CLEAN_CACHE 0 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 +#define ARM64_WORKAROUND_845719 2 -#define ARM64_NCAPS 2 +#define ARM64_NCAPS 3 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index a66f4fa4d541..4672860def1f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -72,6 +72,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12), }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_845719 + { + /* Cortex-A53 r0p[01234] */ + .desc = "ARM erratum 845719", + .capability = ARM64_WORKAROUND_845719, + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), + }, #endif { } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index cf21bb3bf752..959fe8733560 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -21,8 +21,10 @@ #include #include +#include #include #include +#include #include #include #include @@ -120,6 +122,24 @@ ct_user_enter ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 + +#ifdef CONFIG_ARM64_ERRATUM_845719 + alternative_insn \ + "nop", \ + "tbz x22, #4, 1f", \ + ARM64_WORKAROUND_845719 +#ifdef CONFIG_PID_IN_CONTEXTIDR + alternative_insn \ + "nop; nop", \ + "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \ + ARM64_WORKAROUND_845719 +#else + alternative_insn \ + "nop", \ + "msr contextidr_el1, xzr; 1:", \ + ARM64_WORKAROUND_845719 +#endif +#endif .endif msr elr_el1, x21 // set up the return data msr spsr_el1, x22 -- cgit v1.2.3 From 6d1966dfd6e0ad2f8aa4b664ae1a62e33abe1998 Mon Sep 17 00:00:00 2001 From: Bo Yan Date: Tue, 31 Mar 2015 21:30:48 +0100 Subject: arm64: fix midr range for Cortex-A57 erratum 832075 Register MIDR_EL1 is masked to get variant and revision fields, then compared against midr_range_min and midr_range_max when checking whether CPU is affected by any particular erratum. However, variant and revision fields in MIDR_EL1 are separated by 16 bits, so the min and max of midr range should be constructed accordingly, otherwise the patch will not be applied when variant field is non-0. Cc: stable@vger.kernel.org # 3.19+ Acked-by: Andre Przywara Reviewed-by: Paul Walmsley Signed-off-by: Bo Yan [will: use MIDR_VARIANT_SHIFT to construct upper bound] Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 4672860def1f..6ffd91438560 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -70,7 +70,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A57 r0p0 - r1p2 */ .desc = "ARM erratum 832075", .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, - MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12), + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, + (1 << MIDR_VARIANT_SHIFT) | 2), }, #endif #ifdef CONFIG_ARM64_ERRATUM_845719 -- cgit v1.2.3
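To see why the original upper bound of 0x12 never matched an r1px part, the variant/revision comparison done by is_affected_midr_range() can be modelled in isolation (standalone sketch; only the variant/revision half of the check is shown, the helper and macro names are invented, and the MIDR value is the standard Cortex-A57 r1p1 encoding):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MIDR_EL1: variant in bits [23:20], revision in bits [3:0]. */
#define EXAMPLE_VARIANT_SHIFT	20
#define EXAMPLE_RV_MASK		((UINT32_C(0xf) << EXAMPLE_VARIANT_SHIFT) | UINT32_C(0xf))

static bool midr_rv_in_range(uint32_t midr, uint32_t rv_min, uint32_t rv_max)
{
	uint32_t rv = midr & EXAMPLE_RV_MASK;

	return rv >= rv_min && rv <= rv_max;
}

int main(void)
{
	uint32_t a57_r1p1 = 0x411fd071;	/* Cortex-A57 r1p1: variant 1, revision 1 */

	/* The old bound 0x12 reads as "variant 0, revision 18", so r1p1 is missed... */
	printf("%d\n", midr_rv_in_range(a57_r1p1, 0x00, 0x12));	/* 0 */
	/* ...whereas (1 << MIDR_VARIANT_SHIFT) | 2 covers everything up to r1p2. */
	printf("%d\n", midr_rv_in_range(a57_r1p1, 0x00, (1 << EXAMPLE_VARIANT_SHIFT) | 2));	/* 1 */
	return 0;
}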