author		Pasha Tatashin <pasha.tatashin@soleen.com>	2021-09-30 14:31:03 +0000
committer	Will Deacon <will@kernel.org>			2021-10-01 13:31:00 +0100
commit		5bb6834fc2900052a377df79b9ab065a698bf70b (patch)
tree		f660dc2458c8a65dd6276c1279e572f49c59fb7d
parent		0d8732e461d6b4dc2c625a69225f20e24da4dd79 (diff)
download	linux-5bb6834fc2900052a377df79b9ab065a698bf70b.tar.bz2
arm64: kexec: skip relocation code for inplace kexec
In case of kdump or when segments are already in place the relocation
is not needed, therefore the setup of relocation function and call to
it can be skipped.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Suggested-by: James Morse <james.morse@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210930143113.1502553-6-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon <will@kernel.org>
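The resulting machine_kexec_post_load() logic, condensed from the diff below, is a single early-exit check on IND_DONE: when every segment is already at its final physical address, no relocation buffer is staged. This is a simplified sketch, not the literal hunk; the cache-maintenance calls are summarized in comments.

	int machine_kexec_post_load(struct kimage *kimage)
	{
		void *reloc_code = page_to_virt(kimage->control_code_page);

		/*
		 * Segments already at their final physical addresses (kdump,
		 * or kexec with in-place segments): skip relocation entirely
		 * and only clean the new kernel image.
		 */
		if (kimage->head & IND_DONE) {
			kexec_segment_flush(kimage);
			kexec_image_info(kimage);
			return 0;
		}

		/*
		 * Otherwise stage a copy of arm64_relocate_new_kernel and
		 * record its physical address for the MMU-off jump made in
		 * machine_kexec().
		 */
		memcpy(reloc_code, arm64_relocate_new_kernel,
		       arm64_relocate_new_kernel_size);
		kimage->arch.kern_reloc = __pa(reloc_code);

		/*
		 * Cache maintenance on reloc_code and the kimage entry list
		 * follows here; see the first hunk below for the exact calls.
		 */
		return 0;
	}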
-rw-r--r--	arch/arm64/kernel/machine_kexec.c	34
-rw-r--r--	arch/arm64/kernel/relocate_kernel.S	3
2 files changed, 21 insertions, 16 deletions
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index b6d5a02cba2e..7f1cb5a2a463 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -144,16 +144,16 @@ int machine_kexec_post_load(struct kimage *kimage)
{
void *reloc_code = page_to_virt(kimage->control_code_page);
- /* If in place flush new kernel image, else flush lists and buffers */
- if (kimage->head & IND_DONE)
+ /* If in place, relocation is not used, only flush next kernel */
+ if (kimage->head & IND_DONE) {
kexec_segment_flush(kimage);
- else
- kexec_list_flush(kimage);
+ kexec_image_info(kimage);
+ return 0;
+ }
memcpy(reloc_code, arm64_relocate_new_kernel,
arm64_relocate_new_kernel_size);
kimage->arch.kern_reloc = __pa(reloc_code);
- kexec_image_info(kimage);
/* Flush the reloc_code in preparation for its execution. */
dcache_clean_inval_poc((unsigned long)reloc_code,
@@ -162,6 +162,8 @@ int machine_kexec_post_load(struct kimage *kimage)
icache_inval_pou((uintptr_t)reloc_code,
(uintptr_t)reloc_code +
arm64_relocate_new_kernel_size);
+ kexec_list_flush(kimage);
+ kexec_image_info(kimage);
return 0;
}
@@ -188,19 +190,25 @@ void machine_kexec(struct kimage *kimage)
local_daif_mask();
/*
- * cpu_soft_restart will shutdown the MMU, disable data caches, then
- * transfer control to the kern_reloc which contains a copy of
- * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
- * uses physical addressing to relocate the new image to its final
- * position and transfers control to the image entry point when the
- * relocation is complete.
+ * Both restart and cpu_soft_restart will shutdown the MMU, disable data
+ * caches. However, restart will start new kernel or purgatory directly,
+ * cpu_soft_restart will transfer control to arm64_relocate_new_kernel
* In kexec case, kimage->start points to purgatory assuming that
* kernel entry and dtb address are embedded in purgatory by
* userspace (kexec-tools).
* In kexec_file case, the kernel starts directly without purgatory.
*/
- cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, kimage->start,
- kimage->arch.dtb_mem);
+ if (kimage->head & IND_DONE) {
+ typeof(__cpu_soft_restart) *restart;
+
+ cpu_install_idmap();
+ restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart));
+ restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem,
+ 0, 0);
+ } else {
+ cpu_soft_restart(kimage->arch.kern_reloc, kimage->head,
+ kimage->start, kimage->arch.dtb_mem);
+ }
BUG(); /* Should never get here. */
}
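For the in-place case the relocation routine is bypassed altogether: the identity map is installed and __cpu_soft_restart is called through its physical address, so execution can continue once the MMU is switched off and control goes straight to the new kernel (or purgatory). Below is a commented rendering of the branch added in the hunk above; the comments describe the helpers as their names suggest and are explanatory only.

	if (kimage->head & IND_DONE) {
		typeof(__cpu_soft_restart) *restart;

		/*
		 * Install the 1:1 mapping so the reset code keeps running
		 * after the MMU is disabled, then call it via its physical
		 * address (function_nocfi() strips any CFI indirection).
		 */
		cpu_install_idmap();
		restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart));
		restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem,
			0, 0);
	} else {
		/*
		 * Relocation needed: run the staged copy of
		 * arm64_relocate_new_kernel first.
		 */
		cpu_soft_restart(kimage->arch.kern_reloc, kimage->head,
				 kimage->start, kimage->arch.dtb_mem);
	}

This split is also why the IND_DONE_BIT check can be dropped from arm64_relocate_new_kernel in the second file: the routine is now only ever entered when relocation is actually required.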
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index b78ea5de97a4..8058fabe0a76 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -32,8 +32,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
mov x16, x0 /* x16 = kimage_head */
mov x14, xzr /* x14 = entry ptr */
mov x13, xzr /* x13 = copy dest */
- /* Check if the new image needs relocation. */
- tbnz x16, IND_DONE_BIT, .Ldone
raw_dcache_line_size x15, x1 /* x15 = dcache line size */
.Lloop:
and x12, x16, PAGE_MASK /* x12 = addr */
@@ -65,7 +63,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
.Lnext:
ldr x16, [x14], #8 /* entry = *ptr++ */
tbz x16, IND_DONE_BIT, .Lloop /* while (!(entry & DONE)) */
-.Ldone:
/* wait for writes from copy_page to finish */
dsb nsh
ic iallu