author     Atish Patra <atish.patra@wdc.com>           2020-03-17 18:11:39 -0700
committer  Palmer Dabbelt <palmerdabbelt@google.com>   2020-03-31 11:25:50 -0700
commit     e011995e826f85fbe55dc7d4ce649461163d1052 (patch)
tree       1c65250470e736044cf4c6d4554b3f93c5d50c68 /arch/riscv
parent     1ef46c231df4b856559ec0234bfcbb41a1180b97 (diff)
download   linux-e011995e826f85fbe55dc7d4ce649461163d1052.tar.bz2
RISC-V: Move relocate and few other functions out of __init
The secondary hart booting and relocation code currently live in the .init
section. As a result, they are freed once the kernel has finished booting.
However, the ordered booting protocol and CPU hotplug require these functions
to remain present in order to bring up harts after the initial kernel boot.
Move the required functions to a different section and make sure that they
are placed within the first 2MB of the image, as the trampoline page
directory only maps the first 2MB.
Signed-off-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
Diffstat (limited to 'arch/riscv')
-rw-r--r--   arch/riscv/kernel/head.S          | 153
-rw-r--r--   arch/riscv/kernel/vmlinux.lds.S   |   5
2 files changed, 86 insertions(+), 72 deletions(-)
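The vmlinux.lds.S hunk below carries the key idea: __init_begin moves from before HEAD_TEXT_SECTION to after it, so everything emitted under __HEAD (the boot image header, relocate, the secondary hart entry path and .Lsecondary_park) survives the freeing of init memory and stays at the very start of the image, inside the 2MB that trampoline_pg_dir maps. A minimal sketch of the resulting layout (simplified; the real linker script contains many more sections and macros):

    SECTIONS
    {
            /* Beginning of code and text segment */
            . = LOAD_OFFSET;
            _start = .;

            /*
             * Resident head text: _start, relocate and the secondary hart
             * entry code. Kept first so it stays inside the first 2MB
             * superpage mapped by trampoline_pg_dir.
             */
            HEAD_TEXT_SECTION
            . = ALIGN(PAGE_SIZE);

            /* Everything from here up to __init_end is freed after boot. */
            __init_begin = .;
            INIT_TEXT_SECTION(PAGE_SIZE)
            INIT_DATA_SECTION(16)
    }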
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 85f2073e7fe4..173507395a6b 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -14,7 +14,7 @@
 #include <asm/hwcap.h>
 #include <asm/image.h>
 
-__INIT
+__HEAD
 ENTRY(_start)
         /*
          * Image header expected by Linux boot-loaders. The image header data
@@ -45,8 +45,85 @@ ENTRY(_start)
         .ascii RISCV_IMAGE_MAGIC2
         .word 0
 
-.global _start_kernel
-_start_kernel:
+.align 2
+#ifdef CONFIG_MMU
+relocate:
+        /* Relocate return address */
+        li a1, PAGE_OFFSET
+        la a2, _start
+        sub a1, a1, a2
+        add ra, ra, a1
+
+        /* Point stvec to virtual address of intruction after satp write */
+        la a2, 1f
+        add a2, a2, a1
+        csrw CSR_TVEC, a2
+
+        /* Compute satp for kernel page tables, but don't load it yet */
+        srl a2, a0, PAGE_SHIFT
+        li a1, SATP_MODE
+        or a2, a2, a1
+
+        /*
+         * Load trampoline page directory, which will cause us to trap to
+         * stvec if VA != PA, or simply fall through if VA == PA. We need a
+         * full fence here because setup_vm() just wrote these PTEs and we need
+         * to ensure the new translations are in use.
+         */
+        la a0, trampoline_pg_dir
+        srl a0, a0, PAGE_SHIFT
+        or a0, a0, a1
+        sfence.vma
+        csrw CSR_SATP, a0
+.align 2
+1:
+        /* Set trap vector to spin forever to help debug */
+        la a0, .Lsecondary_park
+        csrw CSR_TVEC, a0
+
+        /* Reload the global pointer */
+.option push
+.option norelax
+        la gp, __global_pointer$
+.option pop
+
+        /*
+         * Switch to kernel page tables. A full fence is necessary in order to
+         * avoid using the trampoline translations, which are only correct for
+         * the first superpage. Fetching the fence is guarnteed to work
+         * because that first superpage is translated the same way.
+         */
+        csrw CSR_SATP, a2
+        sfence.vma
+
+        ret
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_SMP
+        /* Set trap vector to spin forever to help debug */
+        la a3, .Lsecondary_park
+        csrw CSR_TVEC, a3
+
+        slli a3, a0, LGREG
+        .global secondary_start_common
+secondary_start_common:
+
+#ifdef CONFIG_MMU
+        /* Enable virtual memory and relocate to virtual address */
+        la a0, swapper_pg_dir
+        call relocate
+#endif
+        tail smp_callin
+#endif /* CONFIG_SMP */
+
+.Lsecondary_park:
+        /* We lack SMP support or have too many harts, so park this hart */
+        wfi
+        j .Lsecondary_park
+
+END(_start)
+
+        __INIT
+ENTRY(_start_kernel)
         /* Mask all interrupts */
         csrw CSR_IE, zero
         csrw CSR_IP, zero
@@ -134,59 +211,6 @@ clear_bss_done:
         call parse_dtb
         tail start_kernel
 
-#ifdef CONFIG_MMU
-relocate:
-        /* Relocate return address */
-        li a1, PAGE_OFFSET
-        la a2, _start
-        sub a1, a1, a2
-        add ra, ra, a1
-
-        /* Point stvec to virtual address of intruction after satp write */
-        la a2, 1f
-        add a2, a2, a1
-        csrw CSR_TVEC, a2
-
-        /* Compute satp for kernel page tables, but don't load it yet */
-        srl a2, a0, PAGE_SHIFT
-        li a1, SATP_MODE
-        or a2, a2, a1
-
-        /*
-         * Load trampoline page directory, which will cause us to trap to
-         * stvec if VA != PA, or simply fall through if VA == PA. We need a
-         * full fence here because setup_vm() just wrote these PTEs and we need
-         * to ensure the new translations are in use.
-         */
-        la a0, trampoline_pg_dir
-        srl a0, a0, PAGE_SHIFT
-        or a0, a0, a1
-        sfence.vma
-        csrw CSR_SATP, a0
-.align 2
-1:
-        /* Set trap vector to spin forever to help debug */
-        la a0, .Lsecondary_park
-        csrw CSR_TVEC, a0
-
-        /* Reload the global pointer */
-.option push
-.option norelax
-        la gp, __global_pointer$
-.option pop
-
-        /*
-         * Switch to kernel page tables. A full fence is necessary in order to
-         * avoid using the trampoline translations, which are only correct for
-         * the first superpage. Fetching the fence is guarnteed to work
-         * because that first superpage is translated the same way.
-         */
-        csrw CSR_SATP, a2
-        sfence.vma
-
-        ret
-#endif /* CONFIG_MMU */
-
 .Lsecondary_start:
 #ifdef CONFIG_SMP
         /* Set trap vector to spin forever to help debug */
@@ -211,16 +235,10 @@ relocate:
         beqz tp, .Lwait_for_cpu_up
         fence
 
-#ifdef CONFIG_MMU
-        /* Enable virtual memory and relocate to virtual address */
-        la a0, swapper_pg_dir
-        call relocate
+        tail secondary_start_common
 #endif
 
-        tail smp_callin
-#endif
-
-END(_start)
+END(_start_kernel)
 
 #ifdef CONFIG_RISCV_M_MODE
 ENTRY(reset_regs)
@@ -301,13 +319,6 @@ ENTRY(reset_regs)
 END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */
 
-.section ".text", "ax",@progbits
-.align 2
-.Lsecondary_park:
-        /* We lack SMP support or have too many harts, so park this hart */
-        wfi
-        j .Lsecondary_park
-
 __PAGE_ALIGNED_BSS
         /* Empty zero page */
         .balign PAGE_SIZE
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 4ad3c8eb241d..435cd60dca04 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -11,6 +11,7 @@
 #include <asm/thread_info.h>
 #include <asm/set_memory.h>
 
+#include <linux/sizes.h>
 OUTPUT_ARCH(riscv)
 ENTRY(_start)
 
@@ -21,8 +22,10 @@ SECTIONS
         /* Beginning of code and text segment */
         . = LOAD_OFFSET;
         _start = .;
-        __init_begin = .;
         HEAD_TEXT_SECTION
+        . = ALIGN(PAGE_SIZE);
+
+        __init_begin = .;
         INIT_TEXT_SECTION(PAGE_SIZE)
         INIT_DATA_SECTION(16)
         /* we have to discard exit text and such at runtime, not link time */
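The new #include <linux/sizes.h> makes the SZ_* constants (such as SZ_2M) available to the linker script. It is not used in this hunk itself, but one way such a constant could enforce the first-2MB placement requirement described in the commit message is a link-time assertion along these lines (hypothetical sketch, not part of this patch):

    /*
     * Hypothetical guard: fail the link if the resident head text grows
     * past the 2MB superpage that trampoline_pg_dir maps.
     */
    ASSERT(__init_begin - _start <= SZ_2M,
           "head text must fit within the trampoline's 2MB mapping")

ASSERT() is a standard GNU ld construct, so a check like this costs nothing at runtime; it would turn a silent boot hang into a link error.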