author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-08-07 10:11:12 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-08-07 10:11:12 -0700
commit | dbf83817315d9ce93b3e5b1c83a167f537245bd8 (patch)
tree | 63bd286d192b0867f608935d1ee114d9eb157941 /arch/riscv
parent | e1ec517e18acc0aa9795ff92c15f0adabcb12db9 (diff)
parent | 40284a072c42f6177184fb1f62ba94c69e0c0277 (diff)
Merge tag 'riscv-for-linus-5.9-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:
"We have a lot of new kernel features for this merge window:
- ARCH_SUPPORTS_ATOMIC_RMW, to allow OSQ locks to be enabled
- The ability to enable NO_HZ_FULL
- Support for enabling kcov, kmemleak, stack protector, and VM
debugging
- JUMP_LABEL support
There are also a handful of cleanups"
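The JUMP_LABEL item above refers to the kernel's static-key mechanism, which this merge lets RISC-V back with patchable branches. As a rough, illustrative sketch of how generic kernel code consumes that facility (the key and function names below are hypothetical, not part of this merge), a static key compiles down to the nop/jal sequences added by the arch_static_branch() and arch_jump_label_transform() hunks further down:

```c
/*
 * Illustrative sketch only: a static key as used by generic kernel code.
 * The key and function names here are hypothetical, not from this merge.
 */
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(riscv_demo_key);

void riscv_demo_hot_path(void)
{
	/*
	 * With HAVE_ARCH_JUMP_LABEL this compiles to a single nop
	 * (patched to a jal when the key is enabled) instead of a
	 * load plus conditional branch on the key's value.
	 */
	if (static_branch_unlikely(&riscv_demo_key))
		pr_info("slow path enabled\n");
}

void riscv_demo_enable(void)
{
	/* Patches every branch site that references the key. */
	static_branch_enable(&riscv_demo_key);
}
```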
* tag 'riscv-for-linus-5.9-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (24 commits)
riscv: disable stack-protector for vDSO
RISC-V: Fix build warning for smpboot.c
riscv: fix build warning of mm/pageattr
riscv: Fix build warning for mm/init
RISC-V: Setup exception vector early
riscv: Select ARCH_HAS_DEBUG_VM_PGTABLE
riscv: Use generic pgprot_* macros from <linux/pgtable.h>
mm: pgtable: Make generic pgprot_* macros available for no-MMU
riscv: Cleanup unnecessary define in asm-offset.c
riscv: Add jump-label implementation
riscv: Support R_RISCV_ADD64 and R_RISCV_SUB64 relocs
Replace HTTP links with HTTPS ones: RISC-V
riscv: Add STACKPROTECTOR supported
riscv: Fix typo in asm/hwcap.h uapi header
riscv: Add kmemleak support
riscv: Allow building with kcov coverage
riscv: Enable context tracking
riscv: Support irq_work via self IPIs
riscv: Enable LOCKDEP_SUPPORT & fixup TRACE_IRQFLAGS_SUPPORT
riscv: Fixup lockdep_assert_held with wrong param cpu_running
...
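The "Support irq_work via self IPIs" commit listed above adds an IPI_IRQ_WORK IPI and wires it to arch_irq_work_raise() (see the smp.c hunk in the diff below). As a hedged sketch of the generic facility this enables (the work item and callback names are illustrative, not from this merge), irq_work lets code running in hard-IRQ or other restricted contexts defer a callback until the self-IPI is handled:

```c
/*
 * Illustrative sketch only: queueing irq_work from a restricted context.
 * The work item and callback names are hypothetical, not from this merge.
 */
#include <linux/irq_work.h>
#include <linux/printk.h>

static void riscv_demo_work_fn(struct irq_work *work)
{
	/* Runs from the IPI_IRQ_WORK handler added in smp.c below. */
	pr_info("irq_work ran after the self-IPI\n");
}

static DEFINE_IRQ_WORK(riscv_demo_work, riscv_demo_work_fn);

void riscv_demo_from_irq(void)
{
	/*
	 * Safe where sleeping or taking locks is not allowed; the CPU
	 * raises an IPI to itself via arch_irq_work_raise() and the
	 * callback runs when that IPI is serviced.
	 */
	irq_work_queue(&riscv_demo_work);
}
```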
Diffstat (limited to 'arch/riscv')
28 files changed, 316 insertions(+), 34 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 6c4bce7cad8a..7b5905529146 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -13,11 +13,14 @@ config 32BIT
 config RISCV
 	def_bool y
 	select ARCH_CLOCKSOURCE_INIT
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_HAS_BINFMT_FLAT
+	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
 	select ARCH_HAS_DEBUG_WX
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_GIGANTIC_PAGE
+	select ARCH_HAS_KCOV
 	select ARCH_HAS_MMIOWB
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_DIRECT_MAP
@@ -47,6 +50,8 @@ config RISCV
 	select GENERIC_TIME_VSYSCALL if MMU && 64BIT
 	select HANDLE_DOMAIN_IRQ
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if MMU && 64BIT
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KGDB_QXFER_PKT
@@ -54,14 +59,18 @@ config RISCV
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ASM_MODVERSIONS
+	select HAVE_CONTEXT_TRACKING
+	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_EBPF_JIT if MMU
 	select HAVE_FUTEX_CMPXCHG if FUTEX
+	select HAVE_GCC_PLUGINS
 	select HAVE_GENERIC_VDSO if MMU && 64BIT
 	select HAVE_PCI
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA if MODULES
@@ -179,6 +188,9 @@ config PGTABLE_LEVELS
 	default 3 if 64BIT
 	default 2
 
+config LOCKDEP_SUPPORT
+	def_bool y
+
 source "arch/riscv/Kconfig.socs"
 
 menu "Platform type"
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 3530c59b3ea7..c59fca695f9d 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -14,6 +14,8 @@
 # Based on the ia64 and arm64 boot/Makefile.
 #
 
+KCOV_INSTRUMENT := n
+
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 
 targets := Image loader
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 4da4886246a4..d58c93efb603 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -17,6 +17,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_SMP=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_NET=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index b48138e329ea..cd1df62b13c7 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -33,6 +33,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_CMDLINE="earlycon console=ttySIF0"
 CONFIG_CMDLINE_FORCE=y
+CONFIG_JUMP_LABEL=y
 # CONFIG_BLOCK is not set
 CONFIG_BINFMT_FLAT=y
 # CONFIG_COREDUMP is not set
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index cf74e179bf90..f27596e9663e 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -30,6 +30,7 @@ CONFIG_MAXPHYSMEM_2GB=y
 CONFIG_SMP=y
 CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
 CONFIG_CMDLINE_FORCE=y
+CONFIG_JUMP_LABEL=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_MSDOS_PARTITION is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 05bbf5240569..3a55f0e00d6c 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -17,6 +17,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_SOC_VIRT=y
 CONFIG_ARCH_RV32I=y
 CONFIG_SMP=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_NET=y
diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h
new file mode 100644
index 000000000000..d6c277992f76
--- /dev/null
+++ b/arch/riscv/include/asm/irq_work.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_IRQ_WORK_H
+#define _ASM_RISCV_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return true;
+}
+extern void arch_irq_work_raise(void);
+#endif /* _ASM_RISCV_IRQ_WORK_H */
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
new file mode 100644
index 000000000000..38af2ec7b9bf
--- /dev/null
+++ b/arch/riscv/include/asm/jump_label.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/include/asm/jump_label.h
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
+{
+	asm_volatile_goto(
+		"	.option push				\n\t"
+		"	.option norelax				\n\t"
+		"	.option norvc				\n\t"
+		"1:	nop					\n\t"
+		"	.option pop				\n\t"
+		"	.pushsection	__jump_table, \"aw\"	\n\t"
+		"	.align		" RISCV_LGPTR "		\n\t"
+		"	.long		1b - ., %l[label] - .	\n\t"
+		"	" RISCV_PTR "	%0 - .			\n\t"
+		"	.popsection				\n\t"
+		:  :  "i"(&((char *)key)[branch]) :  : label);
+
+	return false;
+label:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
+{
+	asm_volatile_goto(
+		"	.option push				\n\t"
+		"	.option norelax				\n\t"
+		"	.option norvc				\n\t"
+		"1:	jal		zero, %l[label]		\n\t"
+		"	.option pop				\n\t"
+		"	.pushsection	__jump_table, \"aw\"	\n\t"
+		"	.align		" RISCV_LGPTR "		\n\t"
+		"	.long		1b - ., %l[label] - .	\n\t"
+		"	" RISCV_PTR "	%0 - .			\n\t"
+		"	.popsection				\n\t"
+		:  :  "i"(&((char *)key)[branch]) :  : label);
+
+	return false;
+label:
+	return true;
+}
+
+#endif  /* __ASSEMBLY__ */
+#endif	/* __ASM_JUMP_LABEL_H */
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index 56053c9838b2..aff6c33ab0c0 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -14,12 +14,6 @@
 #include <linux/types.h>
 #include <asm/mmiowb.h>
 
-#ifndef CONFIG_MMU
-#define pgprot_noncached(x) (x)
-#define pgprot_writecombine(x) (x)
-#define pgprot_device(x) (x)
-#endif /* CONFIG_MMU */
-
 /* Generic IO read/write.  These perform native-endian accesses. */
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 40bb1c15a731..6dfd2a1446d5 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -40,6 +40,9 @@ void arch_send_call_function_single_ipi(int cpu);
 int riscv_hartid_to_cpuid(int hartid);
 void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
 
+/* Secondary hart entry */
+asmlinkage void smp_callin(void);
+
 /*
  * Obtains the hart ID of the currently executing task.  This relies on
  * THREAD_INFO_IN_TASK, but we define that unconditionally.
diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
new file mode 100644
index 000000000000..d95f7b2a7f37
--- /dev/null
+++ b/arch/riscv/include/asm/stackprotector.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_STACKPROTECTOR_H
+#define _ASM_RISCV_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+#include <asm/timex.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	unsigned long canary;
+	unsigned long tsc;
+
+	/* Try to get a semi random initial value. */
+	get_random_bytes(&canary, sizeof(canary));
+	tsc = get_cycles();
+	canary += tsc + (tsc << BITS_PER_LONG/2);
+	canary ^= LINUX_VERSION_CODE;
+	canary &= CANARY_MASK;
+
+	current->stack_canary = canary;
+	__stack_chk_guard = current->stack_canary;
+}
+#endif /* _ASM_RISCV_STACKPROTECTOR_H */
diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h
index dee98ee28318..46dc3f5ee99f 100644
--- a/arch/riscv/include/uapi/asm/hwcap.h
+++ b/arch/riscv/include/uapi/asm/hwcap.h
@@ -11,7 +11,7 @@
 /*
  * Linux saves the floating-point registers according to the ISA Linux is
  * executing on, as opposed to the ISA the user program is compiled for.  This
- * is necessary for a handful of esoteric use cases: for example, userpsace
+ * is necessary for a handful of esoteric use cases: for example, userspace
  * threading libraries must be able to examine the actual machine state in
  * order to fully reconstruct the state of a thread.
  */
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 13ce76cc5aff..4b989ae15d59 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -12,7 +12,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ * along with this program.  If not, see <https://www.gnu.org/licenses/>.
  */
 
 #ifdef __LP64__
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index b355cf485671..a5287ab9f7f2 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -53,4 +53,6 @@ endif
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu-hotplug.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
+
 clean:
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 07cb9c10de4e..db203442c08f 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -27,9 +27,6 @@ void asm_offsets(void)
 	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
 	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
 	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
-	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
-	OFFSET(TASK_STACK, task_struct, stack);
-	OFFSET(TASK_TI, task_struct, thread_info);
 	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
 	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
 	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index cae7e6d4c7ef..524d918f3601 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -97,19 +97,36 @@ _save_context:
 	la gp, __global_pointer$
 .option pop
 
-	la ra, ret_from_exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call trace_hardirqs_off
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING
+	/* If previous state is in user mode, call context_tracking_user_exit. */
+	li a0, SR_PP
+	and a0, s1, a0
+	bnez a0, skip_context_tracking
+	call context_tracking_user_exit
+skip_context_tracking:
+#endif
+
 	/*
 	 * MSB of cause differentiates between
 	 * interrupts and exceptions
 	 */
	bge s4, zero, 1f
 
+	la ra, ret_from_exception
+
 	/* Handle interrupts */
 	move a0, sp /* pt_regs */
 	la a1, handle_arch_irq
 	REG_L a1, (a1)
 	jr a1
 1:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call trace_hardirqs_on
+#endif
 	/*
 	 * Exceptions run with interrupts enabled or disabled depending on the
 	 * state of SR_PIE in m/sstatus.
@@ -119,6 +136,7 @@ _save_context:
 	csrs CSR_STATUS, SR_IE
 
 1:
+	la ra, ret_from_exception
 	/* Handle syscalls */
 	li t0, EXC_SYSCALL
 	beq s4, t0, handle_syscall
@@ -137,6 +155,17 @@ _save_context:
 	tail do_trap_unknown
 
 handle_syscall:
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+	/* Recover a0 - a7 for system calls */
+	REG_L a0, PT_A0(sp)
+	REG_L a1, PT_A1(sp)
+	REG_L a2, PT_A2(sp)
+	REG_L a3, PT_A3(sp)
+	REG_L a4, PT_A4(sp)
+	REG_L a5, PT_A5(sp)
+	REG_L a6, PT_A6(sp)
+	REG_L a7, PT_A7(sp)
+#endif
 	/* save the initial A0 value (needed in signal handlers) */
 	REG_S a0, PT_ORIG_A0(sp)
 	/*
@@ -190,6 +219,9 @@ ret_from_syscall_rejected:
 ret_from_exception:
 	REG_L s0, PT_STATUS(sp)
 	csrc CSR_STATUS, SR_IE
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call trace_hardirqs_off
+#endif
 #ifdef CONFIG_RISCV_M_MODE
 	/* the MPP value is too large to be used as an immediate arg for addi */
 	li t0, SR_MPP
@@ -205,6 +237,10 @@ resume_userspace:
 	andi s1, s0, _TIF_WORK_MASK
 	bnez s1, work_pending
 
+#ifdef CONFIG_CONTEXT_TRACKING
+	call context_tracking_user_enter
+#endif
+
 	/* Save unwound kernel stack pointer in thread_info */
 	addi s0, sp, PT_SIZE_ON_STACK
 	REG_S s0, TASK_TI_KERNEL_SP(tp)
@@ -216,6 +252,16 @@ resume_userspace:
 	csrw CSR_SCRATCH, tp
 
 restore_all:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	REG_L s1, PT_STATUS(sp)
+	andi t0, s1, SR_PIE
+	beqz t0, 1f
+	call trace_hardirqs_on
+	j 2f
+1:
+	call trace_hardirqs_off
+2:
+#endif
 	REG_L a0, PT_STATUS(sp)
 	/*
 	 * The current load reservation is effectively part of the processor's
@@ -389,12 +435,8 @@ ENTRY(__switch_to)
 	lw a4, TASK_TI_CPU(a1)
 	sw a3, TASK_TI_CPU(a1)
 	sw a4, TASK_TI_CPU(a0)
-#if TASK_TI != 0
-#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
-	addi tp, a1, TASK_TI
-#else
+	/* The offset of thread_info in task_struct is zero. */
 	move tp, a1
-#endif
 
 	ret
 ENDPROC(__switch_to)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 7ed1b22950fd..d0c5c316e9bb 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -77,10 +77,16 @@ relocate:
 	csrw CSR_SATP, a0
 .align 2
 1:
-	/* Set trap vector to spin forever to help debug */
-	la a0, .Lsecondary_park
+	/* Set trap vector to exception handler */
+	la a0, handle_exception
 	csrw CSR_TVEC, a0
 
+	/*
+	 * Set sup0 scratch register to 0, indicating to exception vector that
+	 * we are presently executing in kernel.
+	 */
+	csrw CSR_SCRATCH, zero
+
 	/* Reload the global pointer */
 .option push
 .option norelax
diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
new file mode 100644
index 000000000000..20e09056d141
--- /dev/null
+++ b/arch/riscv/kernel/jump_label.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/kernel/jump_label.c
+ */
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <asm/bug.h>
+#include <asm/patch.h>
+
+#define RISCV_INSN_NOP 0x00000013U
+#define RISCV_INSN_JAL 0x0000006fU
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	void *addr = (void *)jump_entry_code(entry);
+	u32 insn;
+
+	if (type == JUMP_LABEL_JMP) {
+		long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+		if (WARN_ON(offset & 1 || offset < -524288 || offset >= 524288))
+			return;
+
+		insn = RISCV_INSN_JAL |
+			(((u32)offset & GENMASK(19, 12)) << (12 - 12)) |
+			(((u32)offset & GENMASK(11, 11)) << (20 - 11)) |
+			(((u32)offset & GENMASK(10, 1)) << (21 - 1)) |
+			(((u32)offset & GENMASK(20, 20)) << (31 - 20));
+	} else {
+		insn = RISCV_INSN_NOP;
+	}
+
+	mutex_lock(&text_mutex);
+	patch_text_nosync(addr, &insn, sizeof(insn));
+	mutex_unlock(&text_mutex);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type)
+{
+	/*
+	 * We use the same instructions in the arch_static_branch and
+	 * arch_static_branch_jump inline functions, so there's no
+	 * need to patch them up here.
+	 * The core will call arch_jump_label_transform when those
+	 * instructions need to be replaced.
+	 */
+}
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 7191342c54da..104fba889cf7 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -263,6 +263,13 @@ static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
 	return 0;
 }
 
+static int apply_r_riscv_add64_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	*(u64 *)location += (u64)v;
+	return 0;
+}
+
 static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
 				    Elf_Addr v)
 {
@@ -270,6 +277,13 @@ static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
 	return 0;
 }
 
+static int apply_r_riscv_sub64_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	*(u64 *)location -= (u64)v;
+	return 0;
+}
+
 static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
 				     Elf_Addr v) = {
 	[R_RISCV_32]		= apply_r_riscv_32_rela,
@@ -290,7 +304,9 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
 	[R_RISCV_RELAX]		= apply_r_riscv_relax_rela,
 	[R_RISCV_ALIGN]		= apply_r_riscv_align_rela,
 	[R_RISCV_ADD32]		= apply_r_riscv_add32_rela,
+	[R_RISCV_ADD64]		= apply_r_riscv_add64_rela,
 	[R_RISCV_SUB32]		= apply_r_riscv_sub32_rela,
+	[R_RISCV_SUB64]		= apply_r_riscv_sub64_rela,
 };
 
 int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 31f39442df72..2b97c493427c 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -24,6 +24,12 @@
 
 register unsigned long gp_in_global __asm__("gp");
 
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 extern asmlinkage void ret_from_fork(void);
 extern asmlinkage void ret_from_kernel_thread(void);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index b1d4f452f843..554b0fb47060 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -16,6 +16,7 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
+#include <linux/irq_work.h>
 
 #include <asm/clint.h>
 #include <asm/sbi.h>
@@ -26,6 +27,7 @@ enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
 	IPI_MAX
 };
 
@@ -123,6 +125,13 @@ static inline void clear_ipi(void)
 	clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
 }
 
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
+}
+#endif
+
 void handle_IPI(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
@@ -158,6 +167,11 @@ void handle_IPI(struct pt_regs *regs)
 			ipi_stop();
 		}
 
+		if (ops & (1 << IPI_IRQ_WORK)) {
+			stats[IPI_IRQ_WORK]++;
+			irq_work_run();
+		}
+
 		BUG_ON((ops >> IPI_MAX) != 0);
 
 		/* Order data access and bit testing. */
@@ -173,6 +187,7 @@ static const char * const ipi_names[] = {
 	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
 	[IPI_CALL_FUNC]		= "Function call interrupts",
 	[IPI_CPU_STOP]		= "CPU stop interrupts",
+	[IPI_IRQ_WORK]		= "IRQ work interrupts",
 };
 
 void show_ipi_stats(struct seq_file *p, int prec)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 4e9922790f6e..356825a57551 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -106,7 +106,7 @@ void __init setup_smp(void)
 	}
 }
 
-int start_secondary_cpu(int cpu, struct task_struct *tidle)
+static int start_secondary_cpu(int cpu, struct task_struct *tidle)
 {
 	if (cpu_ops[cpu]->cpu_start)
 		return cpu_ops[cpu]->cpu_start(cpu, tidle);
@@ -121,7 +121,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	ret = start_secondary_cpu(cpu, tidle);
 	if (!ret) {
-		lockdep_assert_held(&cpu_running);
 		wait_for_completion_timeout(&cpu_running,
 					    msecs_to_jiffies(1000));
 
@@ -146,6 +145,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 asmlinkage __visible void smp_callin(void)
 {
 	struct mm_struct *mm = &init_mm;
+	unsigned int curr_cpuid = smp_processor_id();
 
 	if (!IS_ENABLED(CONFIG_RISCV_SBI))
 		clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
@@ -154,10 +154,10 @@ asmlinkage __visible void smp_callin(void)
 	mmgrab(mm);
 	current->active_mm = mm;
 
-	trap_init();
-	notify_cpu_starting(smp_processor_id());
-	update_siblings_masks(smp_processor_id());
-	set_cpu_online(smp_processor_id(), 1);
+	notify_cpu_starting(curr_cpuid);
+	update_siblings_masks(curr_cpuid);
+	set_cpu_online(curr_cpuid, 1);
+
 	/*
 	 * Remote TLB flushes are ignored while the CPU is offline, so emit
 	 * a local TLB flush right now just in case.
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 7d95cce5e47c..ad14f4466d92 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -174,13 +174,7 @@ int is_valid_bugaddr(unsigned long pc)
 }
 #endif /* CONFIG_GENERIC_BUG */
 
+/* stvec & scratch is already set from head.S */
 void trap_init(void)
 {
-	/*
-	 * Set sup0 scratch register to 0, indicating to exception vector
-	 * that we are presently executing in the kernel
-	 */
-	csr_write(CSR_SCRATCH, 0);
-	/* Set the exception vector address */
-	csr_write(CSR_TVEC, &handle_exception);
 }
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index e4c7c2c8a02f..478e7338ddc1 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -16,6 +16,8 @@ vdso-syms += flush_icache
 # Files to link into the vdso
 obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
 
+ccflags-y := -fno-stack-protector
+
 ifneq ($(c-gettimeofday-y),)
   CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
 endif
@@ -32,6 +34,7 @@ CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
 
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
 
 # Force dependency
 $(obj)/vdso.o: $(obj)/vdso.so
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index e6f8016b366a..f3586e31ed1e 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -22,6 +22,7 @@ SECTIONS
 	/* Beginning of code and text segment */
 	. = LOAD_OFFSET;
 	_start = .;
+	_stext = .;
 	HEAD_TEXT_SECTION
 	. = ALIGN(PAGE_SIZE);
 
@@ -54,7 +55,6 @@ SECTIONS
 	. = ALIGN(SECTION_ALIGN);
 	.text : {
 		_text = .;
-		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
 		CPUIDLE_TEXT
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 363ef01c30b1..c0185e556ca5 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -5,6 +5,8 @@ ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_init.o = -pg
 endif
 
+KCOV_INSTRUMENT_init.o := n
+
 obj-y += init.o
 obj-y += extable.o
 obj-$(CONFIG_MMU) += fault.o pageattr.o
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 79e9d55bdf1a..50bcd9f594d7 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -541,6 +541,32 @@ void mark_rodata_ro(void)
 }
 #endif
 
+static void __init resource_init(void)
+{
+	struct memblock_region *region;
+
+	for_each_memblock(memory, region) {
+		struct resource *res;
+
+		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(struct resource));
+
+		if (memblock_is_nomap(region)) {
+			res->name = "reserved";
+			res->flags = IORESOURCE_MEM;
+		} else {
+			res->name = "System RAM";
+			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+		}
+		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+
+		request_resource(&iomem_resource, res);
+	}
+}
+
 void __init paging_init(void)
 {
 	setup_vm_final();
@@ -548,6 +574,7 @@ void __init paging_init(void)
 	sparse_init();
 	setup_zero_page();
 	zone_sizes_init();
+	resource_init();
 }
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 289a9a5ea5b5..19fecb362d81 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -7,6 +7,7 @@
 #include <linux/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/bitops.h>
+#include <asm/set_memory.h>
 
 struct pageattr_masks {
 	pgprot_t set_mask;
@@ -94,7 +95,7 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next,
 	return 0;
 }
 
-const static struct mm_walk_ops pageattr_ops = {
+static const struct mm_walk_ops pageattr_ops = {
 	.pgd_entry = pageattr_pgd_entry,
 	.p4d_entry = pageattr_p4d_entry,
 	.pud_entry = pageattr_pud_entry,
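For reference, the arch_jump_label_transform() hunk above packs a signed, even 21-bit branch offset into RISC-V's J-type (jal) instruction format. The following standalone sketch (plain userspace C with hypothetical names, not kernel code) performs the same bit shuffling and can be used to sanity-check an encoding:

```c
#include <stdint.h>
#include <stdio.h>

#define RISCV_INSN_JAL 0x0000006fU

/* Pack a signed, even, 21-bit offset into a "jal zero, offset" instruction. */
static uint32_t riscv_jal_encode(long offset)
{
	uint32_t off = (uint32_t)offset;

	return RISCV_INSN_JAL |
	       (off & 0x000ff000u) |          /* offset[19:12] -> bits 19:12 */
	       ((off & 0x00000800u) << 9) |   /* offset[11]    -> bit  20    */
	       ((off & 0x000007feu) << 20) |  /* offset[10:1]  -> bits 30:21 */
	       ((off & 0x00100000u) << 11);   /* offset[20]    -> bit  31    */
}

int main(void)
{
	/* "jal zero, +8" should encode as 0x0080006f. */
	printf("0x%08x\n", (unsigned int)riscv_jal_encode(8));
	return 0;
}
```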