Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig | 26
-rw-r--r--  arch/s390/boot/compressed/decompressor.h | 1
-rw-r--r--  arch/s390/boot/head.S | 54
-rw-r--r--  arch/s390/boot/ipl_parm.c | 4
-rw-r--r--  arch/s390/boot/pgm_check_info.c | 4
-rw-r--r--  arch/s390/boot/startup.c | 8
-rw-r--r--  arch/s390/configs/debug_defconfig | 9
-rw-r--r--  arch/s390/configs/defconfig | 6
-rw-r--r--  arch/s390/include/asm/barrier.h | 24
-rw-r--r--  arch/s390/include/asm/bitops.h | 2
-rw-r--r--  arch/s390/include/asm/cpu.h | 3
-rw-r--r--  arch/s390/include/asm/debug.h | 2
-rw-r--r--  arch/s390/include/asm/ftrace.h | 58
-rw-r--r--  arch/s390/include/asm/jump_label.h | 2
-rw-r--r--  arch/s390/include/asm/livepatch.h | 4
-rw-r--r--  arch/s390/include/asm/lowcore.h | 9
-rw-r--r--  arch/s390/include/asm/nospec-branch.h | 5
-rw-r--r--  arch/s390/include/asm/pgtable.h | 12
-rw-r--r--  arch/s390/include/asm/ptrace.h | 23
-rw-r--r--  arch/s390/include/asm/sclp.h | 1
-rw-r--r--  arch/s390/include/asm/setup.h | 9
-rw-r--r--  arch/s390/include/asm/string.h | 4
-rw-r--r--  arch/s390/include/asm/text-patching.h | 16
-rw-r--r--  arch/s390/include/uapi/asm/setup.h | 13
-rw-r--r--  arch/s390/kernel/alternative.c | 20
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 7
-rw-r--r--  arch/s390/kernel/cpcmd.c | 6
-rw-r--r--  arch/s390/kernel/dumpstack.c | 2
-rw-r--r--  arch/s390/kernel/early.c | 3
-rw-r--r--  arch/s390/kernel/entry.S | 45
-rw-r--r--  arch/s390/kernel/entry.h | 1
-rw-r--r--  arch/s390/kernel/ftrace.c | 96
-rw-r--r--  arch/s390/kernel/head64.S | 18
-rw-r--r--  arch/s390/kernel/irq.c | 10
-rw-r--r--  arch/s390/kernel/jump_label.c | 34
-rw-r--r--  arch/s390/kernel/kprobes.c | 48
-rw-r--r--  arch/s390/kernel/machine_kexec_file.c | 35
-rw-r--r--  arch/s390/kernel/mcount.S | 64
-rw-r--r--  arch/s390/kernel/nospec-branch.c | 2
-rw-r--r--  arch/s390/kernel/nospec-sysfs.c | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 228
-rw-r--r--  arch/s390/kernel/process.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 42
-rw-r--r--  arch/s390/kernel/syscall.c | 2
-rw-r--r--  arch/s390/kernel/traps.c | 10
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/s390/kvm/interrupt.c | 2
-rw-r--r--  arch/s390/lib/Makefile | 2
-rw-r--r--  arch/s390/lib/spinlock.c | 2
-rw-r--r--  arch/s390/lib/string.c | 45
-rw-r--r--  arch/s390/lib/test_kprobes.c | 75
-rw-r--r--  arch/s390/lib/test_kprobes.h | 10
-rw-r--r--  arch/s390/lib/test_kprobes_asm.S | 45
-rw-r--r--  arch/s390/lib/test_unwind.c | 169
-rw-r--r--  arch/s390/mm/cmm.c | 11
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 14
-rw-r--r--  arch/s390/mm/pageattr.c | 4
-rw-r--r--  arch/s390/mm/vmem.c | 10
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 6
-rw-r--r--  arch/s390/pci/pci_dma.c | 25
-rw-r--r--  arch/s390/pci/pci_event.c | 4
-rw-r--r--  arch/s390/pci/pci_sysfs.c | 8
-rw-r--r--  arch/x86/Kconfig | 2
63 files changed, 918 insertions, 493 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b86de61b8caa..8857ec3b97eb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -153,12 +153,15 @@ config S390
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FENTRY
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
@@ -190,6 +193,7 @@ config S390
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
+ select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
@@ -434,6 +438,14 @@ endchoice
config 64BIT
def_bool y
+config COMMAND_LINE_SIZE
+ int "Maximum size of kernel command line"
+ default 4096
+ range 896 1048576
+ help
+ This allows you to specify the maximum length of the kernel command
+ line.
+
config COMPAT
def_bool y
prompt "Kernel support for 31 bit emulation"
@@ -938,6 +950,8 @@ menu "Selftests"
config S390_UNWIND_SELFTEST
def_tristate n
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
prompt "Test unwind functions"
help
This option enables s390 specific stack unwinder testing kernel
@@ -946,4 +960,16 @@ config S390_UNWIND_SELFTEST
Say N if you are unsure.
+config S390_KPROBES_SANITY_TEST
+ def_tristate n
+ prompt "Enable s390 specific kprobes tests"
+ depends on KPROBES
+ depends on KUNIT
+ help
+ This option enables an s390 specific kprobes test module. This option
+ is not useful for distributions or general kernels, but only for kernel
+ developers working on architecture code.
+
+ Say N if you are unsure.
+
endmenu
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h
index a59f75c5b049..f75cc31a77dd 100644
--- a/arch/s390/boot/compressed/decompressor.h
+++ b/arch/s390/boot/compressed/decompressor.h
@@ -24,6 +24,7 @@ struct vmlinux_info {
unsigned long dynsym_start;
unsigned long rela_dyn_start;
unsigned long rela_dyn_end;
+ unsigned long amode31_size;
};
/* Symbols defined by linker scripts */
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 40f4cff538b8..3a252d140c55 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -184,35 +184,23 @@ iplstart:
bas %r14,.Lloader # load parameter file
ltr %r2,%r2 # got anything ?
bz .Lnopf
- chi %r2,895
- bnh .Lnotrunc
- la %r2,895
+ l %r3,MAX_COMMAND_LINE_SIZE+ARCH_OFFSET-PARMAREA(%r12)
+ ahi %r3,-1
+ clr %r2,%r3
+ bl .Lnotrunc
+ lr %r2,%r3
.Lnotrunc:
l %r4,.Linitrd
clc 0(3,%r4),.L_hdr # if it is HDRx
bz .Lagain1 # skip dataset header
clc 0(3,%r4),.L_eof # if it is EOFx
bz .Lagain1 # skip dateset trailer
- la %r5,0(%r4,%r2)
- lr %r3,%r2
- la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
- mvc 0(256,%r3),0(%r4)
- mvc 256(256,%r3),256(%r4)
- mvc 512(256,%r3),512(%r4)
- mvc 768(122,%r3),768(%r4)
- slr %r0,%r0
- b .Lcntlp
-.Ldelspc:
- ic %r0,0(%r2,%r3)
- chi %r0,0x20 # is it a space ?
- be .Lcntlp
- ahi %r2,1
- b .Leolp
-.Lcntlp:
- brct %r2,.Ldelspc
-.Leolp:
- slr %r0,%r0
- stc %r0,0(%r2,%r3) # terminate buffer
+
+ lr %r5,%r2
+ la %r6,COMMAND_LINE-PARMAREA(%r12)
+ lr %r7,%r2
+ ahi %r7,1
+ mvcl %r6,%r4
.Lnopf:
#
@@ -317,6 +305,7 @@ SYM_CODE_START_LOCAL(startup_normal)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
xc 0xf00(256),0xf00
+ lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
stcke __LC_BOOT_CLOCK
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
spt 6f-.LPG0(%r13)
@@ -335,6 +324,22 @@ SYM_CODE_END(startup_normal)
.quad 0x0000000180000000,startup_pgm_check_handler
.Lio_new_psw:
.quad 0x0002000180000000,0x1f0 # disabled wait
+.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
+ .quad 0 # cr1: primary space segment table
+ .quad 0 # cr2: dispatchable unit control table
+ .quad 0 # cr3: instruction authorization
+ .quad 0xffff # cr4: instruction authorization
+ .quad 0 # cr5: primary-aste origin
+ .quad 0 # cr6: I/O interrupts
+ .quad 0 # cr7: secondary space segment table
+ .quad 0x0000000000008000 # cr8: access registers translation
+ .quad 0 # cr9: tracing off
+ .quad 0 # cr10: tracing off
+ .quad 0 # cr11: tracing off
+ .quad 0 # cr12: tracing off
+ .quad 0 # cr13: home space segment table
+ .quad 0xc0000000 # cr14: machine check handling off
+ .quad 0 # cr15: linkage stack operations
#include "head_kdump.S"
@@ -377,11 +382,10 @@ SYM_DATA_START(parmarea)
.quad 0 # OLDMEM_BASE
.quad 0 # OLDMEM_SIZE
.quad kernel_version # points to kernel version string
+ .quad COMMAND_LINE_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
.org PARMAREA+__PARMAREA_SIZE
SYM_DATA_END(parmarea)
-
- .org HEAD_END
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 0f84c072625e..9ed7e29c81d9 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -170,10 +170,10 @@ static inline int has_ebcdic_char(const char *str)
void setup_boot_command_line(void)
{
- parmarea.command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+ parmarea.command_line[COMMAND_LINE_SIZE - 1] = 0;
/* convert arch command line to ascii if necessary */
if (has_ebcdic_char(parmarea.command_line))
- EBCASC(parmarea.command_line, ARCH_COMMAND_LINE_SIZE);
+ EBCASC(parmarea.command_line, COMMAND_LINE_SIZE);
/* copy arch command line */
strcpy(early_command_line, strim(parmarea.command_line));
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index 75bcbfa27941..c2a1defc79da 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -175,6 +175,6 @@ void print_pgm_check_info(void)
gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace();
decompressor_printk("Last Breaking-Event-Address:\n");
- decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.breaking_event_addr,
- (void *)S390_lowcore.breaking_event_addr);
+ decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.pgm_last_break,
+ (void *)S390_lowcore.pgm_last_break);
}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 6dc8d0a53864..7571dee72a0c 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -15,6 +15,7 @@
#include "uv.h"
unsigned long __bootdata_preserved(__kaslr_offset);
+unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
@@ -259,6 +260,12 @@ static void offset_vmlinux_info(unsigned long offset)
vmlinux.dynsym_start += offset;
}
+static unsigned long reserve_amode31(unsigned long safe_addr)
+{
+ __amode31_base = PAGE_ALIGN(safe_addr);
+ return safe_addr + vmlinux.amode31_size;
+}
+
void startup_kernel(void)
{
unsigned long random_lma;
@@ -273,6 +280,7 @@ void startup_kernel(void)
setup_lpp();
store_ipl_parmblock();
safe_addr = mem_safe_offset();
+ safe_addr = reserve_amode31(safe_addr);
safe_addr = read_ipl_report(safe_addr);
uv_query_info();
rescue_initrd(safe_addr);
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 6aad18ee131d..fd825097cf04 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -61,7 +61,8 @@ CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
-CONFIG_S390_UNWIND_SELFTEST=y
+CONFIG_S390_UNWIND_SELFTEST=m
+CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
@@ -776,7 +777,6 @@ CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
-CONFIG_DMA_API_DEBUG=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
@@ -839,8 +839,13 @@ CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
+CONFIG_SAMPLES=y
+CONFIG_SAMPLE_TRACE_PRINTK=m
+CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_DEBUG_ENTRY=y
CONFIG_CIO_INJECT=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_DEBUGFS=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index f08b161c9446..c9c3cedff2d8 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -60,6 +60,7 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
+CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
# CONFIG_GCC_PLUGINS is not set
@@ -788,6 +789,11 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
+CONFIG_SAMPLES=y
+CONFIG_SAMPLE_TRACE_PRINTK=m
+CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_KUNIT=m
+CONFIG_KUNIT_DEBUGFS=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index f9eddbca79d2..2c057e1f3200 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -16,20 +16,24 @@
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
-#define __ASM_BARRIER "bcr 14,0\n"
+#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#else
-#define __ASM_BARRIER "bcr 15,0\n"
+#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
#endif
-#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
+static __always_inline void bcr_serialize(void)
+{
+ asm volatile(__ASM_BCR_SERIALIZE : : : "memory");
+}
-#define rmb() barrier()
-#define wmb() barrier()
-#define dma_rmb() mb()
-#define dma_wmb() mb()
-#define __smp_mb() mb()
-#define __smp_rmb() rmb()
-#define __smp_wmb() wmb()
+#define mb() bcr_serialize()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() mb()
+#define dma_wmb() mb()
+#define __smp_mb() mb()
+#define __smp_rmb() rmb()
+#define __smp_wmb() wmb()
#define __smp_store_release(p, v) \
do { \
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index fd149480b6e2..5a530c552c23 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -188,7 +188,7 @@ static inline bool arch_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *ptr)
{
if (arch_test_bit(nr, ptr))
- return 1;
+ return true;
return arch_test_and_set_bit(nr, ptr);
}
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index 62228a884e06..26c710cd3485 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <linux/jump_label.h>
struct cpuid
{
@@ -21,5 +22,7 @@ struct cpuid
unsigned int unused : 16;
} __attribute__ ((packed, aligned(8)));
+DECLARE_STATIC_KEY_FALSE(cpu_has_bear);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_CPU_H */
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 19a55e1e3a0c..77f24262c25c 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -462,7 +462,7 @@ arch_initcall(VNAME(var, reg))
*
* @var: Name of debug_info_t variable
* @name: Name of debug log (e.g. used for debugfs entry)
- * @pages_per_area: Number of pages per area
+ * @pages: Number of pages per area
* @nr_areas: Number of debug areas
* @buf_size: Size of data area in each debug entry
* @view: Pointer to debug view struct
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index e8b460f39c58..267f70f4393f 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -17,7 +17,6 @@
void ftrace_caller(void);
-extern char ftrace_graph_caller_end;
extern void *ftrace_func;
struct dyn_arch_ftrace { };
@@ -42,6 +41,35 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
+struct ftrace_regs {
+ struct pt_regs regs;
+};
+
+static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ return &fregs->regs;
+}
+
+static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
+ unsigned long ip)
+{
+ struct pt_regs *regs = arch_ftrace_get_regs(fregs);
+
+ regs->psw.addr = ip;
+}
+
+/*
+ * When an ftrace registered caller is tracing a function that is
+ * also set by a register_ftrace_direct() call, it needs to be
+ * differentiated in the ftrace_caller trampoline. To do this,
+ * place the direct caller in the ORIG_GPR2 part of pt_regs. This
+ * tells the ftrace_caller that there's a direct caller.
+ */
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+ regs->orig_gpr2 = addr;
+}
+
/*
* Even though the system call numbers are identical for s390/s390x a
* different system call table is used for compat tasks. This may lead
@@ -68,4 +96,32 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
}
#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define FTRACE_NOP_INSN .word 0xc004, 0x0000, 0x0000 /* brcl 0,0 */
+
+#ifndef CC_USING_HOTPATCH
+
+#define FTRACE_GEN_MCOUNT_RECORD(name) \
+ .section __mcount_loc, "a", @progbits; \
+ .quad name; \
+ .previous;
+
+#else /* !CC_USING_HOTPATCH */
+
+#define FTRACE_GEN_MCOUNT_RECORD(name)
+
+#endif /* !CC_USING_HOTPATCH */
+
+#define FTRACE_GEN_NOP_ASM(name) \
+ FTRACE_GEN_MCOUNT_RECORD(name) \
+ FTRACE_NOP_INSN
+
+#else /* CONFIG_FUNCTION_TRACER */
+
+#define FTRACE_GEN_NOP_ASM(name)
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index dcb1bba4f406..916cfcb36d8a 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -2,6 +2,8 @@
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H
+#define HAVE_JUMP_LABEL_BATCH
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index d578a8c76676..5209f223331a 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -16,9 +16,7 @@
static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
- struct pt_regs *regs = ftrace_get_regs(fregs);
-
- regs->psw.addr = ip;
+ ftrace_instruction_pointer_set(fregs, ip);
}
#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 11213c8bfca5..1262f5003acf 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -65,7 +65,7 @@ struct lowcore {
__u32 external_damage_code; /* 0x00f4 */
__u64 failing_storage_address; /* 0x00f8 */
__u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
- __u64 breaking_event_addr; /* 0x0110 */
+ __u64 pgm_last_break; /* 0x0110 */
__u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
psw_t restart_old_psw; /* 0x0120 */
psw_t external_old_psw; /* 0x0130 */
@@ -93,9 +93,10 @@ struct lowcore {
psw_t return_psw; /* 0x0290 */
psw_t return_mcck_psw; /* 0x02a0 */
+ __u64 last_break; /* 0x02b0 */
+
/* CPU accounting and timing values. */
- __u64 sys_enter_timer; /* 0x02b0 */
- __u8 pad_0x02b8[0x02c0-0x02b8]; /* 0x02b8 */
+ __u64 sys_enter_timer; /* 0x02b8 */
__u64 mcck_enter_timer; /* 0x02c0 */
__u64 exit_timer; /* 0x02c8 */
__u64 user_timer; /* 0x02d0 */
@@ -188,7 +189,7 @@ struct lowcore {
__u32 tod_progreg_save_area; /* 0x1324 */
__u32 cpu_timer_save_area[2]; /* 0x1328 */
__u32 clock_comp_save_area[2]; /* 0x1330 */
- __u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */
+ __u64 last_break_save_area; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index b4bd8c41e9d3..82725cf783c7 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -12,6 +12,11 @@ void nospec_init_branches(void);
void nospec_auto_detect(void);
void nospec_revert(s32 *start, s32 *end);
+static inline bool nospec_uses_trampoline(void)
+{
+ return __is_defined(CC_USING_EXPOLINE) && !nospec_disable;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e43416950245..008a6c856fa4 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -583,11 +583,11 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new
#define CRDTE_DTT_REGION1 0x1cUL
static inline void crdte(unsigned long old, unsigned long new,
- unsigned long table, unsigned long dtt,
+ unsigned long *table, unsigned long dtt,
unsigned long address, unsigned long asce)
{
union register_pair r1 = { .even = old, .odd = new, };
- union register_pair r2 = { .even = table | dtt, .odd = address, };
+ union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };
asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
: [r1] "+&d" (r1.pair)
@@ -1001,7 +1001,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
unsigned long opt, unsigned long asce,
int local)
{
- unsigned long pto = (unsigned long) ptep;
+ unsigned long pto = __pa(ptep);
if (__builtin_constant_p(opt) && opt == 0) {
/* Invalidation + TLB flush for the pte */
@@ -1023,7 +1023,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
pte_t *ptep, int local)
{
- unsigned long pto = (unsigned long) ptep;
+ unsigned long pto = __pa(ptep);
/* Invalidate a range of ptes + TLB flush of the ptes */
do {
@@ -1487,7 +1487,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
{
unsigned long sto;
- sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
+ sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
if (__builtin_constant_p(opt) && opt == 0) {
/* flush without guest asce */
asm volatile(
@@ -1513,7 +1513,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
{
unsigned long r3o;
- r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
+ r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
r3o |= _ASCE_TYPE_REGION3;
if (__builtin_constant_p(opt) && opt == 0) {
/* flush without guest asce */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 61b22aa990e7..4ffa8e7f0ed3 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -76,8 +76,7 @@ enum {
* The pt_regs struct defines the way the registers are stored on
* the stack during a system call.
*/
-struct pt_regs
-{
+struct pt_regs {
union {
user_pt_regs user_regs;
struct {
@@ -97,6 +96,7 @@ struct pt_regs
};
unsigned long flags;
unsigned long cr1;
+ unsigned long last_break;
};
/*
@@ -197,6 +197,25 @@ const char *regs_query_register_name(unsigned int offset);
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (start from 0)
+ *
+ * regs_get_kernel_argument() returns @n th argument of the function call.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ unsigned int n)
+{
+ unsigned int argoffset = STACK_FRAME_OVERHEAD / sizeof(long);
+
+#define NR_REG_ARGUMENTS 5
+ if (n < NR_REG_ARGUMENTS)
+ return regs_get_register(regs, 2 + n);
+ n -= NR_REG_ARGUMENTS;
+ return regs_get_kernel_stack_nth(regs, argoffset + n);
+}
+
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->gprs[15];
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index e3ae937bef1c..c68ea35de498 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -117,6 +117,7 @@ struct zpci_report_error_header {
extern char *sclp_early_sccb;
+void sclp_early_adjust_va(void);
void sclp_early_set_buffer(void *sccb);
int sclp_early_read_info(void);
int sclp_early_read_storage_info(void);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b6606ffd85d8..77e6506898f5 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -11,8 +11,8 @@
#include <linux/build_bug.h>
#define PARMAREA 0x10400
-#define HEAD_END 0x11000
+#define COMMAND_LINE_SIZE CONFIG_COMMAND_LINE_SIZE
/*
* Machine features detected in early.c
*/
@@ -43,6 +43,8 @@
#define STARTUP_NORMAL_OFFSET 0x10000
#define STARTUP_KDUMP_OFFSET 0x10010
+#define LEGACY_COMMAND_LINE_SIZE 896
+
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
@@ -55,8 +57,9 @@ struct parmarea {
unsigned long oldmem_base; /* 0x10418 */
unsigned long oldmem_size; /* 0x10420 */
unsigned long kernel_version; /* 0x10428 */
- char pad1[0x10480 - 0x10430]; /* 0x10430 - 0x10480 */
- char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
+ unsigned long max_command_line_size; /* 0x10430 */
+ char pad1[0x10480-0x10438]; /* 0x10438 - 0x10480 */
+ char command_line[COMMAND_LINE_SIZE]; /* 0x10480 */
};
extern struct parmarea parmarea;
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 4fd66c5e8934..3fae93ddb322 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -31,22 +31,18 @@ void *memmove(void *dest, const void *src, size_t n);
#define __HAVE_ARCH_STRCMP /* arch function */
#define __HAVE_ARCH_STRCPY /* inline & arch function */
#define __HAVE_ARCH_STRLCAT /* arch function */
-#define __HAVE_ARCH_STRLCPY /* arch function */
#define __HAVE_ARCH_STRLEN /* inline & arch function */
#define __HAVE_ARCH_STRNCAT /* arch function */
#define __HAVE_ARCH_STRNCPY /* arch function */
#define __HAVE_ARCH_STRNLEN /* inline & arch function */
-#define __HAVE_ARCH_STRRCHR /* arch function */
#define __HAVE_ARCH_STRSTR /* arch function */
/* Prototypes for non-inlined arch strings functions. */
int memcmp(const void *s1, const void *s2, size_t n);
int strcmp(const char *s1, const char *s2);
size_t strlcat(char *dest, const char *src, size_t n);
-size_t strlcpy(char *dest, const char *src, size_t size);
char *strncat(char *dest, const char *src, size_t n);
char *strncpy(char *dest, const char *src, size_t n);
-char *strrchr(const char *s, int c);
char *strstr(const char *s1, const char *s2);
#endif /* !CONFIG_KASAN */
diff --git a/arch/s390/include/asm/text-patching.h b/arch/s390/include/asm/text-patching.h
new file mode 100644
index 000000000000..b219056a8817
--- /dev/null
+++ b/arch/s390/include/asm/text-patching.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_TEXT_PATCHING_H
+#define _ASM_S390_TEXT_PATCHING_H
+
+#include <asm/barrier.h>
+
+static __always_inline void sync_core(void)
+{
+ bcr_serialize();
+}
+
+void text_poke_sync(void);
+void text_poke_sync_lock(void);
+
+#endif /* _ASM_S390_TEXT_PATCHING_H */
diff --git a/arch/s390/include/uapi/asm/setup.h b/arch/s390/include/uapi/asm/setup.h
index 1f8803a31079..598d769e76df 100644
--- a/arch/s390/include/uapi/asm/setup.h
+++ b/arch/s390/include/uapi/asm/setup.h
@@ -1,14 +1 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * S390 version
- * Copyright IBM Corp. 1999, 2010
- */
-
-#ifndef _UAPI_ASM_S390_SETUP_H
-#define _UAPI_ASM_S390_SETUP_H
-
-#define COMMAND_LINE_SIZE 4096
-
-#define ARCH_COMMAND_LINE_SIZE 896
-
-#endif /* _UAPI_ASM_S390_SETUP_H */
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index c22ea1c3ef84..cce0ddee2d02 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
@@ -110,3 +113,20 @@ void __init apply_alternative_instructions(void)
{
apply_alternatives(__alt_instructions, __alt_instructions_end);
}
+
+static void do_sync_core(void *info)
+{
+ sync_core();
+}
+
+void text_poke_sync(void)
+{
+ on_each_cpu(do_sync_core, NULL, 1);
+}
+
+void text_poke_sync_lock(void)
+{
+ cpus_read_lock();
+ text_poke_sync();
+ cpus_read_unlock();
+}
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index b57da9338588..8e00bb228662 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
OFFSET(__PT_FLAGS, pt_regs, flags);
OFFSET(__PT_CR1, pt_regs, cr1);
+ OFFSET(__PT_LAST_BREAK, pt_regs, last_break);
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
BLANK();
/* stack_frame offsets */
@@ -45,6 +46,7 @@ int main(void)
OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
+ DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
/* idle data offsets */
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
@@ -77,7 +79,7 @@ int main(void)
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
- OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
+ OFFSET(__LC_PGM_LAST_BREAK, lowcore, pgm_last_break);
OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
@@ -126,6 +128,7 @@ int main(void)
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
+ OFFSET(__LC_LAST_BREAK, lowcore, last_break);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
@@ -139,6 +142,7 @@ int main(void)
OFFSET(__LC_TOD_PROGREG_SAVE_AREA, lowcore, tod_progreg_save_area);
OFFSET(__LC_CPU_TIMER_SAVE_AREA, lowcore, cpu_timer_save_area);
OFFSET(__LC_CLOCK_COMP_SAVE_AREA, lowcore, clock_comp_save_area);
+ OFFSET(__LC_LAST_BREAK_SAVE_AREA, lowcore, last_break_save_area);
OFFSET(__LC_AREGS_SAVE_AREA, lowcore, access_regs_save_area);
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
@@ -160,5 +164,6 @@ int main(void)
DEFINE(OLDMEM_BASE, PARMAREA + offsetof(struct parmarea, oldmem_base));
DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
+ DEFINE(MAX_COMMAND_LINE_SIZE, PARMAREA + offsetof(struct parmarea, max_command_line_size));
return 0;
}
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 54efc279f54e..72e106cfd8c7 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -29,7 +29,7 @@ static int diag8_noresponse(int cmdlen)
asm volatile(
" diag %[rx],%[ry],0x8\n"
: [ry] "+&d" (cmdlen)
- : [rx] "d" ((addr_t) cpcmd_buf)
+ : [rx] "d" (__pa(cpcmd_buf))
: "cc");
return cmdlen;
}
@@ -39,8 +39,8 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
union register_pair rx, ry;
int cc;
- rx.even = (addr_t) cpcmd_buf;
- rx.odd = (addr_t) response;
+ rx.even = __pa(cpcmd_buf);
+ rx.odd = __pa(response);
ry.even = cmdlen | 0x40000000L;
ry.odd = *rlen;
asm volatile(
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index db1bc00229ca..85f326e258df 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -152,7 +152,7 @@ void show_stack(struct task_struct *task, unsigned long *stack,
static void show_last_breaking_event(struct pt_regs *regs)
{
printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
+ printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break);
}
void show_registers(struct pt_regs *regs)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 9857cb046726..3cdf68c53614 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -280,7 +280,7 @@ char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
/* copy arch command line */
- strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
}
static void __init check_image_bootable(void)
@@ -296,6 +296,7 @@ static void __init check_image_bootable(void)
void __init startup_init(void)
{
+ sclp_early_adjust_va();
reset_tod_clock();
check_image_bootable();
time_early_init();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4c9b967290ae..01bae1d51113 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -52,6 +52,22 @@ STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_LPP_OFFSET = __LC_LPP
+ .macro STBEAR address
+ ALTERNATIVE "", ".insn s,0xb2010000,\address", 193
+ .endm
+
+ .macro LBEAR address
+ ALTERNATIVE "", ".insn s,0xb2000000,\address", 193
+ .endm
+
+ .macro LPSWEY address,lpswe
+ ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193
+ .endm
+
+ .macro MBEAR reg
+ ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
+ .endm
+
.macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -302,6 +318,7 @@ ENTRY(system_call)
BPOFF
lghi %r14,0
.Lsysc_per:
+ STBEAR __LC_LAST_BREAK
lctlg %c1,%c1,__LC_KERNEL_ASCE
lg %r12,__LC_CURRENT
lg %r15,__LC_KERNEL_STACK
@@ -321,14 +338,16 @@ ENTRY(system_call)
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
+ MBEAR %r2
lgr %r3,%r14
brasl %r14,__do_syscall
lctlg %c1,%c1,__LC_USER_ASCE
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
stpt __LC_EXIT_TIMER
- b __LC_RETURN_LPSWE
+ LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)
#
@@ -340,9 +359,10 @@ ENTRY(ret_from_fork)
lctlg %c1,%c1,__LC_USER_ASCE
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
stpt __LC_EXIT_TIMER
- b __LC_RETURN_LPSWE
+ LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)
/*
@@ -382,6 +402,7 @@ ENTRY(pgm_check_handler)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
stmg %r8,%r9,__PT_PSW(%r11)
# clear user controlled registers to prevent speculative use
@@ -401,8 +422,9 @@ ENTRY(pgm_check_handler)
stpt __LC_EXIT_TIMER
.Lpgm_exit_kernel:
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- b __LC_RETURN_LPSWE
+ LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
#
# single stepped system call
@@ -412,7 +434,8 @@ ENTRY(pgm_check_handler)
larl %r14,.Lsysc_per
stg %r14,__LC_RETURN_PSW+8
lghi %r14,1
- lpswe __LC_RETURN_PSW # branch to .Lsysc_per
+ LBEAR __LC_PGM_LAST_BREAK
+ LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)
/*
@@ -422,6 +445,7 @@ ENDPROC(pgm_check_handler)
ENTRY(\name)
STCK __LC_INT_CLOCK
stpt __LC_SYS_ENTER_TIMER
+ STBEAR __LC_LAST_BREAK
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
@@ -453,6 +477,7 @@ ENTRY(\name)
xgr %r10,%r10
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ MBEAR %r11
stmg %r8,%r9,__PT_PSW(%r11)
tm %r8,0x0001 # coming from user space?
jno 1f
@@ -465,8 +490,9 @@ ENTRY(\name)
lctlg %c1,%c1,__LC_USER_ASCE
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
-2: lmg %r0,%r15,__PT_R0(%r11)
- b __LC_RETURN_LPSWE
+2: LBEAR __PT_LAST_BREAK(%r11)
+ lmg %r0,%r15,__PT_R0(%r11)
+ LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm
@@ -505,6 +531,7 @@ ENTRY(mcck_int_handler)
BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
+ LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
lg %r12,__LC_CURRENT
lmg %r8,%r9,__LC_MCK_OLD_PSW
@@ -591,8 +618,10 @@ ENTRY(mcck_int_handler)
jno 0f
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
-0: lmg %r11,%r15,__PT_R11(%r11)
- b __LC_RETURN_MCCK_LPSWE
+0: ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
+ LBEAR 0(%r12)
+ lmg %r11,%r15,__PT_R11(%r11)
+ LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
.Lmcck_panic:
/*
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 7f2696e8d511..6083090be1f4 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -70,5 +70,6 @@ extern struct exception_table_entry _stop_amode31_ex_table[];
#define __amode31_data __section(".amode31.data")
#define __amode31_ref __section(".amode31.refs")
extern long _start_amode31_refs[], _end_amode31_refs[];
+extern unsigned long __amode31_base;
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 5165bf344f95..5510c7d10ddc 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -17,6 +17,7 @@
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
+#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
@@ -80,17 +81,6 @@ asm(
#ifdef CONFIG_MODULES
static char *ftrace_plt;
-
-asm(
- " .data\n"
- "ftrace_plt_template:\n"
- " basr %r1,%r0\n"
- " lg %r1,0f-.(%r1)\n"
- " br %r1\n"
- "0: .quad ftrace_caller\n"
- "ftrace_plt_template_end:\n"
- " .previous\n"
-);
#endif /* CONFIG_MODULES */
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
@@ -116,7 +106,7 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
bool ftrace_need_init_nop(void)
{
- return ftrace_shared_hotpatch_trampoline(NULL);
+ return true;
}
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
@@ -175,28 +165,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
-static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
-{
- /* brcl 0,0 */
- insn->opc = 0xc004;
- insn->disp = 0;
-}
-
-static void ftrace_generate_call_insn(struct ftrace_insn *insn,
- unsigned long ip)
-{
- unsigned long target;
-
- /* brasl r0,ftrace_caller */
- target = FTRACE_ADDR;
-#ifdef CONFIG_MODULES
- if (is_module_addr((void *)ip))
- target = (unsigned long)ftrace_plt;
-#endif /* CONFIG_MODULES */
- insn->opc = 0xc005;
- insn->disp = (target - ip) / 2;
-}
-
static void brcl_disable(void *brcl)
{
u8 op = 0x04; /* set mask field to zero */
@@ -207,23 +175,7 @@ static void brcl_disable(void *brcl)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- struct ftrace_insn orig, new, old;
-
- if (ftrace_shared_hotpatch_trampoline(NULL)) {
- brcl_disable((void *)rec->ip);
- return 0;
- }
-
- if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
- return -EFAULT;
- /* Replace ftrace call with a nop. */
- ftrace_generate_call_insn(&orig, rec->ip);
- ftrace_generate_nop_insn(&new);
-
- /* Verify that the to be replaced code matches what we expect. */
- if (memcmp(&orig, &old, sizeof(old)))
- return -EINVAL;
- s390_kernel_write((void *) rec->ip, &new, sizeof(new));
+ brcl_disable((void *)rec->ip);
return 0;
}
@@ -236,23 +188,7 @@ static void brcl_enable(void *brcl)
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- struct ftrace_insn orig, new, old;
-
- if (ftrace_shared_hotpatch_trampoline(NULL)) {
- brcl_enable((void *)rec->ip);
- return 0;
- }
-
- if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
- return -EFAULT;
- /* Replace nop with an ftrace call. */
- ftrace_generate_nop_insn(&orig);
- ftrace_generate_call_insn(&new, rec->ip);
-
- /* Verify that the to be replaced code matches what we expect. */
- if (memcmp(&orig, &old, sizeof(old)))
- return -EINVAL;
- s390_kernel_write((void *) rec->ip, &new, sizeof(new));
+ brcl_enable((void *)rec->ip);
return 0;
}
@@ -264,22 +200,16 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
void arch_ftrace_update_code(int command)
{
- if (ftrace_shared_hotpatch_trampoline(NULL))
- ftrace_modify_all_code(command);
- else
- ftrace_run_stop_machine(command);
-}
-
-static void __ftrace_sync(void *dummy)
-{
+ ftrace_modify_all_code(command);
}
int ftrace_arch_code_modify_post_process(void)
{
- if (ftrace_shared_hotpatch_trampoline(NULL)) {
- /* Send SIGP to the other CPUs, so they see the new code. */
- smp_call_function(__ftrace_sync, NULL, 1);
- }
+ /*
+ * Flush any pre-fetched instructions on all
+ * CPUs to make the new code visible.
+ */
+ text_poke_sync_lock();
return 0;
}
@@ -294,10 +224,6 @@ static int __init ftrace_plt_init(void)
panic("cannot allocate ftrace plt\n");
start = ftrace_shared_hotpatch_trampoline(&end);
- if (!start) {
- start = ftrace_plt_template;
- end = ftrace_plt_template_end;
- }
memcpy(ftrace_plt, start, end - start);
set_memory_ro((unsigned long)ftrace_plt, 1);
return 0;
@@ -337,12 +263,14 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
int ftrace_enable_ftrace_graph_caller(void)
{
brcl_disable(ftrace_graph_caller);
+ text_poke_sync_lock();
return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
brcl_enable(ftrace_graph_caller);
+ text_poke_sync_lock();
return 0;
}
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 114b5490ad8e..42f9a325a257 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -20,8 +20,6 @@ __HEAD
ENTRY(startup_continue)
larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
- larl %r13,.LPG1 # get base
- lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
#
# Setup stack
#
@@ -42,19 +40,3 @@ ENTRY(startup_continue)
.align 16
.LPG1:
.Ldw: .quad 0x0002000180000000,0x0000000000000000
-.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
- .quad 0 # cr1: primary space segment table
- .quad 0 # cr2: dispatchable unit control table
- .quad 0 # cr3: instruction authorization
- .quad 0xffff # cr4: instruction authorization
- .quad 0 # cr5: primary-aste origin
- .quad 0 # cr6: I/O interrupts
- .quad 0 # cr7: secondary space segment table
- .quad 0x0000000000008000 # cr8: access registers translation
- .quad 0 # cr9: tracing off
- .quad 0 # cr10: tracing off
- .quad 0 # cr11: tracing off
- .quad 0 # cr12: tracing off
- .quad 0 # cr13: home space segment table
- .quad 0xc0000000 # cr14: machine check handling off
- .quad 0 # cr15: linkage stack operations
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 3a3145c4a3ba..0df83ecaa2e0 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -140,8 +140,11 @@ void noinstr do_io_irq(struct pt_regs *regs)
irq_enter();
- if (user_mode(regs))
+ if (user_mode(regs)) {
update_timer_sys();
+ if (static_branch_likely(&cpu_has_bear))
+ current->thread.last_break = regs->last_break;
+ }
from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
if (from_idle)
@@ -171,8 +174,11 @@ void noinstr do_ext_irq(struct pt_regs *regs)
irq_enter();
- if (user_mode(regs))
+ if (user_mode(regs)) {
update_timer_sys();
+ if (static_branch_likely(&cpu_has_bear))
+ current->thread.last_break = regs->last_break;
+ }
regs->int_code = S390_lowcore.ext_int_code_addr;
regs->int_parm = S390_lowcore.ext_params;
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 9156653b56f6..6bec000c6c1c 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -6,8 +6,9 @@
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/uaccess.h>
-#include <linux/stop_machine.h>
#include <linux/jump_label.h>
+#include <linux/module.h>
+#include <asm/text-patching.h>
#include <asm/ipl.h>
struct insn {
@@ -48,9 +49,9 @@ static struct insn orignop = {
.offset = JUMP_LABEL_NOP_OFFSET >> 1,
};
-static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- int init)
+static void jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ int init)
{
void *code = (void *)jump_entry_code(entry);
struct insn old, new;
@@ -72,19 +73,28 @@ static void __jump_label_transform(struct jump_entry *entry,
s390_kernel_write(code, &new, sizeof(new));
}
-static void __jump_label_sync(void *dummy)
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
{
+ jump_label_transform(entry, type, 0);
+ text_poke_sync();
}
-void arch_jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
+bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ jump_label_transform(entry, type, 0);
+ return true;
+}
+
+void arch_jump_label_transform_apply(void)
{
- __jump_label_transform(entry, type, 0);
- smp_call_function(__jump_label_sync, NULL, 1);
+ text_poke_sync();
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type)
+void __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
{
- __jump_label_transform(entry, type, 1);
+ jump_label_transform(entry, type, 1);
+ text_poke_sync();
}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index c505c0ee5f47..e27a7d3b0364 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -122,9 +122,55 @@ static void s390_free_insn_slot(struct kprobe *p)
}
NOKPROBE_SYMBOL(s390_free_insn_slot);
+/* Check if paddr is at an instruction boundary */
+static bool can_probe(unsigned long paddr)
+{
+ unsigned long addr, offset = 0;
+ kprobe_opcode_t insn;
+ struct kprobe *kp;
+
+ if (paddr & 0x01)
+ return false;
+
+ if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
+ return false;
+
+ /* Decode instructions */
+ addr = paddr - offset;
+ while (addr < paddr) {
+ if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(insn)))
+ return false;
+
+ if (insn >> 8 == 0) {
+ if (insn != BREAKPOINT_INSTRUCTION) {
+ /*
+ * Note that QEMU inserts opcode 0x0000 to implement
+ * software breakpoints for guests. Since the size of
+ * the original instruction is unknown, stop following
+ * instructions and prevent setting a kprobe.
+ */
+ return false;
+ }
+ /*
+ * Check if the instruction has been modified by another
+ * kprobe, in which case the original instruction is
+ * decoded.
+ */
+ kp = get_kprobe((void *)addr);
+ if (!kp) {
+ /* not a kprobe */
+ return false;
+ }
+ insn = kp->opcode;
+ }
+ addr += insn_length(insn >> 8);
+ }
+ return addr == paddr;
+}
+
int arch_prepare_kprobe(struct kprobe *p)
{
- if ((unsigned long) p->addr & 0x01)
+ if (!can_probe((unsigned long)p->addr))
return -EINVAL;
/* Make sure the probe isn't going on a difficult instruction */
if (probe_is_prohibited_opcode(p->addr))
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index f9e4baa64b67..528edff085d9 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -216,7 +216,9 @@ void *kexec_file_add_components(struct kimage *image,
int (*add_kernel)(struct kimage *image,
struct s390_load_data *data))
{
+ unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE;
struct s390_load_data data = {0};
+ unsigned long minsize;
int ret;
data.report = ipl_report_init(&ipl_block);
@@ -227,10 +229,23 @@ void *kexec_file_add_components(struct kimage *image,
if (ret)
goto out;
- if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
- ret = -EINVAL;
+ ret = -EINVAL;
+ minsize = PARMAREA + offsetof(struct parmarea, command_line);
+ if (image->kernel_buf_len < minsize)
goto out;
- }
+
+ if (data.parm->max_command_line_size)
+ max_command_line_size = data.parm->max_command_line_size;
+
+ if (minsize + max_command_line_size < minsize)
+ goto out;
+
+ if (image->kernel_buf_len < minsize + max_command_line_size)
+ goto out;
+
+ if (image->cmdline_buf_len >= max_command_line_size)
+ goto out;
+
memcpy(data.parm->command_line, image->cmdline_buf,
image->cmdline_buf_len);
@@ -307,17 +322,3 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
}
return 0;
}
-
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len)
-{
- /* A kernel must be at least large enough to contain head.S. During
- * load memory in head.S will be accessed, e.g. to register the next
- * command line. If the next kernel were smaller the current kernel
- * will panic at load.
- */
- if (buf_len < HEAD_END)
- return -ENOEXEC;
-
- return kexec_image_probe_default(image, buf, buf_len);
-}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 6b13797143a7..39bcc0e39a10 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -22,10 +22,11 @@ ENTRY(ftrace_stub)
BR_EX %r14
ENDPROC(ftrace_stub)
-#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
-#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
-#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
-#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
+#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
+#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
+#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
#ifdef __PACK_STACK
/* allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
@@ -33,13 +34,15 @@ ENDPROC(ftrace_stub)
#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
#endif
-ENTRY(ftrace_caller)
- .globl ftrace_regs_caller
- .set ftrace_regs_caller,ftrace_caller
+ .macro ftrace_regs_entry, allregs=0
stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
+
+ .if \allregs == 1
lghi %r14,0 # save condition code
ipm %r14 # don't put any instructions
sllg %r14,%r14,16 # clobbering CC before this point
+ .endif
+
lgr %r1,%r15
# allocate stack frame for ftrace_caller to contain traced function
aghi %r15,-TRACED_FUNC_FRAME_SIZE
@@ -49,13 +52,31 @@ ENTRY(ftrace_caller)
# allocate pt_regs and stack frame for ftrace_trace_function
aghi %r15,-STACK_FRAME_SIZE
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+ xc STACK_PTREGS_ORIG_GPR2(8,%r15),STACK_PTREGS_ORIG_GPR2(%r15)
+
+ .if \allregs == 1
stg %r14,(STACK_PTREGS_PSW)(%r15)
- lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
stosm (STACK_PTREGS_PSW)(%r15),0
+ .endif
+
+ lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
aghi %r1,-TRACED_FUNC_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
+ .endm
+
+SYM_CODE_START(ftrace_regs_caller)
+ ftrace_regs_entry 1
+ j ftrace_common
+SYM_CODE_END(ftrace_regs_caller)
+
+SYM_CODE_START(ftrace_caller)
+ ftrace_regs_entry 0
+ j ftrace_common
+SYM_CODE_END(ftrace_caller)
+
+SYM_CODE_START(ftrace_common)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
@@ -74,24 +95,31 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
- .globl ftrace_graph_caller
-ftrace_graph_caller:
- j ftrace_graph_caller_end
+SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
+ j .Lftrace_graph_caller_end
lmg %r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
lg %r4,(STACK_PTREGS_PSW+8)(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
-ftrace_graph_caller_end:
- .globl ftrace_graph_caller_end
+.Lftrace_graph_caller_end:
+#endif
+ lg %r0,(STACK_PTREGS_PSW+8)(%r15)
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ ltg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
+ locgrz %r1,%r0
+#else
+ lg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
+ ltgr %r1,%r1
+ jnz 0f
+ lgr %r1,%r0
#endif
- lg %r1,(STACK_PTREGS_PSW+8)(%r15)
- lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+0: lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
BR_EX %r1
-ENDPROC(ftrace_caller)
+SYM_CODE_END(ftrace_common)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
@@ -101,6 +129,6 @@ ENTRY(return_to_handler)
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
BR_EX %r14
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
#endif
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 250e4dbf653c..60e6fec27bba 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -38,7 +38,7 @@ static int __init nospec_report(void)
{
if (test_facility(156))
pr_info("Spectre V2 mitigation: etokens\n");
- if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
+ if (nospec_uses_trampoline())
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index b4b5c8c21166..52d4353188ad 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -15,7 +15,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
{
if (test_facility(156))
return sprintf(buf, "Mitigation: etokens\n");
- if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
+ if (nospec_uses_trampoline())
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 4a99154fe651..6f431fa9e4d7 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -773,22 +773,46 @@ static int __init cpumf_pmu_init(void)
* counter set via normal file operations.
*/
-static atomic_t cfset_opencnt = ATOMIC_INIT(0); /* Excl. access */
+static atomic_t cfset_opencnt = ATOMIC_INIT(0); /* Access count */
static DEFINE_MUTEX(cfset_ctrset_mutex);/* Synchronize access to hardware */
struct cfset_call_on_cpu_parm { /* Parm struct for smp_call_on_cpu */
unsigned int sets; /* Counter set bit mask */
atomic_t cpus_ack; /* # CPUs successfully executed func */
};
-static struct cfset_request { /* CPUs and counter set bit mask */
+static struct cfset_session { /* CPUs and counter set bit mask */
+ struct list_head head; /* Head of list of active processes */
+} cfset_session = {
+ .head = LIST_HEAD_INIT(cfset_session.head)
+};
+
+struct cfset_request { /* CPUs and counter set bit mask */
unsigned long ctrset; /* Bit mask of counter set to read */
cpumask_t mask; /* CPU mask to read from */
-} cfset_request;
+ struct list_head node; /* Chain to cfset_session.head */
+};
+
+static void cfset_session_init(void)
+{
+ INIT_LIST_HEAD(&cfset_session.head);
+}
+
+/* Remove current request from global bookkeeping. Maintain a counter set bit
+ * mask on a per CPU basis.
+ * Done in process context under mutex protection.
+ */
+static void cfset_session_del(struct cfset_request *p)
+{
+ list_del(&p->node);
+}
-static void cfset_ctrset_clear(void)
+/* Add current request to global bookkeeping. Maintain a counter set bit mask
+ * on a per CPU basis.
+ * Done in process context under mutex protection.
+ */
+static void cfset_session_add(struct cfset_request *p)
{
- cpumask_clear(&cfset_request.mask);
- cfset_request.ctrset = 0;
+ list_add(&p->node, &cfset_session.head);
}
/* The /dev/hwctr device access uses PMU_F_IN_USE to mark the device access
@@ -827,15 +851,23 @@ static void cfset_ioctl_off(void *parm)
struct cfset_call_on_cpu_parm *p = parm;
int rc;
- cpuhw->dev_state = 0;
+ /* Check if any counter set used by /dev/hwc */
for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
- if ((p->sets & cpumf_ctr_ctl[rc]))
- atomic_dec(&cpuhw->ctr_set[rc]);
- rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
+ if ((p->sets & cpumf_ctr_ctl[rc])) {
+ if (!atomic_dec_return(&cpuhw->ctr_set[rc])) {
+ ctr_set_disable(&cpuhw->dev_state,
+ cpumf_ctr_ctl[rc]);
+ ctr_set_stop(&cpuhw->dev_state,
+ cpumf_ctr_ctl[rc]);
+ }
+ }
+ /* Keep perf_event_open counter sets */
+ rc = lcctl(cpuhw->dev_state | cpuhw->state);
if (rc)
pr_err("Counter set stop %#llx of /dev/%s failed rc=%i\n",
cpuhw->state, S390_HWCTR_DEVICE, rc);
- cpuhw->flags &= ~PMU_F_IN_USE;
+ if (!cpuhw->dev_state)
+ cpuhw->flags &= ~PMU_F_IN_USE;
debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
__func__, rc, cpuhw->state, cpuhw->dev_state);
}
@@ -870,11 +902,26 @@ static void cfset_release_cpu(void *p)
debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n",
__func__, cpuhw->state, cpuhw->dev_state);
+ cpuhw->dev_state = 0;
rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
if (rc)
pr_err("Counter set release %#llx of /dev/%s failed rc=%i\n",
cpuhw->state, S390_HWCTR_DEVICE, rc);
- cpuhw->dev_state = 0;
+}
+
+/* This modifies the process CPU mask to adopt it to the currently online
+ * CPUs. Offline CPUs can not be addresses. This call terminates the access
+ * and is usually followed by close() or a new iotcl(..., START, ...) which
+ * creates a new request structure.
+ */
+static void cfset_all_stop(struct cfset_request *req)
+{
+ struct cfset_call_on_cpu_parm p = {
+ .sets = req->ctrset,
+ };
+
+ cpumask_and(&req->mask, &req->mask, cpu_online_mask);
+ on_each_cpu_mask(&req->mask, cfset_ioctl_off, &p, 1);
}
/* Release function is also called when application gets terminated without
@@ -882,10 +929,19 @@ static void cfset_release_cpu(void *p)
*/
static int cfset_release(struct inode *inode, struct file *file)
{
- on_each_cpu(cfset_release_cpu, NULL, 1);
+ mutex_lock(&cfset_ctrset_mutex);
+ /* Open followed by close/exit has no private_data */
+ if (file->private_data) {
+ cfset_all_stop(file->private_data);
+ cfset_session_del(file->private_data);
+ kfree(file->private_data);
+ file->private_data = NULL;
+ }
+ if (!atomic_dec_return(&cfset_opencnt))
+ on_each_cpu(cfset_release_cpu, NULL, 1);
+ mutex_unlock(&cfset_ctrset_mutex);
+
hw_perf_event_destroy(NULL);
- cfset_ctrset_clear();
- atomic_set(&cfset_opencnt, 0);
return 0;
}
@@ -893,9 +949,10 @@ static int cfset_open(struct inode *inode, struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Only one user space program can open /dev/hwctr */
- if (atomic_xchg(&cfset_opencnt, 1))
- return -EBUSY;
+ mutex_lock(&cfset_ctrset_mutex);
+ if (atomic_inc_return(&cfset_opencnt) == 1)
+ cfset_session_init();
+ mutex_unlock(&cfset_ctrset_mutex);
cpumf_hw_inuse();
file->private_data = NULL;
@@ -903,25 +960,10 @@ static int cfset_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
-static int cfset_all_stop(void)
+static int cfset_all_start(struct cfset_request *req)
{
struct cfset_call_on_cpu_parm p = {
- .sets = cfset_request.ctrset,
- };
- cpumask_var_t mask;
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
- on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
- free_cpumask_var(mask);
- return 0;
-}
-
-static int cfset_all_start(void)
-{
- struct cfset_call_on_cpu_parm p = {
- .sets = cfset_request.ctrset,
+ .sets = req->ctrset,
.cpus_ack = ATOMIC_INIT(0),
};
cpumask_var_t mask;
@@ -929,7 +971,7 @@ static int cfset_all_start(void)
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
- cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
+ cpumask_and(mask, &req->mask, cpu_online_mask);
on_each_cpu_mask(mask, cfset_ioctl_on, &p, 1);
if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
@@ -1045,7 +1087,7 @@ static void cfset_cpu_read(void *parm)
cpuhw->sets, cpuhw->used);
}
-static int cfset_all_read(unsigned long arg)
+static int cfset_all_read(unsigned long arg, struct cfset_request *req)
{
struct cfset_call_on_cpu_parm p;
cpumask_var_t mask;
@@ -1054,46 +1096,53 @@ static int cfset_all_read(unsigned long arg)
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
- p.sets = cfset_request.ctrset;
- cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
+ p.sets = req->ctrset;
+ cpumask_and(mask, &req->mask, cpu_online_mask);
on_each_cpu_mask(mask, cfset_cpu_read, &p, 1);
rc = cfset_all_copy(arg, mask);
free_cpumask_var(mask);
return rc;
}
-static long cfset_ioctl_read(unsigned long arg)
+static long cfset_ioctl_read(unsigned long arg, struct cfset_request *req)
{
struct s390_ctrset_read read;
- int ret = 0;
+ int ret = -ENODATA;
- if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
- return -EFAULT;
- ret = cfset_all_read(arg);
+ if (req && req->ctrset) {
+ if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
+ return -EFAULT;
+ ret = cfset_all_read(arg, req);
+ }
return ret;
}
-static long cfset_ioctl_stop(void)
+static long cfset_ioctl_stop(struct file *file)
{
- int ret = ENXIO;
-
- if (cfset_request.ctrset) {
- ret = cfset_all_stop();
- cfset_ctrset_clear();
+ struct cfset_request *req = file->private_data;
+ int ret = -ENXIO;
+
+ if (req) {
+ cfset_all_stop(req);
+ cfset_session_del(req);
+ kfree(req);
+ file->private_data = NULL;
+ ret = 0;
}
return ret;
}
-static long cfset_ioctl_start(unsigned long arg)
+static long cfset_ioctl_start(unsigned long arg, struct file *file)
{
struct s390_ctrset_start __user *ustart;
struct s390_ctrset_start start;
+ struct cfset_request *preq;
void __user *umask;
unsigned int len;
int ret = 0;
size_t need;
- if (cfset_request.ctrset)
+ if (file->private_data)
return -EBUSY;
ustart = (struct s390_ctrset_start __user *)arg;
if (copy_from_user(&start, ustart, sizeof(start)))
@@ -1108,25 +1157,36 @@ static long cfset_ioctl_start(unsigned long arg)
return -EINVAL; /* Invalid counter set */
if (!start.counter_sets)
return -EINVAL; /* No counter set at all? */
- cpumask_clear(&cfset_request.mask);
+
+ preq = kzalloc(sizeof(*preq), GFP_KERNEL);
+ if (!preq)
+ return -ENOMEM;
+ cpumask_clear(&preq->mask);
len = min_t(u64, start.cpumask_len, cpumask_size());
umask = (void __user *)start.cpumask;
- if (copy_from_user(&cfset_request.mask, umask, len))
+ if (copy_from_user(&preq->mask, umask, len)) {
+ kfree(preq);
return -EFAULT;
- if (cpumask_empty(&cfset_request.mask))
+ }
+ if (cpumask_empty(&preq->mask)) {
+ kfree(preq);
return -EINVAL;
+ }
need = cfset_needspace(start.counter_sets);
- if (put_user(need, &ustart->data_bytes))
- ret = -EFAULT;
- if (ret)
- goto out;
- cfset_request.ctrset = start.counter_sets;
- ret = cfset_all_start();
-out:
- if (ret)
- cfset_ctrset_clear();
- debug_sprintf_event(cf_dbg, 4, "%s sets %#lx need %ld ret %d\n",
- __func__, cfset_request.ctrset, need, ret);
+ if (put_user(need, &ustart->data_bytes)) {
+ kfree(preq);
+ return -EFAULT;
+ }
+ preq->ctrset = start.counter_sets;
+ ret = cfset_all_start(preq);
+ if (!ret) {
+ cfset_session_add(preq);
+ file->private_data = preq;
+ debug_sprintf_event(cf_dbg, 4, "%s set %#lx need %ld ret %d\n",
+ __func__, preq->ctrset, need, ret);
+ } else {
+ kfree(preq);
+ }
return ret;
}
@@ -1136,7 +1196,7 @@ out:
* counter set keeps running until explicitly stopped. Returns the number
* of bytes needed to store the counter values. If another S390_HWCTR_START
* ioctl subcommand is called without a previous S390_HWCTR_STOP stop
- * command, -EBUSY is returned.
+ * command on the same file descriptor, -EBUSY is returned.
* S390_HWCTR_READ: Read the counter set values from specified CPU list given
* with the S390_HWCTR_START command.
* S390_HWCTR_STOP: Stops the counter sets on the CPU list given with the
@@ -1150,13 +1210,13 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&cfset_ctrset_mutex);
switch (cmd) {
case S390_HWCTR_START:
- ret = cfset_ioctl_start(arg);
+ ret = cfset_ioctl_start(arg, file);
break;
case S390_HWCTR_STOP:
- ret = cfset_ioctl_stop();
+ ret = cfset_ioctl_stop(file);
break;
case S390_HWCTR_READ:
- ret = cfset_ioctl_read(arg);
+ ret = cfset_ioctl_read(arg, file->private_data);
break;
default:
ret = -ENOTTY;
@@ -1182,29 +1242,41 @@ static struct miscdevice cfset_dev = {
.fops = &cfset_fops,
};
+/* Hotplug add of a CPU. Scan through all active processes and add
+ * that CPU to the list of CPUs supplied with ioctl(..., START, ...).
+ */
int cfset_online_cpu(unsigned int cpu)
{
struct cfset_call_on_cpu_parm p;
+ struct cfset_request *rp;
mutex_lock(&cfset_ctrset_mutex);
- if (cfset_request.ctrset) {
- p.sets = cfset_request.ctrset;
- cfset_ioctl_on(&p);
- cpumask_set_cpu(cpu, &cfset_request.mask);
+ if (!list_empty(&cfset_session.head)) {
+ list_for_each_entry(rp, &cfset_session.head, node) {
+ p.sets = rp->ctrset;
+ cfset_ioctl_on(&p);
+ cpumask_set_cpu(cpu, &rp->mask);
+ }
}
mutex_unlock(&cfset_ctrset_mutex);
return 0;
}
+/* Hotplug remove of a CPU. Scan through all active processes and clear
+ * that CPU from the list of CPUs supplied with ioctl(..., START, ...).
+ */
int cfset_offline_cpu(unsigned int cpu)
{
struct cfset_call_on_cpu_parm p;
+ struct cfset_request *rp;
mutex_lock(&cfset_ctrset_mutex);
- if (cfset_request.ctrset) {
- p.sets = cfset_request.ctrset;
- cfset_ioctl_off(&p);
- cpumask_clear_cpu(cpu, &cfset_request.mask);
+ if (!list_empty(&cfset_session.head)) {
+ list_for_each_entry(rp, &cfset_session.head, node) {
+ p.sets = rp->ctrset;
+ cfset_ioctl_off(&p);
+ cpumask_clear_cpu(cpu, &rp->mask);
+ }
}
mutex_unlock(&cfset_ctrset_mutex);
return 0;
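
Editorial note: a user-space sketch may help place the reworked /dev/hwctr flow above. Every open() now owns its own request (stored in file->private_data), START/READ/STOP act only on that descriptor, and close() releases it. The ioctl names and the fields counter_sets, cpumask_len, cpumask and data_bytes are taken from the hunks above; the header name <asm/hwctrset.h>, the S390_HWCTR_VERSION constant and the read-buffer layout are assumptions about the uapi definitions, so treat this strictly as an illustrative sketch.

        /* Illustrative only: one counter-set session per file descriptor. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/types.h>
        #include <asm/hwctrset.h>               /* assumed uapi header name */

        int main(void)
        {
                struct s390_ctrset_start start;
                __u64 cpumask[16];
                void *buf = NULL;
                int fd, rc = 1;

                fd = open("/dev/hwctr", O_RDWR);        /* each fd is its own session */
                if (fd < 0)
                        return 1;

                memset(&start, 0, sizeof(start));
                memset(cpumask, 0xff, sizeof(cpumask)); /* all CPUs; offline ones are skipped */
                start.version = S390_HWCTR_VERSION;     /* assumed version constant */
                start.counter_sets = 1;                 /* e.g. the basic counter set */
                start.cpumask_len = sizeof(cpumask);
                /* cast keeps the sketch agnostic to the exact uapi field type */
                start.cpumask = (__typeof__(start.cpumask))(unsigned long)cpumask;

                /* START fails with -EBUSY only if this descriptor already started. */
                if (ioctl(fd, S390_HWCTR_START, &start))
                        goto out;
                buf = malloc(start.data_bytes);         /* size filled in by the kernel */
                if (buf && !ioctl(fd, S390_HWCTR_READ, buf))
                        printf("read up to %llu bytes of counter data\n",
                               (unsigned long long)start.data_bytes);
                ioctl(fd, S390_HWCTR_STOP, 0);          /* close(fd) alone would also stop it */
                rc = 0;
        out:
                free(buf);
                close(fd);
                return rc;
        }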
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index e5dd46b1bff8..e8858b2de24b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -141,7 +141,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
frame->childregs.gprs[10] = arg;
frame->childregs.gprs[11] = (unsigned long)do_exit;
frame->childregs.orig_gpr2 = -1;
-
+ frame->childregs.last_break = 1;
return 0;
}
frame->childregs = *current_pt_regs();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8a378d426239..40405f2304f1 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -95,10 +95,10 @@ EXPORT_SYMBOL(console_irq);
* relocated above 2 GB, because it has to use 31 bit addresses.
* Such code and data is part of the .amode31 section.
*/
-unsigned long __amode31_ref __samode31 = __pa(&_samode31);
-unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
-unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
-unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
+unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
+unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
+unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
+unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
@@ -149,6 +149,7 @@ struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);
unsigned long __bootdata_preserved(__kaslr_offset);
+unsigned long __bootdata(__amode31_base);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -173,6 +174,8 @@ unsigned long MODULES_END;
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
+DEFINE_STATIC_KEY_FALSE(cpu_has_bear);
+
/*
* The Write Back bit position in the physaddr is given by the SLPC PCI.
* Leaving the mask zero always uses write through which is safe
@@ -719,7 +722,7 @@ static void __init reserve_initrd(void)
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_data.start || !initrd_data.size)
return;
- initrd_start = initrd_data.start;
+ initrd_start = (unsigned long)__va(initrd_data.start);
initrd_end = initrd_start + initrd_data.size;
memblock_reserve(initrd_data.start, initrd_data.size);
#endif
@@ -805,12 +808,10 @@ static void __init check_initrd(void)
*/
static void __init reserve_kernel(void)
{
- unsigned long start_pfn = PFN_UP(__pa(_end));
-
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
- memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
- memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- - (unsigned long)_stext);
+ memblock_reserve(__amode31_base, __eamode31 - __samode31);
+ memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
+ memblock_reserve(__pa(_stext), _end - _stext);
}
static void __init setup_memory(void)
@@ -832,20 +833,14 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void)
{
- unsigned long amode31_addr, amode31_size;
- long amode31_offset;
+ unsigned long amode31_size = __eamode31 - __samode31;
+ long amode31_offset = __amode31_base - __samode31;
long *ptr;
- /* Allocate a new AMODE31 capable memory region */
- amode31_size = __eamode31 - __samode31;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
- amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
- if (!amode31_addr)
- panic("Failed to allocate memory for AMODE31 section\n");
- amode31_offset = amode31_addr - __samode31;
/* Move original AMODE31 section to the new one */
- memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
+ memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */
memset((void *)__samode31, 0, amode31_size);
@@ -884,14 +879,12 @@ static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;
- vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
- PAGE_SIZE);
+ vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!vmms)
panic("Failed to allocate memory for sysinfo structure\n");
-
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
- memblock_phys_free((unsigned long)vmms, PAGE_SIZE);
+ memblock_free(vmms, PAGE_SIZE);
}
/*
@@ -1048,6 +1041,9 @@ void __init setup_arch(char **cmdline_p)
smp_detect_cpus();
topology_init_early();
+ if (test_facility(193))
+ static_branch_enable(&cpu_has_bear);
+
/*
* Create kernel page tables and switch to virtual addressing.
*/
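
Editorial note: the new cpu_has_bear static key is wired up across several files in this series (setup.c enables it; syscall.c, traps.c, dump_pagetables.c and vmem.c test it). A condensed, illustrative sketch of the pattern, using only the APIs visible in the hunks (the helper function names are made up for the sketch):

        #include <linux/init.h>
        #include <linux/jump_label.h>
        #include <linux/sched.h>
        #include <asm/facility.h>
        #include <asm/ptrace.h>

        /* Key is off by default; checks compile to a patchable NOP/branch. */
        DEFINE_STATIC_KEY_FALSE(cpu_has_bear);

        static void __init detect_bear_facility(void)
        {
                if (test_facility(193))                 /* BEAR enhancement facility */
                        static_branch_enable(&cpu_has_bear);
        }

        /* Hot path: only trust last_break when the hardware records it reliably. */
        static void note_last_break(struct pt_regs *regs)
        {
                if (static_branch_likely(&cpu_has_bear))
                        current->thread.last_break = regs->last_break;
        }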
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index 8fe2d23b64f4..dc2355c623d6 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -154,6 +154,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
regs->psw = S390_lowcore.svc_old_psw;
regs->int_code = S390_lowcore.svc_int_code;
update_timer_sys();
+ if (static_branch_likely(&cpu_has_bear))
+ current->thread.last_break = regs->last_break;
local_irq_enable();
regs->orig_gpr2 = regs->gprs[2];
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index bcefc2173de4..6c6f7dcce1a5 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -300,7 +300,6 @@ static void (*pgm_check_table[128])(struct pt_regs *regs);
void noinstr __do_pgm_check(struct pt_regs *regs)
{
- unsigned long last_break = S390_lowcore.breaking_event_addr;
unsigned int trapnr;
irqentry_state_t state;
@@ -311,10 +310,11 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
if (user_mode(regs)) {
update_timer_sys();
- if (last_break < 4096)
- last_break = 1;
- current->thread.last_break = last_break;
- regs->args[0] = last_break;
+ if (!static_branch_likely(&cpu_has_bear)) {
+ if (regs->last_break < 4096)
+ regs->last_break = 1;
+ }
+ current->thread.last_break = regs->last_break;
}
if (S390_lowcore.pgm_code & 0x0200) {
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 63bdb9e1bfc1..42c43521878f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -212,6 +212,7 @@ SECTIONS
QUAD(__dynsym_start) /* dynsym_start */
QUAD(__rela_dyn_start) /* rela_dyn_start */
QUAD(__rela_dyn_end) /* rela_dyn_end */
+ QUAD(_eamode31 - _samode31) /* amode31_size */
} :NONE
/* Debugging sections. */
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2245f4b8d362..c3bd993fdd0c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -960,7 +960,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
/* bit 1+2 of the target are the ilc, so we can directly use ilen */
rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
- (u64 *) __LC_LAST_BREAK);
+ (u64 *) __LC_PGM_LAST_BREAK);
rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 678333936f78..707cd4622c13 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -7,6 +7,8 @@ lib-y += delay.o string.o uaccess.o find.o spinlock.o
obj-y += mem.o xor.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
+obj-$(CONFIG_S390_KPROBES_SANITY_TEST) += test_kprobes_s390.o
+test_kprobes_s390-objs += test_kprobes_asm.o test_kprobes.o
# Instrumenting memory accesses to __user data (in different address space)
# produce false positives
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 9b2dab5a69f9..692dc84cd19c 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -26,7 +26,7 @@ static int __init spin_retry_init(void)
}
early_initcall(spin_retry_init);
-/**
+/*
* spin_retry= parameter
*/
static int __init spin_retry_setup(char *str)
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 47080560e0d8..7d8741818239 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -101,32 +101,6 @@ EXPORT_SYMBOL(strcpy);
#endif
/**
- * strlcpy - Copy a %NUL terminated string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Compatible with *BSD: the result is always a valid
- * NUL-terminated string that fits in the buffer (unless,
- * of course, the buffer size is zero). It does not pad
- * out the result like strncpy() does.
- */
-#ifdef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = __strend(src) - src;
-
- if (size) {
- size_t len = (ret >= size) ? size-1 : ret;
- dest[len] = '\0';
- memcpy(dest, src, len);
- }
- return ret;
-}
-EXPORT_SYMBOL(strlcpy);
-#endif
-
-/**
* strncpy - Copy a length-limited, %NUL-terminated string
* @dest: Where to copy the string to
* @src: Where to copy the string from
@@ -254,25 +228,6 @@ int strcmp(const char *s1, const char *s2)
EXPORT_SYMBOL(strcmp);
#endif
-/**
- * strrchr - Find the last occurrence of a character in a string
- * @s: The string to be searched
- * @c: The character to search for
- */
-#ifdef __HAVE_ARCH_STRRCHR
-char *strrchr(const char *s, int c)
-{
- ssize_t len = __strend(s) - s;
-
- do {
- if (s[len] == (char)c)
- return (char *)s + len;
- } while (--len >= 0);
- return NULL;
-}
-EXPORT_SYMBOL(strrchr);
-#endif
-
static inline int clcle(const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
{
diff --git a/arch/s390/lib/test_kprobes.c b/arch/s390/lib/test_kprobes.c
new file mode 100644
index 000000000000..9e62d62812e5
--- /dev/null
+++ b/arch/s390/lib/test_kprobes.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+#include "test_kprobes.h"
+
+static struct kprobe kp;
+
+static void setup_kprobe(struct kunit *test, struct kprobe *kp,
+ const char *symbol, int offset)
+{
+ kp->offset = offset;
+ kp->addr = NULL;
+ kp->symbol_name = symbol;
+}
+
+static void test_kprobe_offset(struct kunit *test, struct kprobe *kp,
+ const char *target, int offset)
+{
+ int ret;
+
+ setup_kprobe(test, kp, target, 0);
+ ret = register_kprobe(kp);
+ if (!ret)
+ unregister_kprobe(kp);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ setup_kprobe(test, kp, target, offset);
+ ret = register_kprobe(kp);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+ if (!ret)
+ unregister_kprobe(kp);
+}
+
+static void test_kprobe_odd(struct kunit *test)
+{
+ test_kprobe_offset(test, &kp, "kprobes_target_odd",
+ kprobes_target_odd_offs);
+}
+
+static void test_kprobe_in_insn4(struct kunit *test)
+{
+ test_kprobe_offset(test, &kp, "kprobes_target_in_insn4",
+ kprobes_target_in_insn4_offs);
+}
+
+static void test_kprobe_in_insn6_lo(struct kunit *test)
+{
+ test_kprobe_offset(test, &kp, "kprobes_target_in_insn6_lo",
+ kprobes_target_in_insn6_lo_offs);
+}
+
+static void test_kprobe_in_insn6_hi(struct kunit *test)
+{
+ test_kprobe_offset(test, &kp, "kprobes_target_in_insn6_hi",
+ kprobes_target_in_insn6_hi_offs);
+}
+
+static struct kunit_case kprobes_testcases[] = {
+ KUNIT_CASE(test_kprobe_odd),
+ KUNIT_CASE(test_kprobe_in_insn4),
+ KUNIT_CASE(test_kprobe_in_insn6_lo),
+ KUNIT_CASE(test_kprobe_in_insn6_hi),
+ {}
+};
+
+static struct kunit_suite kprobes_test_suite = {
+ .name = "kprobes_test_s390",
+ .test_cases = kprobes_testcases,
+};
+
+kunit_test_suites(&kprobes_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/lib/test_kprobes.h b/arch/s390/lib/test_kprobes.h
new file mode 100644
index 000000000000..2b4c9bc337f1
--- /dev/null
+++ b/arch/s390/lib/test_kprobes.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef TEST_KPROBES_H
+#define TEST_KPROBES_H
+
+extern unsigned long kprobes_target_odd_offs;
+extern unsigned long kprobes_target_in_insn4_offs;
+extern unsigned long kprobes_target_in_insn6_lo_offs;
+extern unsigned long kprobes_target_in_insn6_hi_offs;
+
+#endif
diff --git a/arch/s390/lib/test_kprobes_asm.S b/arch/s390/lib/test_kprobes_asm.S
new file mode 100644
index 000000000000..ade7a3042334
--- /dev/null
+++ b/arch/s390/lib/test_kprobes_asm.S
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+#define KPROBES_TARGET_START(name) \
+ SYM_FUNC_START(name); \
+ FTRACE_GEN_NOP_ASM(name)
+
+#define KPROBES_TARGET_END(name) \
+ SYM_FUNC_END(name); \
+ SYM_DATA(name##_offs, .quad 1b - name)
+
+KPROBES_TARGET_START(kprobes_target_in_insn4)
+ .word 0x4700 // bc 0,0
+1: .word 0x0000
+ br %r14
+KPROBES_TARGET_END(kprobes_target_in_insn4)
+
+KPROBES_TARGET_START(kprobes_target_in_insn6_lo)
+ .word 0xe310 // ly 1,0
+1: .word 0x0000
+ .word 0x0058
+ br %r14
+KPROBES_TARGET_END(kprobes_target_in_insn6_lo)
+
+KPROBES_TARGET_START(kprobes_target_in_insn6_hi)
+ .word 0xe310 // ly 1,0
+ .word 0x0000
+1: .word 0x0058
+ br %r14
+KPROBES_TARGET_END(kprobes_target_in_insn6_hi)
+
+KPROBES_TARGET_START(kprobes_target_bp)
+ nop
+ .word 0x0000
+ nop
+1: br %r14
+KPROBES_TARGET_END(kprobes_target_bp)
+
+KPROBES_TARGET_START(kprobes_target_odd)
+ .byte 0x07
+1: .byte 0x07
+ br %r14
+KPROBES_TARGET_END(kprobes_target_odd)
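
Editorial note: the label arithmetic in KPROBES_TARGET_END() stores the offset of the local label 1: relative to the function entry, which is exactly what the KUnit cases above feed to register_kprobe() as an invalid offset: kprobes_target_odd_offs works out to 1 (odd, hence rejected), kprobes_target_in_insn4_offs to 2 (inside the 4-byte bc instruction), and the two _in_insn6 variants to 2 and 4 (inside the 6-byte instruction).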
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index ecf327d743a0..cfc5f5557c06 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -3,7 +3,7 @@
* Test module for unwind_for_each_frame
*/
-#define pr_fmt(fmt) "test_unwind: " fmt
+#include <kunit/test.h>
#include <asm/unwind.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
@@ -16,6 +16,8 @@
#include <linux/wait.h>
#include <asm/irq.h>
+struct kunit *current_test;
+
#define BT_BUF_SIZE (PAGE_SIZE * 4)
/*
@@ -29,7 +31,7 @@ static void print_backtrace(char *bt)
p = strsep(&bt, "\n");
if (!p)
break;
- pr_err("%s\n", p);
+ kunit_err(current_test, "%s\n", p);
}
}
@@ -49,7 +51,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC);
if (!bt) {
- pr_err("failed to allocate backtrace buffer\n");
+ kunit_err(current_test, "failed to allocate backtrace buffer\n");
return -ENOMEM;
}
/* Unwind. */
@@ -63,7 +65,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
if (frame_count++ == max_frames)
break;
if (state.reliable && !addr) {
- pr_err("unwind state reliable but addr is 0\n");
+ kunit_err(current_test, "unwind state reliable but addr is 0\n");
ret = -EINVAL;
break;
}
@@ -75,7 +77,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
stack_type_name(state.stack_info.type),
(void *)state.sp, (void *)state.ip);
if (bt_pos >= BT_BUF_SIZE)
- pr_err("backtrace buffer is too small\n");
+ kunit_err(current_test, "backtrace buffer is too small\n");
}
frame_count += 1;
if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
@@ -85,15 +87,15 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
/* Check the results. */
if (unwind_error(&state)) {
- pr_err("unwind error\n");
+ kunit_err(current_test, "unwind error\n");
ret = -EINVAL;
}
if (!seen_func2_func1) {
- pr_err("unwindme_func2 and unwindme_func1 not found\n");
+ kunit_err(current_test, "unwindme_func2 and unwindme_func1 not found\n");
ret = -EINVAL;
}
if (frame_count == max_frames) {
- pr_err("Maximum number of frames exceeded\n");
+ kunit_err(current_test, "Maximum number of frames exceeded\n");
ret = -EINVAL;
}
if (ret)
@@ -166,7 +168,7 @@ static noinline int unwindme_func4(struct unwindme *u)
kp.pre_handler = pgm_pre_handler;
ret = register_kprobe(&kp);
if (ret < 0) {
- pr_err("register_kprobe failed %d\n", ret);
+ kunit_err(current_test, "register_kprobe failed %d\n", ret);
return -EINVAL;
}
@@ -252,7 +254,7 @@ static int test_unwind_irq(struct unwindme *u)
}
/* Spawns a task and passes it to test_unwind(). */
-static int test_unwind_task(struct unwindme *u)
+static int test_unwind_task(struct kunit *test, struct unwindme *u)
{
struct task_struct *task;
int ret;
@@ -267,7 +269,7 @@ static int test_unwind_task(struct unwindme *u)
*/
task = kthread_run(unwindme_func1, u, "%s", __func__);
if (IS_ERR(task)) {
- pr_err("kthread_run() failed\n");
+ kunit_err(test, "kthread_run() failed\n");
return PTR_ERR(task);
}
/*
@@ -282,77 +284,98 @@ static int test_unwind_task(struct unwindme *u)
return ret;
}
-static int test_unwind_flags(int flags)
+struct test_params {
+ int flags;
+ char *name;
+};
+
+/*
+ * Create required parameter list for tests
+ */
+static const struct test_params param_list[] = {
+ {.flags = UWM_DEFAULT, .name = "UWM_DEFAULT"},
+ {.flags = UWM_SP, .name = "UWM_SP"},
+ {.flags = UWM_REGS, .name = "UWM_REGS"},
+ {.flags = UWM_SWITCH_STACK,
+ .name = "UWM_SWITCH_STACK"},
+ {.flags = UWM_SP | UWM_REGS,
+ .name = "UWM_SP | UWM_REGS"},
+ {.flags = UWM_CALLER | UWM_SP,
+	 .name = "UWM_CALLER | UWM_SP"},
+ {.flags = UWM_CALLER | UWM_SP | UWM_REGS,
+ .name = "UWM_CALLER | UWM_SP | UWM_REGS"},
+ {.flags = UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK,
+ .name = "UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"},
+ {.flags = UWM_THREAD, .name = "UWM_THREAD"},
+ {.flags = UWM_THREAD | UWM_SP,
+ .name = "UWM_THREAD | UWM_SP"},
+ {.flags = UWM_THREAD | UWM_CALLER | UWM_SP,
+ .name = "UWM_THREAD | UWM_CALLER | UWM_SP"},
+ {.flags = UWM_IRQ, .name = "UWM_IRQ"},
+ {.flags = UWM_IRQ | UWM_SWITCH_STACK,
+ .name = "UWM_IRQ | UWM_SWITCH_STACK"},
+ {.flags = UWM_IRQ | UWM_SP,
+ .name = "UWM_IRQ | UWM_SP"},
+ {.flags = UWM_IRQ | UWM_REGS,
+ .name = "UWM_IRQ | UWM_REGS"},
+ {.flags = UWM_IRQ | UWM_SP | UWM_REGS,
+ .name = "UWM_IRQ | UWM_SP | UWM_REGS"},
+ {.flags = UWM_IRQ | UWM_CALLER | UWM_SP,
+ .name = "UWM_IRQ | UWM_CALLER | UWM_SP"},
+ {.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS,
+ .name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS"},
+ {.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK,
+ .name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"},
+ #ifdef CONFIG_KPROBES
+ {.flags = UWM_PGM, .name = "UWM_PGM"},
+ {.flags = UWM_PGM | UWM_SP,
+ .name = "UWM_PGM | UWM_SP"},
+ {.flags = UWM_PGM | UWM_REGS,
+ .name = "UWM_PGM | UWM_REGS"},
+ {.flags = UWM_PGM | UWM_SP | UWM_REGS,
+ .name = "UWM_PGM | UWM_SP | UWM_REGS"},
+ #endif
+};
+
+/*
+ * Parameter description generator: required for KUNIT_ARRAY_PARAM()
+ */
+static void get_desc(const struct test_params *params, char *desc)
+{
+ strscpy(desc, params->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+/*
+ * Create test_unwind_gen_params
+ */
+KUNIT_ARRAY_PARAM(test_unwind, param_list, get_desc);
+
+static void test_unwind_flags(struct kunit *test)
{
struct unwindme u;
+ const struct test_params *params;
- u.flags = flags;
+ current_test = test;
+ params = (const struct test_params *)test->param_value;
+ u.flags = params->flags;
if (u.flags & UWM_THREAD)
- return test_unwind_task(&u);
+ KUNIT_EXPECT_EQ(test, 0, test_unwind_task(test, &u));
else if (u.flags & UWM_IRQ)
- return test_unwind_irq(&u);
+ KUNIT_EXPECT_EQ(test, 0, test_unwind_irq(&u));
else
- return unwindme_func1(&u);
+ KUNIT_EXPECT_EQ(test, 0, unwindme_func1(&u));
}
-static int test_unwind_init(void)
-{
- int failed = 0;
- int total = 0;
-
-#define TEST(flags) \
-do { \
- pr_info("[ RUN ] " #flags "\n"); \
- total++; \
- if (!test_unwind_flags((flags))) { \
- pr_info("[ OK ] " #flags "\n"); \
- } else { \
- pr_err("[ FAILED ] " #flags "\n"); \
- failed++; \
- } \
-} while (0)
-
- pr_info("running stack unwinder tests");
- TEST(UWM_DEFAULT);
- TEST(UWM_SP);
- TEST(UWM_REGS);
- TEST(UWM_SWITCH_STACK);
- TEST(UWM_SP | UWM_REGS);
- TEST(UWM_CALLER | UWM_SP);
- TEST(UWM_CALLER | UWM_SP | UWM_REGS);
- TEST(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
- TEST(UWM_THREAD);
- TEST(UWM_THREAD | UWM_SP);
- TEST(UWM_THREAD | UWM_CALLER | UWM_SP);
- TEST(UWM_IRQ);
- TEST(UWM_IRQ | UWM_SWITCH_STACK);
- TEST(UWM_IRQ | UWM_SP);
- TEST(UWM_IRQ | UWM_REGS);
- TEST(UWM_IRQ | UWM_SP | UWM_REGS);
- TEST(UWM_IRQ | UWM_CALLER | UWM_SP);
- TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS);
- TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
-#ifdef CONFIG_KPROBES
- TEST(UWM_PGM);
- TEST(UWM_PGM | UWM_SP);
- TEST(UWM_PGM | UWM_REGS);
- TEST(UWM_PGM | UWM_SP | UWM_REGS);
-#endif
-#undef TEST
- if (failed) {
- pr_err("%d of %d stack unwinder tests failed", failed, total);
- WARN(1, "%d of %d stack unwinder tests failed", failed, total);
- } else {
- pr_info("all %d stack unwinder tests passed", total);
- }
+static struct kunit_case unwind_test_cases[] = {
+ KUNIT_CASE_PARAM(test_unwind_flags, test_unwind_gen_params),
+ {}
+};
- return failed ? -EINVAL : 0;
-}
+static struct kunit_suite test_unwind_suite = {
+ .name = "test_unwind",
+ .test_cases = unwind_test_cases,
+};
-static void test_unwind_exit(void)
-{
-}
+kunit_test_suites(&test_unwind_suite);
-module_init(test_unwind_init);
-module_exit(test_unwind_exit);
MODULE_LICENSE("GPL");
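
Editorial note: the KUnit conversion above relies on parameterized tests; stripped of the unwinder specifics, the pattern looks roughly like this (all names here are illustrative, not from the kernel tree):

        #include <kunit/test.h>
        #include <linux/module.h>
        #include <linux/string.h>

        struct demo_param {
                int flags;
                char *name;
        };

        static const struct demo_param demo_params[] = {
                { .flags = 0, .name = "none" },
                { .flags = 3, .name = "both" },
        };

        static void demo_get_desc(const struct demo_param *p, char *desc)
        {
                strscpy(desc, p->name, KUNIT_PARAM_DESC_SIZE);
        }

        /* Generates demo_gen_params(), consumed by KUNIT_CASE_PARAM() below. */
        KUNIT_ARRAY_PARAM(demo, demo_params, demo_get_desc);

        static void demo_test(struct kunit *test)
        {
                const struct demo_param *p = test->param_value;

                KUNIT_EXPECT_GE(test, p->flags, 0);
        }

        static struct kunit_case demo_cases[] = {
                KUNIT_CASE_PARAM(demo_test, demo_gen_params),
                {}
        };

        static struct kunit_suite demo_suite = {
                .name = "demo_param",
                .test_cases = demo_cases,
        };

        kunit_test_suites(&demo_suite);
        MODULE_LICENSE("GPL");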
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 1141c8d5c0d0..2203164b39da 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -14,8 +14,8 @@
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <linux/sched.h>
+#include <linux/string_helpers.h>
#include <linux/sysctl.h>
-#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
@@ -394,13 +394,10 @@ static int __init cmm_init(void)
goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
/* convert sender to uppercase characters */
- if (sender) {
- int len = strlen(sender);
- while (len--)
- sender[len] = toupper(sender[len]);
- } else {
+ if (sender)
+ string_upper(sender, sender);
+ else
sender = cmm_default_sender;
- }
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
if (rc < 0)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 0b0c8c284953..9f9af5298dd6 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -8,6 +8,7 @@
#include <linux/kasan.h>
#include <asm/ptdump.h>
#include <asm/kasan.h>
+#include <asm/nospec-branch.h>
#include <asm/sections.h>
static unsigned long max_addr;
@@ -116,8 +117,13 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
return;
if (st->current_prot & _PAGE_NOEXEC)
return;
- /* The first lowcore page is currently still W+X. */
- if (addr == PAGE_SIZE)
+ /*
+ * The first lowcore page is W+X if spectre mitigations are using
+ * trampolines or the BEAR enhancements facility is not installed,
+ * in which case we have two lpswe instructions in lowcore that need
+ * to be executable.
+ */
+ if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)))
return;
WARN_ONCE(1, "s390/mm: Found insecure W+X mapping at address %pS\n",
(void *)st->start_address);
@@ -203,7 +209,9 @@ void ptdump_check_wx(void)
if (st.wx_pages)
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages);
else
- pr_info("Checked W+X mappings: passed, no unexpected W+X pages found\n");
+ pr_info("Checked W+X mappings: passed, no %sW+X pages found\n",
+ (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) ?
+ "unexpected " : "");
}
#endif /* CONFIG_DEBUG_WX */
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index fdc86c0e4e6c..654019181a37 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -57,7 +57,7 @@ void arch_report_meminfo(struct seq_file *m)
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
unsigned long dtt)
{
- unsigned long table, mask;
+ unsigned long *table, mask;
mask = 0;
if (MACHINE_HAS_EDAT2) {
@@ -72,7 +72,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
break;
}
- table = (unsigned long)old & mask;
+ table = (unsigned long *)((unsigned long)old & mask);
crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
} else if (MACHINE_HAS_IDTE) {
cspg(old, *old, new);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 2b1c6d916cf9..7d9705eeb02f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -13,6 +13,7 @@
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
+#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
@@ -584,8 +585,13 @@ void __init vmem_map_init(void)
__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
- /* we need lowcore executable for our LPSWE instructions */
- set_memory_x(0, 1);
+ if (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) {
+ /*
+ * Lowcore must be executable for LPSWE
+ * and expoline trampoline branch instructions.
+ */
+ set_memory_x(0, 1);
+ }
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 1a374d021e25..233cc9bcd652 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -567,7 +567,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
save_restore_regs(jit, REGS_RESTORE, stack_depth);
- if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
+ if (nospec_uses_trampoline()) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
if (test_facility(35)) {
@@ -585,7 +585,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
/* br %r14 */
_EMIT2(0x07fe);
- if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
+ if ((nospec_uses_trampoline()) &&
(is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
@@ -1332,7 +1332,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
jit->seen |= SEEN_FUNC;
/* lgrl %w1,func */
EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
- if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
+ if (nospec_uses_trampoline()) {
/* brasl %r14,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
} else {
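
Editorial note: the BPF JIT hunks above fold the open-coded test `__is_defined(CC_USING_EXPOLINE) && !nospec_disable` into nospec_uses_trampoline(). The helper itself is defined outside this excerpt (nospec-branch.h per the diffstat), but judging from the call sites it replaces it presumably amounts to:

        /* Sketch of the helper, not taken verbatim from the header. */
        extern int nospec_disable;

        static inline bool nospec_uses_trampoline(void)
        {
                return __is_defined(CC_USING_EXPOLINE) && !nospec_disable;
        }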
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 93223bd110c3..1f4540d6bd2d 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -18,6 +18,8 @@
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
+static u64 s390_iommu_aperture;
+static u32 s390_iommu_aperture_factor = 1;
static int zpci_refresh_global(struct zpci_dev *zdev)
{
@@ -565,15 +567,19 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
/*
* Restrict the iommu bitmap size to the minimum of the following:
- * - main memory size
+ * - s390_iommu_aperture which defaults to high_memory
* - 3-level pagetable address limit minus start_dma offset
* - DMA address range allowed by the hardware (clp query pci fn)
*
* Also set zdev->end_dma to the actual end address of the usable
* range, instead of the theoretical maximum as reported by hardware.
+ *
+ * This limits the number of concurrently usable DMA mappings since
+ * for each DMA mapped memory address we need a DMA address, including
+ * extra DMA addresses for multiple mappings of the same memory address.
*/
zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
- zdev->iommu_size = min3((u64) high_memory,
+ zdev->iommu_size = min3(s390_iommu_aperture,
ZPCI_TABLE_SIZE_RT - zdev->start_dma,
zdev->end_dma - zdev->start_dma + 1);
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
@@ -660,6 +666,12 @@ static int __init dma_alloc_cpu_table_caches(void)
int __init zpci_dma_init(void)
{
+ s390_iommu_aperture = (u64)high_memory;
+ if (!s390_iommu_aperture_factor)
+ s390_iommu_aperture = ULONG_MAX;
+ else
+ s390_iommu_aperture *= s390_iommu_aperture_factor;
+
return dma_alloc_cpu_table_caches();
}
@@ -692,3 +704,12 @@ static int __init s390_iommu_setup(char *str)
}
__setup("s390_iommu=", s390_iommu_setup);
+
+static int __init s390_iommu_aperture_setup(char *str)
+{
+ if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
+ s390_iommu_aperture_factor = 1;
+ return 1;
+}
+
+__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
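
Editorial note: per the code added above, the early parameter scales the aperture computed in zpci_dma_init(): the decimal factor multiplies high_memory, a factor of 0 removes the limit entirely, and an unparsable value falls back to the default factor of 1. Example kernel command-line settings (values are illustrative):

        s390_iommu_aperture=4    (aperture = 4 * high_memory)
        s390_iommu_aperture=0    (aperture = ULONG_MAX, i.e. unlimited)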
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 5b8d647523f9..6a5bfa9dc1f2 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -52,6 +52,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
struct pci_dev *pdev = NULL;
+ zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
+ ccdf->fid, ccdf->fh, ccdf->pec);
zpci_err("error CCDF:\n");
zpci_err_hex(ccdf, sizeof(*ccdf));
@@ -96,6 +98,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
enum zpci_state state;
+ zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
+ ccdf->fid, ccdf->fh, ccdf->pec);
zpci_err("avail CCDF:\n");
zpci_err_hex(ccdf, sizeof(*ccdf));
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 335c281811c7..cae280e5c047 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -90,6 +90,14 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
if (zdev_enabled(zdev)) {
ret = zpci_disable_device(zdev);
+ /*
+ * Due to a z/VM vs LPAR inconsistency in the error
+ * state, the FH may indicate an enabled device but
+ * disable says the device is already disabled; don't
+ * treat it as an error here.
+ */
+ if (ret == -EINVAL)
+ ret = 0;
if (ret)
goto out;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed322ac00343..95dd1ee01546 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -192,6 +192,8 @@ config X86
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if X86_64
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ select HAVE_SAMPLE_FTRACE_DIRECT if X86_64
+ select HAVE_SAMPLE_FTRACE_MULTI_DIRECT if X86_64
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA