Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kbuild | 1
-rw-r--r--  arch/s390/Kconfig | 30
-rw-r--r--  arch/s390/Makefile | 8
-rw-r--r--  arch/s390/appldata/Makefile | 1
-rw-r--r--  arch/s390/appldata/appldata_base.c | 1
-rw-r--r--  arch/s390/appldata/appldata_mem.c | 1
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c | 1
-rw-r--r--  arch/s390/appldata/appldata_os.c | 1
-rw-r--r--  arch/s390/boot/compressed/Makefile | 2
-rw-r--r--  arch/s390/boot/compressed/misc.c | 2
-rw-r--r--  arch/s390/boot/compressed/vmlinux.scr | 1
-rw-r--r--  arch/s390/boot/install.sh | 5
-rw-r--r--  arch/s390/configs/default_defconfig | 16
-rw-r--r--  arch/s390/configs/gcov_defconfig | 13
-rw-r--r--  arch/s390/configs/performance_defconfig | 13
-rw-r--r--  arch/s390/crypto/aes_s390.c | 303
-rw-r--r--  arch/s390/crypto/arch_random.c | 6
-rw-r--r--  arch/s390/crypto/crc32-vx.c | 1
-rw-r--r--  arch/s390/crypto/des_s390.c | 7
-rw-r--r--  arch/s390/crypto/ghash_s390.c | 1
-rw-r--r--  arch/s390/crypto/paes_s390.c | 6
-rw-r--r--  arch/s390/crypto/prng.c | 1
-rw-r--r--  arch/s390/crypto/sha.h | 7
-rw-r--r--  arch/s390/crypto/sha1_s390.c | 7
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 7
-rw-r--r--  arch/s390/crypto/sha512_s390.c | 7
-rw-r--r--  arch/s390/crypto/sha_common.c | 7
-rw-r--r--  arch/s390/defconfig | 3
-rw-r--r--  arch/s390/hypfs/Makefile | 1
-rw-r--r--  arch/s390/hypfs/inode.c | 2
-rw-r--r--  arch/s390/include/asm/Kbuild | 2
-rw-r--r--  arch/s390/include/asm/alternative.h | 150
-rw-r--r--  arch/s390/include/asm/ap.h | 5
-rw-r--r--  arch/s390/include/asm/archrandom.h | 26
-rw-r--r--  arch/s390/include/asm/atomic_ops.h | 32
-rw-r--r--  arch/s390/include/asm/bugs.h | 1
-rw-r--r--  arch/s390/include/asm/ccwgroup.h | 2
-rw-r--r--  arch/s390/include/asm/compat.h | 1
-rw-r--r--  arch/s390/include/asm/cpacf.h | 52
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 13
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 32
-rw-r--r--  arch/s390/include/asm/debug.h | 190
-rw-r--r--  arch/s390/include/asm/dis.h | 28
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 5
-rw-r--r--  arch/s390/include/asm/elf.h | 15
-rw-r--r--  arch/s390/include/asm/futex.h | 9
-rw-r--r--  arch/s390/include/asm/ipl.h | 3
-rw-r--r--  arch/s390/include/asm/kprobes.h | 17
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 34
-rw-r--r--  arch/s390/include/asm/kvm_para.h | 7
-rw-r--r--  arch/s390/include/asm/livepatch.h | 8
-rw-r--r--  arch/s390/include/asm/lowcore.h | 36
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 42
-rw-r--r--  arch/s390/include/asm/nmi.h | 19
-rw-r--r--  arch/s390/include/asm/pci_debug.h | 6
-rw-r--r--  arch/s390/include/asm/pci_insn.h | 2
-rw-r--r--  arch/s390/include/asm/perf_event.h | 18
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 18
-rw-r--r--  arch/s390/include/asm/pgtable.h | 2
-rw-r--r--  arch/s390/include/asm/processor.h | 14
-rw-r--r--  arch/s390/include/asm/ptrace.h | 13
-rw-r--r--  arch/s390/include/asm/runtime_instr.h | 86
-rw-r--r--  arch/s390/include/asm/rwsem.h | 211
-rw-r--r--  arch/s390/include/asm/sections.h | 2
-rw-r--r--  arch/s390/include/asm/segment.h | 1
-rw-r--r--  arch/s390/include/asm/setup.h | 5
-rw-r--r--  arch/s390/include/asm/smp.h | 5
-rw-r--r--  arch/s390/include/asm/spinlock.h | 179
-rw-r--r--  arch/s390/include/asm/spinlock_types.h | 4
-rw-r--r--  arch/s390/include/asm/string.h | 46
-rw-r--r--  arch/s390/include/asm/switch_to.h | 29
-rw-r--r--  arch/s390/include/asm/syscall.h | 5
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 9
-rw-r--r--  arch/s390/include/asm/topology.h | 3
-rw-r--r--  arch/s390/include/asm/uaccess.h | 29
-rw-r--r--  arch/s390/include/asm/vdso.h | 2
-rw-r--r--  arch/s390/include/asm/vga.h | 1
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/s390/include/uapi/asm/bpf_perf_event.h | 9
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h | 9
-rw-r--r--  arch/s390/include/uapi/asm/kvm_para.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/kvm_perf.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/kvm_virtio.h | 65
-rw-r--r--  arch/s390/include/uapi/asm/perf_regs.h | 44
-rw-r--r--  arch/s390/include/uapi/asm/ptrace.h | 125
-rw-r--r--  arch/s390/include/uapi/asm/sthyi.h | 7
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/s390/include/uapi/asm/virtio-ccw.h | 6
-rw-r--r--  arch/s390/include/uapi/asm/vmcp.h | 1
-rw-r--r--  arch/s390/include/uapi/asm/zcrypt.h | 14
-rw-r--r--  arch/s390/kernel/Makefile | 8
-rw-r--r--  arch/s390/kernel/alternative.c | 111
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 7
-rw-r--r--  arch/s390/kernel/compat_linux.c | 1
-rw-r--r--  arch/s390/kernel/compat_signal.c | 33
-rw-r--r--  arch/s390/kernel/compat_wrapper.c | 1
-rw-r--r--  arch/s390/kernel/debug.c | 918
-rw-r--r--  arch/s390/kernel/dis.c | 2047
-rw-r--r--  arch/s390/kernel/dumpstack.c | 1
-rw-r--r--  arch/s390/kernel/early.c | 145
-rw-r--r--  arch/s390/kernel/entry.S | 108
-rw-r--r--  arch/s390/kernel/entry.h | 1
-rw-r--r--  arch/s390/kernel/guarded_storage.c | 7
-rw-r--r--  arch/s390/kernel/head64.S | 2
-rw-r--r--  arch/s390/kernel/ipl.c | 37
-rw-r--r--  arch/s390/kernel/kprobes.c | 22
-rw-r--r--  arch/s390/kernel/lgr.c | 7
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 22
-rw-r--r--  arch/s390/kernel/module.c | 29
-rw-r--r--  arch/s390/kernel/nmi.c | 206
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 5
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_events.c | 278
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 779
-rw-r--r--  arch/s390/kernel/perf_event.c | 5
-rw-r--r--  arch/s390/kernel/perf_regs.c | 71
-rw-r--r--  arch/s390/kernel/process.c | 18
-rw-r--r--  arch/s390/kernel/ptrace.c | 180
-rw-r--r--  arch/s390/kernel/relocate_kernel.S | 3
-rw-r--r--  arch/s390/kernel/runtime_instr.c | 42
-rw-r--r--  arch/s390/kernel/setup.c | 22
-rw-r--r--  arch/s390/kernel/smp.c | 88
-rw-r--r--  arch/s390/kernel/stacktrace.c | 1
-rw-r--r--  arch/s390/kernel/sthyi.c (renamed from arch/s390/kvm/sthyi.c) | 177
-rw-r--r--  arch/s390/kernel/suspend.c | 8
-rw-r--r--  arch/s390/kernel/syscalls.S | 7
-rw-r--r--  arch/s390/kernel/time.c | 5
-rw-r--r--  arch/s390/kernel/topology.c | 50
-rw-r--r--  arch/s390/kernel/vdso.c | 69
-rw-r--r--  arch/s390/kernel/vdso32/clock_getres.S | 5
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S | 5
-rw-r--r--  arch/s390/kernel/vdso32/getcpu.S | 16
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S | 5
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S | 5
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 24
-rw-r--r--  arch/s390/kernel/vdso64/getcpu.S | 15
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S | 5
-rw-r--r--  arch/s390/kernel/vdso64/note.S | 1
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 28
-rw-r--r--  arch/s390/kernel/vtime.c | 1
-rw-r--r--  arch/s390/kvm/Makefile | 7
-rw-r--r--  arch/s390/kvm/diag.c | 5
-rw-r--r--  arch/s390/kvm/gaccess.h | 5
-rw-r--r--  arch/s390/kvm/guestdbg.c | 5
-rw-r--r--  arch/s390/kvm/intercept.c | 61
-rw-r--r--  arch/s390/kvm/interrupt.c | 37
-rw-r--r--  arch/s390/kvm/irq.h | 5
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 62
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 10
-rw-r--r--  arch/s390/kvm/priv.c | 18
-rw-r--r--  arch/s390/kvm/sigp.c | 5
-rw-r--r--  arch/s390/kvm/vsie.c | 65
-rw-r--r--  arch/s390/lib/mem.S | 64
-rw-r--r--  arch/s390/lib/spinlock.c | 344
-rw-r--r--  arch/s390/lib/string.c | 28
-rw-r--r--  arch/s390/lib/uaccess.c | 90
-rw-r--r--  arch/s390/mm/cmm.c | 9
-rw-r--r--  arch/s390/mm/fault.c | 110
-rw-r--r--  arch/s390/mm/gmap.c | 10
-rw-r--r--  arch/s390/mm/init.c | 5
-rw-r--r--  arch/s390/mm/mmap.c | 16
-rw-r--r--  arch/s390/mm/pgalloc.c | 20
-rw-r--r--  arch/s390/mm/pgtable.c | 1
-rw-r--r--  arch/s390/mm/vmem.c | 16
-rw-r--r--  arch/s390/net/Makefile | 1
-rw-r--r--  arch/s390/net/bpf_jit.h | 7
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 37
-rw-r--r--  arch/s390/numa/Makefile | 1
-rw-r--r--  arch/s390/pci/Makefile | 1
-rw-r--r--  arch/s390/pci/pci.c | 6
-rw-r--r--  arch/s390/pci/pci_debug.c | 1
-rw-r--r--  arch/s390/pci/pci_dma.c | 22
-rw-r--r--  arch/s390/pci/pci_insn.c | 10
-rw-r--r--  arch/s390/tools/Makefile | 10
-rw-r--r--  arch/s390/tools/gen_opcode_table.c | 337
-rw-r--r--  arch/s390/tools/opcodes.txt | 1183
175 files changed, 5735 insertions, 4683 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index eae2c64cf69d..9fdff3fe1a42 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += kernel/
obj-y += mm/
obj-$(CONFIG_KVM) += kvm/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ae55e715cc74..829c67986db7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -68,6 +68,7 @@ config S390
select ARCH_BINFMT_ELF_STATE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
@@ -143,11 +144,11 @@ config S390
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
- select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GCC_PLUGINS
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -158,6 +159,8 @@ config S390
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
@@ -809,18 +812,6 @@ config PFAULT
Everybody who wants to run Linux under VM != VM4.2 should select
this option.
-config SHARED_KERNEL
- bool "VM shared kernel support"
- depends on !JUMP_LABEL
- help
- Select this option, if you want to share the text segment of the
- Linux kernel between different VM guests. This reduces memory
- usage with lots of guests but greatly increases kernel size.
- Also if a kernel was IPL'ed from a shared segment the kexec system
- call will not work.
- You should only select this option if you know what you are
- doing and want to exploit this feature.
-
config CMM
def_tristate n
prompt "Cooperative memory management"
@@ -930,17 +921,4 @@ config S390_GUEST
Select this option if you want to run the kernel as a guest under
the KVM hypervisor.
-config S390_GUEST_OLD_TRANSPORT
- def_bool y
- prompt "Guest support for old s390 virtio transport (DEPRECATED)"
- depends on S390_GUEST
- help
- Enable this option to add support for the old s390-virtio
- transport (i.e. virtio devices NOT based on virtio-ccw). This
- type of virtio devices is only available on the experimental
- kuli userspace or with old (< 2.6) qemu. If you are running
- with a modern version of qemu (which supports virtio-ccw since
- 1.4 and uses it by default since version 2.4), you probably won't
- need this.
-
endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index dac821cfcd43..de54cfc6109d 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# s390/Makefile
#
@@ -6,10 +7,6 @@
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture
#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
# Copyright (C) 1994 by Linus Torvalds
#
@@ -21,7 +18,7 @@ KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
UTS_MACHINE := s390x
STACK_SIZE := 16384
-CHECKFLAGS += -D__s390__ -D__s390x__
+CHECKFLAGS += -D__s390__ -D__s390x__ -mbig-endian
export LD_BFD
@@ -133,6 +130,7 @@ archclean:
archprepare:
$(Q)$(MAKE) $(build)=$(tools) include/generated/facilities.h
+ $(Q)$(MAKE) $(build)=$(tools) include/generated/dis.h
# Don't use tabs in echo arguments
define archhelp
diff --git a/arch/s390/appldata/Makefile b/arch/s390/appldata/Makefile
index 99f1cf071304..b06def4a4f2f 100644
--- a/arch/s390/appldata/Makefile
+++ b/arch/s390/appldata/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux - z/VM Monitor Stream.
#
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ef3fb1b9201f..cb6e8066b1ad 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
* Exports appldata_register_ops() and appldata_unregister_ops() for the
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 598df5708501..e68136c3c23a 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects data related to memory management.
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 66037d2622b4..8bc14b0d1def 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects accumulated network statistics (Packets received/transmitted,
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 45b3178200ab..433a994b1a89 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects misc. OS related data (CPU utilization, running processes).
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3df10c989893..29e3dc99b916 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -12,7 +12,7 @@ targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o
KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
-KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 77633200f42c..cecf38b9ec82 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -170,9 +170,7 @@ unsigned long decompress_kernel(void)
free_mem_ptr = (unsigned long) &_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- puts("Uncompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
- puts("Ok, booting the kernel.\n");
return (unsigned long) output;
}
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr
index f02382ae5c48..42a242597f34 100644
--- a/arch/s390/boot/compressed/vmlinux.scr
+++ b/arch/s390/boot/compressed/vmlinux.scr
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
SECTIONS
{
.rodata.compressed : {
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
index aed3069699bd..bed227f267ae 100644
--- a/arch/s390/boot/install.sh
+++ b/arch/s390/boot/install.sh
@@ -1,11 +1,8 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
#
# arch/s390x/boot/install.sh
#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
# Copyright (C) 1995 by Linus Torvalds
#
# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 282072206df7..5af8458951cf 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
@@ -379,7 +378,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
@@ -416,7 +414,6 @@ CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
@@ -483,6 +480,8 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -599,7 +598,6 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
@@ -629,10 +627,9 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
+CONFIG_DMA_API_DEBUG=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_SORT=y
@@ -641,14 +638,13 @@ CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
-CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -663,13 +659,11 @@ CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -705,12 +699,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 3c6b78189fbc..d52eafe57ae8 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -70,7 +70,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
@@ -376,7 +375,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
@@ -412,7 +410,6 @@ CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
@@ -479,6 +476,8 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -575,10 +574,8 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
@@ -590,7 +587,6 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
-CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -608,13 +604,10 @@ CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -650,12 +643,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 653d72bcc007..20ed149e1137 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -68,7 +68,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
@@ -374,7 +373,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
@@ -410,7 +408,6 @@ CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
@@ -477,6 +474,8 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -573,10 +572,8 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
@@ -588,7 +585,6 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
-CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -606,13 +602,10 @@ CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -648,12 +641,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 591cbdf615af..d60798737d86 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -1,20 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 implementation of the AES Cipher Algorithm.
*
* s390 Version:
- * Copyright IBM Corp. 2005, 2007
+ * Copyright IBM Corp. 2005, 2017
* Author(s): Jan Glauber (jang@de.ibm.com)
* Sebastian Siewior (sebastian@breakpoint.cc> SW-Fallback
+ * Patrick Steuer <patrick.steuer@de.ibm.com>
+ * Harald Freudenberger <freude@de.ibm.com>
*
* Derived from "crypto/aes_generic.c"
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#define KMSG_COMPONENT "aes_s390"
@@ -22,20 +19,25 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/ghash.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
+#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
-static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
+ kma_functions;
struct s390_aes_ctx {
u8 key[AES_MAX_KEY_SIZE];
@@ -55,6 +57,17 @@ struct s390_xts_ctx {
struct crypto_skcipher *fallback;
};
+struct gcm_sg_walk {
+ struct scatter_walk walk;
+ unsigned int walk_bytes;
+ u8 *walk_ptr;
+ unsigned int walk_bytes_remain;
+ u8 buf[AES_BLOCK_SIZE];
+ unsigned int buf_bytes;
+ u8 *ptr;
+ unsigned int nbytes;
+};
+
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -771,6 +784,267 @@ static struct crypto_alg ctr_aes_alg = {
}
};
+static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->fc = CPACF_KMA_GCM_AES_128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->fc = CPACF_KMA_GCM_AES_192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->fc = CPACF_KMA_GCM_AES_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+ return 0;
+}
+
+static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+ unsigned int len)
+{
+ memset(gw, 0, sizeof(*gw));
+ gw->walk_bytes_remain = len;
+ scatterwalk_start(&gw->walk, sg);
+}
+
+static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+{
+ int n;
+
+ /* minbytesneeded <= AES_BLOCK_SIZE */
+ if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
+ gw->ptr = gw->buf;
+ gw->nbytes = gw->buf_bytes;
+ goto out;
+ }
+
+ if (gw->walk_bytes_remain == 0) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
+ }
+
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+ if (!gw->walk_bytes) {
+ scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ }
+ gw->walk_ptr = scatterwalk_map(&gw->walk);
+
+ if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
+ gw->ptr = gw->walk_ptr;
+ gw->nbytes = gw->walk_bytes;
+ goto out;
+ }
+
+ while (1) {
+ n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
+ memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
+ gw->buf_bytes += n;
+ gw->walk_bytes_remain -= n;
+ scatterwalk_unmap(&gw->walk);
+ scatterwalk_advance(&gw->walk, n);
+ scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+
+ if (gw->buf_bytes >= minbytesneeded) {
+ gw->ptr = gw->buf;
+ gw->nbytes = gw->buf_bytes;
+ goto out;
+ }
+
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ if (!gw->walk_bytes) {
+ scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ }
+ gw->walk_ptr = scatterwalk_map(&gw->walk);
+ }
+
+out:
+ return gw->nbytes;
+}
+
+static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+ int n;
+
+ if (gw->ptr == NULL)
+ return;
+
+ if (gw->ptr == gw->buf) {
+ n = gw->buf_bytes - bytesdone;
+ if (n > 0) {
+ memmove(gw->buf, gw->buf + bytesdone, n);
+ gw->buf_bytes -= n;
+ } else
+ gw->buf_bytes = 0;
+ } else {
+ gw->walk_bytes_remain -= bytesdone;
+ scatterwalk_unmap(&gw->walk);
+ scatterwalk_advance(&gw->walk, bytesdone);
+ scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+ }
+}
+
+static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
+ unsigned int taglen = crypto_aead_authsize(tfm);
+ unsigned int aadlen = req->assoclen;
+ unsigned int pclen = req->cryptlen;
+ int ret = 0;
+
+ unsigned int len, in_bytes, out_bytes,
+ min_bytes, bytes, aad_bytes, pc_bytes;
+ struct gcm_sg_walk gw_in, gw_out;
+ u8 tag[GHASH_DIGEST_SIZE];
+
+ struct {
+ u32 _[3]; /* reserved */
+ u32 cv; /* Counter Value */
+ u8 t[GHASH_DIGEST_SIZE];/* Tag */
+ u8 h[AES_BLOCK_SIZE]; /* Hash-subkey */
+ u64 taadl; /* Total AAD Length */
+ u64 tpcl; /* Total Plain-/Cipher-text Length */
+ u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
+ u8 k[AES_MAX_KEY_SIZE]; /* Key */
+ } param;
+
+ /*
+ * encrypt
+ * req->src: aad||plaintext
+ * req->dst: aad||ciphertext||tag
+ * decrypt
+ * req->src: aad||ciphertext||tag
+ * req->dst: aad||plaintext, return 0 or -EBADMSG
+ * aad, plaintext and ciphertext may be empty.
+ */
+ if (flags & CPACF_DECRYPT)
+ pclen -= taglen;
+ len = aadlen + pclen;
+
+ memset(&param, 0, sizeof(param));
+ param.cv = 1;
+ param.taadl = aadlen * 8;
+ param.tpcl = pclen * 8;
+ memcpy(param.j0, req->iv, ivsize);
+ *(u32 *)(param.j0 + ivsize) = 1;
+ memcpy(param.k, ctx->key, ctx->key_len);
+
+ gcm_sg_walk_start(&gw_in, req->src, len);
+ gcm_sg_walk_start(&gw_out, req->dst, len);
+
+ do {
+ min_bytes = min_t(unsigned int,
+ aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
+ in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
+ out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+ bytes = min(in_bytes, out_bytes);
+
+ if (aadlen + pclen <= bytes) {
+ aad_bytes = aadlen;
+ pc_bytes = pclen;
+ flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
+ } else {
+ if (aadlen <= bytes) {
+ aad_bytes = aadlen;
+ pc_bytes = (bytes - aadlen) &
+ ~(AES_BLOCK_SIZE - 1);
+ flags |= CPACF_KMA_LAAD;
+ } else {
+ aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
+ pc_bytes = 0;
+ }
+ }
+
+ if (aad_bytes > 0)
+ memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
+
+ cpacf_kma(ctx->fc | flags, &param,
+ gw_out.ptr + aad_bytes,
+ gw_in.ptr + aad_bytes, pc_bytes,
+ gw_in.ptr, aad_bytes);
+
+ gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
+ gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+ aadlen -= aad_bytes;
+ pclen -= pc_bytes;
+ } while (aadlen + pclen > 0);
+
+ if (flags & CPACF_DECRYPT) {
+ scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
+ if (crypto_memneq(tag, param.t, taglen))
+ ret = -EBADMSG;
+ } else
+ scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
+
+ memzero_explicit(&param, sizeof(param));
+ return ret;
+}
+
+static int gcm_aes_encrypt(struct aead_request *req)
+{
+ return gcm_aes_crypt(req, CPACF_ENCRYPT);
+}
+
+static int gcm_aes_decrypt(struct aead_request *req)
+{
+ return gcm_aes_crypt(req, CPACF_DECRYPT);
+}
+
+static struct aead_alg gcm_aes_aead = {
+ .setkey = gcm_aes_setkey,
+ .setauthsize = gcm_aes_setauthsize,
+ .encrypt = gcm_aes_encrypt,
+ .decrypt = gcm_aes_decrypt,
+
+ .ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_priority = 900,
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-s390",
+ .cra_module = THIS_MODULE,
+ },
+};
+
static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
@@ -790,16 +1064,19 @@ static void aes_s390_fini(void)
crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
+
+ crypto_unregister_aead(&gcm_aes_aead);
}
static int __init aes_s390_init(void)
{
int ret;
- /* Query available functions for KM, KMC and KMCTR */
+ /* Query available functions for KM, KMC, KMCTR and KMA */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
+ cpacf_query(CPACF_KMA, &kma_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
@@ -840,6 +1117,14 @@ static int __init aes_s390_init(void)
goto out_err;
}
+ if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
+ cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
+ cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
+ ret = crypto_register_aead(&gcm_aes_aead);
+ if (ret)
+ goto out_err;
+ }
+
return 0;
out_err:
aes_s390_fini();
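
The aes_s390.c hunks above add a "gcm(aes)" AEAD backed by the CPACF KMA instruction and register it only when one of the KMA GCM function codes is reported by cpacf_query(). For orientation, here is a minimal sketch of how another kernel module could drive the resulting transform through the generic AEAD API; the function name, in-place buffer layout and fixed 16-byte tag size are illustrative assumptions, not part of this patch.

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Hedged sketch, not part of the patch: in-place GCM encryption of a
 * buffer laid out as aad||plaintext (with room for the 16-byte tag),
 * matching the request layout described in gcm_aes_crypt() above.
 */
static int example_gcm_encrypt(const u8 *key, unsigned int keylen, u8 *buf,
			       unsigned int aadlen, unsigned int ptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* buf must provide aadlen + ptlen + 16 bytes */
	sg_init_one(&sg, buf, aadlen + ptlen + 16);
	aead_request_set_callback(req, 0, crypto_req_done, &wait);
	aead_request_set_ad(req, aadlen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return ret;
}

Because the driver registers with cra_priority 900, this hardware-backed implementation is preferred over the generic gcm(aes) template whenever the KMA facility is present.
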
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index 36aefc07d10c..8720e9203ecf 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* s390 arch random implementation.
*
* Copyright IBM Corp. 2017
* Author(s): Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
*/
#include <linux/kernel.h>
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 992e630c227b..436865926c26 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Crypto-API module for CRC-32 algorithms implemented with the
* z/Architecture Vector Extension Facility.
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0d296662bbf0..5346b5a80bb6 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
@@ -6,12 +7,6 @@
* Copyright IBM Corp. 2003, 2011
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/init.h>
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 564616d48d8b..3b7f96c9eead 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index a4e903ed7e21..003932db8d12 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -7,11 +8,6 @@
* Copyright IBM Corp. 2017
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
*/
#define KMSG_COMPONENT "paes_s390"
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 3e47c4a0f18b..a97a1802cfb4 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2006, 2015
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index 10f200790079..d6f8258b44df 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Cryptographic API.
*
@@ -5,12 +6,6 @@
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#ifndef _CRYPTO_ARCH_S390_SHA_H
#define _CRYPTO_ARCH_S390_SHA_H
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index c7de53d8da75..a00c17f761c1 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
@@ -16,12 +17,6 @@
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 53c277999a28..944aa6b237cd 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
@@ -6,12 +7,6 @@
* s390 Version:
* Copyright IBM Corp. 2005, 2011
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 2f4caa1ef123..b17eded532b1 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
@@ -5,12 +6,6 @@
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index c740f77285b2..cf0718d121bc 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
@@ -5,12 +6,6 @@
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#include <crypto/internal/hash.h>
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 20244a38c886..46a3178d8bc6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -53,7 +53,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -163,7 +162,6 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
@@ -179,7 +177,6 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index 2ee25ba252d6..06f601509ce9 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the linux hypfs filesystem routines.
#
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index cf8a2d92467f..43bbe63e2992 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Hypervisor filesystem for Linux on s390.
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
- * License: GPL
*/
#define KMSG_COMPONENT "hypfs"
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 6e2c9f7e47fa..048450869328 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
generic-y += cacheflush.h
generic-y += clkdev.h
@@ -15,6 +16,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
+generic-y += rwsem.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 000000000000..c2cf7bcdef9b
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 facility; /* facility bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+/*
+ * |661: |662: |6620 |663:
+ * +-----------+---------------------+
+ * | oldinstr | oldinstr_padding |
+ * | +----------+----------+
+ * | | | |
+ * | | >6 bytes |6/4/2 nops|
+ * | |6 bytes jg----------->
+ * +-----------+---------------------+
+ * ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641: |6651:
+ * | alternative instr 1 |
+ * +-----------+---------+- - - - - -+
+ * |6642: |6652: |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ * ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each |
+ * | alternative instr |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
+
+#define e_oldinstr_pad_end "663"
+#define oldinstr_len "662b-661b"
+#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+ "((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len) \
+ ".if " len " > 254\n" \
+ "\t.error \"cpu alternatives does not support instructions " \
+ "blocks > 254 bytes\"\n" \
+ ".endif\n" \
+ ".if (" len ") %% 2\n" \
+ "\t.error \"cpu alternatives instructions length is odd\"\n" \
+ ".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num) \
+ ".if " oldinstr_pad_len(num) " > 6\n" \
+ "\tjg " e_oldinstr_pad_end "f\n" \
+ "6620:\n" \
+ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ ".else\n" \
+ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ ".endif\n"
+
+#define OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ OLDINSTR_PADDING(oldinstr, num) \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
+ OLDINSTR_PADDING(oldinstr, num2) \
+ ".else\n" \
+ OLDINSTR_PADDING(oldinstr, num1) \
+ ".endif\n" \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num) \
+ "\t.long 661b - .\n" /* old instruction */ \
+ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
+ "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.byte " oldinstr_total_len "\n" /* source len */ \
+ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
+ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr, 1) \
+ ".popsection\n" \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility, 1) \
+ ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr1, 1) \
+ ALTINSTR_REPLACEMENT(altinstr2, 2) \
+ ".popsection\n" \
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility1, 1) \
+ ALTINSTR_ENTRY(facility2, 2) \
+ ".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility) \
+ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
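
The comment blocks in this new header describe how the original instruction is padded at build time and patched at boot. As a hedged usage sketch that is not taken from this series: a caller could run the fully serializing "bcr 15,0" by default and let boot-time patching substitute the cheaper "bcr 14,0" once facility bit 45 (fast-BCR serialization) is known to be installed; both instructions are two bytes long, so no padding is generated.

#include <asm/alternative.h>

/* Hedged example, not part of the patch: choose the serialization
 * instruction at boot based on facility bit 45. */
static inline void example_serialize(void)
{
	alternative("bcr 15,0", "bcr 14,0", 45);
}
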
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c02f4aba88a6..cfce6835b109 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Adjunct processor (AP) interfaces
*
* Copyright IBM Corp. 2017
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Tony Krowiak <akrowia@linux.vnet.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index e9f7d7a57f99..09aed1095336 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -28,42 +28,42 @@ static void s390_arch_random_generate(u8 *buf, unsigned int nbytes)
static inline bool arch_has_random(void)
{
- if (static_branch_likely(&s390_arch_random_available))
- return true;
return false;
}
static inline bool arch_has_random_seed(void)
{
- return arch_has_random();
+ if (static_branch_likely(&s390_arch_random_available))
+ return true;
+ return false;
}
static inline bool arch_get_random_long(unsigned long *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
- s390_arch_random_generate((u8 *)v, sizeof(*v));
- return true;
- }
return false;
}
static inline bool arch_get_random_int(unsigned int *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
- s390_arch_random_generate((u8 *)v, sizeof(*v));
- return true;
- }
return false;
}
static inline bool arch_get_random_seed_long(unsigned long *v)
{
- return arch_get_random_long(v);
+ if (static_branch_likely(&s390_arch_random_available)) {
+ s390_arch_random_generate((u8 *)v, sizeof(*v));
+ return true;
+ }
+ return false;
}
static inline bool arch_get_random_seed_int(unsigned int *v)
{
- return arch_get_random_int(v);
+ if (static_branch_likely(&s390_arch_random_available)) {
+ s390_arch_random_generate((u8 *)v, sizeof(*v));
+ return true;
+ }
+ return false;
}
#endif /* CONFIG_ARCH_RANDOM */
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index f479e4c0b87e..d3f09526ee19 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -40,19 +40,24 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
#undef __ATOMIC_OPS
#undef __ATOMIC_OP
-static inline void __atomic_add_const(int val, int *ptr)
-{
- asm volatile(
- " asi %[ptr],%[val]\n"
- : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \
+static inline void op_name(op_type val, op_type *ptr) \
+{ \
+ asm volatile( \
+ op_string " %[ptr],%[val]\n" \
+ op_barrier \
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");\
}
-static inline void __atomic64_add_const(long val, long *ptr)
-{
- asm volatile(
- " agsi %[ptr],%[val]\n"
- : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
-}
+#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
+ __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
+__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
+
+#undef __ATOMIC_CONST_OPS
+#undef __ATOMIC_CONST_OP
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
@@ -108,6 +113,11 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#undef __ATOMIC64_OPS
+#define __atomic_add_const(val, ptr) __atomic_add(val, ptr)
+#define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr)
+#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
+#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
+
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
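
The __ATOMIC_CONST_OP()/__ATOMIC_CONST_OPS() generator above replaces the two hand-written add-constant helpers and additionally emits *_barrier variants. Expanded for (__atomic_add_const, int, "asi") it produces roughly the following pair; this is a reconstruction for illustration, not literal preprocessor output.

/* Approximate expansion of __ATOMIC_CONST_OPS(__atomic_add_const, int, "asi") */
static inline void __atomic_add_const(int val, int *ptr)
{
	asm volatile(
		"asi %[ptr],%[val]\n"
		"\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");
}

static inline void __atomic_add_const_barrier(int val, int *ptr)
{
	asm volatile(
		"asi %[ptr],%[val]\n"
		"bcr 14,0\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");
}

Compared to the old helpers, the generated functions also gain a "memory" clobber, and on pre-z196 machines the new names simply map to the existing __atomic_add()/__atomic64_add() routines, as the #define block at the end of the hunk shows.
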
diff --git a/arch/s390/include/asm/bugs.h b/arch/s390/include/asm/bugs.h
index 0f5bd894f4dc..aa42a179be33 100644
--- a/arch/s390/include/asm/bugs.h
+++ b/arch/s390/include/asm/bugs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* S390 version
* Copyright IBM Corp. 1999
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index b00777ce93b4..99aa817dad32 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -42,6 +42,7 @@ struct ccwgroup_device {
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @driver: embedded driver structure
+ * @ccw_driver: supported ccw_driver (optional)
*/
struct ccwgroup_driver {
int (*setup) (struct ccwgroup_device *);
@@ -56,6 +57,7 @@ struct ccwgroup_driver {
int (*restore)(struct ccwgroup_device *);
struct device_driver driver;
+ struct ccw_driver *ccw_driver;
};
extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 1b60eb3676d5..5e6a63641a5f 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -263,7 +263,6 @@ typedef struct compat_siginfo {
#define si_overrun _sifields._timer._overrun
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 056670ebba67..3cc52e37b4b2 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -2,7 +2,7 @@
/*
* CP Assist for Cryptographic Functions (CPACF)
*
- * Copyright IBM Corp. 2003, 2016
+ * Copyright IBM Corp. 2003, 2017
* Author(s): Thomas Spatzier
* Jan Glauber
* Harald Freudenberger (freude@de.ibm.com)
@@ -134,6 +134,22 @@
#define CPACF_PRNO_TRNG_Q_R2C_RATIO 0x70
#define CPACF_PRNO_TRNG 0x72
+/*
+ * Function codes for the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KMA_QUERY 0x00
+#define CPACF_KMA_GCM_AES_128 0x12
+#define CPACF_KMA_GCM_AES_192 0x13
+#define CPACF_KMA_GCM_AES_256 0x14
+
+/*
+ * Flags for the KMA (CIPHER MESSAGE WITH AUTHENTICATION) instruction
+ */
+#define CPACF_KMA_LPC 0x100 /* Last-Plaintext/Ciphertext */
+#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
+#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
+
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
/**
@@ -179,6 +195,8 @@ static inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(77); /* check for MSA4 */
case CPACF_PRNO:
return test_facility(57); /* check for MSA5 */
+ case CPACF_KMA:
+ return test_facility(146); /* check for MSA8 */
default:
BUG();
}
@@ -470,4 +488,36 @@ static inline void cpacf_pckmo(long func, void *param)
: "cc", "memory");
}
+/**
+ * cpacf_kma() - executes the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ * instruction
+ * @func: the function code passed to KMA; see CPACF_KMA_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @aad: address of additional authenticated data memory area
+ * @aad_len: length of aad operand in bytes
+ */
+static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
+ const u8 *src, unsigned long src_len,
+ const u8 *aad, unsigned long aad_len)
+{
+ register unsigned long r0 asm("0") = (unsigned long) func;
+ register unsigned long r1 asm("1") = (unsigned long) param;
+ register unsigned long r2 asm("2") = (unsigned long) src;
+ register unsigned long r3 asm("3") = (unsigned long) src_len;
+ register unsigned long r4 asm("4") = (unsigned long) aad;
+ register unsigned long r5 asm("5") = (unsigned long) aad_len;
+ register unsigned long r6 asm("6") = (unsigned long) dest;
+
+ asm volatile(
+ "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
+ " brc 1,0b\n" /* handle partial completion */
+ : [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
+ [aad] "+a" (r4), [alen] "+d" (r5)
+ : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
+ : "cc", "memory");
+}
+
#endif /* _ASM_S390_CPACF_H */
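
cpacf_kma() above is the thin wrapper the new GCM code builds on; its parameter block is the structure defined locally in gcm_aes_crypt() in the aes_s390.c hunks. A compact, hedged sketch of a one-shot AES-128-GCM encryption of a contiguous buffer, mirroring that driver code, follows; the function name and fixed sizes are illustrative, and a real caller must first confirm the function code via cpacf_query()/cpacf_test_func() as aes_s390_init() does.

#include <linux/string.h>
#include <asm/cpacf.h>

/* Hedged sketch, not part of the patch: encrypt 'data' in place with
 * AES-128-GCM in a single KMA invocation; 'tag' receives the 16-byte
 * authentication tag. */
static void example_kma_gcm_encrypt(const u8 key[16], const u8 iv[12],
				    const u8 *aad, unsigned long aadlen,
				    u8 *data, unsigned long datalen,
				    u8 tag[16])
{
	struct {
		u32 _[3];	/* reserved */
		u32 cv;		/* counter value */
		u8 t[16];	/* tag */
		u8 h[16];	/* hash subkey, computed by KMA */
		u64 taadl;	/* total AAD length in bits */
		u64 tpcl;	/* total text length in bits */
		u8 j0[16];	/* initial counter value */
		u8 k[32];	/* key */
	} param;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = datalen * 8;
	memcpy(param.j0, iv, 12);
	*(u32 *)(param.j0 + 12) = 1;
	memcpy(param.k, key, 16);

	cpacf_kma(CPACF_KMA_GCM_AES_128 | CPACF_KMA_LAAD | CPACF_KMA_LPC,
		  &param, data, data, datalen, aad, aadlen);

	memcpy(tag, param.t, 16);
	memzero_explicit(&param, sizeof(param));
}
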
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 05480e4cc5ca..dd08db491b89 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CPU-measurement facilities
*
* Copyright IBM Corp. 2012
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#ifndef _ASM_S390_CPU_MF_H
#define _ASM_S390_CPU_MF_H
@@ -144,6 +141,12 @@ struct hws_trailer_entry {
unsigned long long progusage2; /* */
} __packed;
+/* Load program parameter */
+static inline void lpp(void *pp)
+{
+ asm volatile(".insn s,0xb2800000,0(%0)\n":: "a" (pp) : "memory");
+}
+
/* Query counter information */
static inline int qctri(struct cpumf_ctr_info *info)
{
@@ -167,7 +170,7 @@ static inline int lcctl(u64 ctl)
" .insn s,0xb2840000,%1\n"
" ipm %0\n"
" srl %0,28\n"
- : "=d" (cc) : "m" (ctl) : "cc");
+ : "=d" (cc) : "Q" (ctl) : "cc");
return cc;
}
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 93e0d72f6c94..99c93d0346f9 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,6 +8,18 @@
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
+#include <linux/const.h>
+
+#define CR2_GUARDED_STORAGE _BITUL(63 - 59)
+
+#define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35)
+#define CR14_RECOVERY_SUBMASK _BITUL(63 - 36)
+#define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK _BITUL(63 - 38)
+#define CR14_WARNING_SUBMASK _BITUL(63 - 39)
+
+#ifndef __ASSEMBLY__
+
#include <linux/bug.h>
#define __ctl_load(array, low, high) do { \
@@ -55,7 +67,11 @@ void smp_ctl_clear_bit(int cr, int bit);
union ctlreg0 {
unsigned long val;
struct {
- unsigned long : 32;
+ unsigned long : 8;
+ unsigned long tcx : 1; /* Transactional-Execution control */
+ unsigned long pifo : 1; /* Transactional-Execution Program-
+ Interruption-Filtering Override */
+ unsigned long : 22;
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
@@ -71,6 +87,19 @@ union ctlreg0 {
};
};
+union ctlreg2 {
+ unsigned long val;
+ struct {
+ unsigned long : 33;
+ unsigned long ducto : 25;
+ unsigned long : 1;
+ unsigned long gse : 1;
+ unsigned long : 1;
+ unsigned long tds : 1;
+ unsigned long tdc : 2;
+ };
+};
+
#ifdef CONFIG_SMP
# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
@@ -79,4 +108,5 @@ union ctlreg0 {
# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
#endif
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_CTL_REG_H */
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index a4ed25dd3278..c305d39f5016 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -14,71 +14,71 @@
#include <linux/refcount.h>
#include <uapi/asm/debug.h>
-#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
-#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
-#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
-#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
-#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
-#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
+#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
+#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
+#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
+#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
+#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
+#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
#define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */
-#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
- /* the entry information */
+#define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */
+ /* the entry information */
typedef struct __debug_entry debug_entry_t;
struct debug_view;
-typedef struct debug_info {
- struct debug_info* next;
- struct debug_info* prev;
+typedef struct debug_info {
+ struct debug_info *next;
+ struct debug_info *prev;
refcount_t ref_count;
- spinlock_t lock;
+ spinlock_t lock;
int level;
int nr_areas;
int pages_per_area;
int buf_size;
- int entry_size;
- debug_entry_t*** areas;
+ int entry_size;
+ debug_entry_t ***areas;
int active_area;
int *active_pages;
int *active_entries;
- struct dentry* debugfs_root_entry;
- struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
- struct debug_view* views[DEBUG_MAX_VIEWS];
+ struct dentry *debugfs_root_entry;
+ struct dentry *debugfs_entries[DEBUG_MAX_VIEWS];
+ struct debug_view *views[DEBUG_MAX_VIEWS];
char name[DEBUG_MAX_NAME_LEN];
umode_t mode;
} debug_info_t;
-typedef int (debug_header_proc_t) (debug_info_t* id,
- struct debug_view* view,
+typedef int (debug_header_proc_t) (debug_info_t *id,
+ struct debug_view *view,
int area,
- debug_entry_t* entry,
- char* out_buf);
-
-typedef int (debug_format_proc_t) (debug_info_t* id,
- struct debug_view* view, char* out_buf,
- const char* in_buf);
-typedef int (debug_prolog_proc_t) (debug_info_t* id,
- struct debug_view* view,
- char* out_buf);
-typedef int (debug_input_proc_t) (debug_info_t* id,
- struct debug_view* view,
- struct file* file,
+ debug_entry_t *entry,
+ char *out_buf);
+
+typedef int (debug_format_proc_t) (debug_info_t *id,
+ struct debug_view *view, char *out_buf,
+ const char *in_buf);
+typedef int (debug_prolog_proc_t) (debug_info_t *id,
+ struct debug_view *view,
+ char *out_buf);
+typedef int (debug_input_proc_t) (debug_info_t *id,
+ struct debug_view *view,
+ struct file *file,
const char __user *user_buf,
- size_t in_buf_size, loff_t* offset);
+ size_t in_buf_size, loff_t *offset);
+
+int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf);
-int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view,
- int area, debug_entry_t* entry, char* out_buf);
-
struct debug_view {
char name[DEBUG_MAX_NAME_LEN];
- debug_prolog_proc_t* prolog_proc;
- debug_header_proc_t* header_proc;
- debug_format_proc_t* format_proc;
- debug_input_proc_t* input_proc;
- void* private_data;
+ debug_prolog_proc_t *prolog_proc;
+ debug_header_proc_t *header_proc;
+ debug_format_proc_t *format_proc;
+ debug_input_proc_t *input_proc;
+ void *private_data;
};
extern struct debug_view debug_hex_ascii_view;
@@ -87,65 +87,67 @@ extern struct debug_view debug_sprintf_view;
/* do NOT use the _common functions */
-debug_entry_t* debug_event_common(debug_info_t* id, int level,
- const void* data, int length);
+debug_entry_t *debug_event_common(debug_info_t *id, int level,
+ const void *data, int length);
-debug_entry_t* debug_exception_common(debug_info_t* id, int level,
- const void* data, int length);
+debug_entry_t *debug_exception_common(debug_info_t *id, int level,
+ const void *data, int length);
/* Debug Feature API: */
debug_info_t *debug_register(const char *name, int pages, int nr_areas,
- int buf_size);
+ int buf_size);
debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
int buf_size, umode_t mode, uid_t uid,
gid_t gid);
-void debug_unregister(debug_info_t* id);
+void debug_unregister(debug_info_t *id);
-void debug_set_level(debug_info_t* id, int new_level);
+void debug_set_level(debug_info_t *id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
-static inline bool debug_level_enabled(debug_info_t* id, int level)
+static inline bool debug_level_enabled(debug_info_t *id, int level)
{
return level <= id->level;
}
-static inline debug_entry_t*
-debug_event(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_event(debug_info_t *id, int level,
+ void *data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,data,length);
+ return debug_event_common(id, level, data, length);
}
-static inline debug_entry_t*
-debug_int_event(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_event(debug_info_t *id, int level,
+ unsigned int tag)
{
- unsigned int t=tag;
+ unsigned int t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,&t,sizeof(unsigned int));
+ return debug_event_common(id, level, &t, sizeof(unsigned int));
}
-static inline debug_entry_t *
-debug_long_event (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_event(debug_info_t *id, int level,
+ unsigned long tag)
{
- unsigned long t=tag;
+ unsigned long t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,&t,sizeof(unsigned long));
+ return debug_event_common(id, level, &t, sizeof(unsigned long));
}
-static inline debug_entry_t*
-debug_text_event(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_event(debug_info_t *id, int level,
+ const char *txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,txt,strlen(txt));
+ return debug_event_common(id, level, txt, strlen(txt));
}
/*
@@ -161,6 +163,7 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
debug_entry_t *__ret; \
debug_info_t *__id = _id; \
int __level = _level; \
+ \
if ((!__id) || (__level > __id->level)) \
__ret = NULL; \
else \
@@ -169,38 +172,40 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
__ret; \
})
-static inline debug_entry_t*
-debug_exception(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_exception(debug_info_t *id, int level,
+ void *data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,data,length);
+ return debug_exception_common(id, level, data, length);
}
-static inline debug_entry_t*
-debug_int_exception(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_exception(debug_info_t *id, int level,
+ unsigned int tag)
{
- unsigned int t=tag;
+ unsigned int t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,&t,sizeof(unsigned int));
+ return debug_exception_common(id, level, &t, sizeof(unsigned int));
}
-static inline debug_entry_t *
-debug_long_exception (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_exception (debug_info_t *id, int level,
+ unsigned long tag)
{
- unsigned long t=tag;
+ unsigned long t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,&t,sizeof(unsigned long));
+ return debug_exception_common(id, level, &t, sizeof(unsigned long));
}
-static inline debug_entry_t*
-debug_text_exception(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
+ const char *txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,txt,strlen(txt));
+ return debug_exception_common(id, level, txt, strlen(txt));
}
/*
@@ -216,6 +221,7 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
debug_entry_t *__ret; \
debug_info_t *__id = _id; \
int __level = _level; \
+ \
if ((!__id) || (__level > __id->level)) \
__ret = NULL; \
else \
@@ -224,13 +230,13 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
__ret; \
})
-int debug_register_view(debug_info_t* id, struct debug_view* view);
-int debug_unregister_view(debug_info_t* id, struct debug_view* view);
+int debug_register_view(debug_info_t *id, struct debug_view *view);
+int debug_unregister_view(debug_info_t *id, struct debug_view *view);
/*
define the debug levels:
- 0 No debugging output to console or syslog
- - 1 Log internal errors to syslog, ignore check conditions
+ - 1 Log internal errors to syslog, ignore check conditions
- 2 Log internal errors and check conditions to syslog
- 3 Log internal errors to console, log check conditions to syslog
- 4 Log internal errors and check conditions to console
@@ -248,17 +254,17 @@ int debug_unregister_view(debug_info_t* id, struct debug_view* view);
#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
#if DEBUG_LEVEL > 0
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
-#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...) printk(KERN_INFO PRINTK_HEADER x)
+#define PRINT_WARN(x...) printk(KERN_WARNING PRINTK_HEADER x)
+#define PRINT_ERR(x...) printk(KERN_ERR PRINTK_HEADER x)
+#define PRINT_FATAL(x...) panic(PRINTK_HEADER x)
#else
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#endif /* DASD_DEBUG */
-
-#endif /* DEBUG_H */
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_WARN(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_ERR(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#endif /* DASD_DEBUG */
+
+#endif /* DEBUG_H */
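
For reference, the reformatted debug-feature API is used roughly as follows; the driver name, sizes and levels below are made-up values, not anything taken from this patch:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <asm/debug.h>

	static debug_info_t *my_dbf;

	static int __init my_debug_init(void)
	{
		/* one page per area, four areas, 16 data bytes per entry */
		my_dbf = debug_register("my_driver", 1, 4, 16);
		if (!my_dbf)
			return -ENOMEM;
		debug_register_view(my_dbf, &debug_hex_ascii_view);
		debug_set_level(my_dbf, 3);
		debug_text_event(my_dbf, 2, "init done");
		return 0;
	}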
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 78d1b2d725b9..b0480c60a8e1 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -9,32 +9,7 @@
#ifndef __ASM_S390_DIS_H__
#define __ASM_S390_DIS_H__
-/* Type of operand */
-#define OPERAND_GPR 0x1 /* Operand printed as %rx */
-#define OPERAND_FPR 0x2 /* Operand printed as %fx */
-#define OPERAND_AR 0x4 /* Operand printed as %ax */
-#define OPERAND_CR 0x8 /* Operand printed as %cx */
-#define OPERAND_VR 0x10 /* Operand printed as %vx */
-#define OPERAND_DISP 0x20 /* Operand printed as displacement */
-#define OPERAND_BASE 0x40 /* Operand printed as base register */
-#define OPERAND_INDEX 0x80 /* Operand printed as index register */
-#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
-
-
-struct s390_operand {
- int bits; /* The number of bits in the operand. */
- int shift; /* The number of bits to shift. */
- int flags; /* One bit syntax flags. */
-};
-
-struct s390_insn {
- const char name[5];
- unsigned char opfrag;
- unsigned char format;
-};
-
+#include <generated/dis.h>
static inline int insn_length(unsigned char code)
{
@@ -45,7 +20,6 @@ struct pt_regs;
void show_code(struct pt_regs *regs);
void print_fn_code(unsigned char *code, unsigned long len);
-int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
struct s390_insn *find_insn(unsigned char *code);
static inline int is_known_insn(unsigned char *code)
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 8fc8764fe5ee..eaf490f9c5bc 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -16,11 +16,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &dma_noop_ops;
}
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 9a3cb3983c01..1a61b1b997f2 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -194,13 +194,14 @@ struct arch_elf_state {
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
- 0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. 64-bit
+ tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
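
A worked instance of the new 64-bit computation (the STACK_TOP value is illustrative, not the s390 one):

	/*
	 * With an assumed STACK_TOP of 1 TB:
	 *
	 *   0x10000000000 / 3 * 2             = 0x000000aaaaaaaaaa
	 *   0xaaaaaaaaaa & ~((1UL << 32) - 1) = 0x000000aa00000000
	 *
	 * i.e. the ET_DYN base lands on a 4 GB boundary roughly two thirds
	 * of the way up to the stack; compat tasks use the unmasked
	 * two-thirds point of their smaller STACK_TOP.
	 */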
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 9b5a3469fed9..5e97a4353147 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -26,9 +26,9 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
int oldval = 0, newval, ret;
+ mm_segment_t old_fs;
- load_kernel_asce();
-
+ old_fs = enable_sacf_uaccess();
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -55,6 +55,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
ret = -ENOSYS;
}
pagefault_enable();
+ disable_sacf_uaccess(old_fs);
if (!ret)
*oval = oldval;
@@ -65,9 +66,10 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ mm_segment_t old_fs;
int ret;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
"0: cs %1,%4,0(%5)\n"
@@ -77,6 +79,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory");
+ disable_sacf_uaccess(old_fs);
*uval = oldval;
return ret;
}
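
Both futex helpers now bracket their user access with the sacf-uaccess primitives declared in mmu_context.h instead of forcing the kernel ASCE. A condensed sketch of the pattern, with the actual access elided:

	#include <linux/uaccess.h>	/* plus asm/mmu_context.h for the helpers */

	static inline int sacf_uaccess_pattern(u32 __user *uaddr)
	{
		mm_segment_t old_fs;
		int ret = 0;

		old_fs = enable_sacf_uaccess();	/* set up address spaces for sacf */
		pagefault_disable();
		/* ... "sacf 256" based inline asm touching *uaddr ... */
		pagefault_enable();
		disable_sacf_uaccess(old_fs);	/* restore the previous mode */
		return ret;
	}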
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 5a8d92758a58..186c7b5f5511 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -13,6 +13,8 @@
#include <asm/cio.h>
#include <asm/setup.h>
+#define NSS_NAME_SIZE 8
+
#define IPL_PARMBLOCK_ORIGIN 0x2000
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
@@ -106,7 +108,6 @@ extern size_t append_ipl_scpdata(char *, size_t);
enum {
IPL_DEVNO_VALID = 1,
IPL_PARMBLOCK_VALID = 2,
- IPL_NSS_VALID = 4,
};
enum ipl_type {
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 28792ef82c83..13de80cf741c 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _ASM_S390_KPROBES_H
#define _ASM_S390_KPROBES_H
/*
* Kernel Probes (KProbes)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright IBM Corp. 2002, 2006
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
@@ -63,8 +50,6 @@ typedef u16 kprobe_opcode_t;
#define kretprobe_blacklist_size 0
-#define KPROBE_SWAP_INST 0x10
-
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 51375e766e90..c1b0a9ac1dc8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* definition for kernel virtual machines on s390
*
* Copyright IBM Corp. 2008, 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
@@ -210,7 +207,8 @@ struct kvm_s390_sie_block {
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
- __u8 reserved60; /* 0x0060 */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
#define ECB_GS 0x40
#define ECB_TE 0x10
#define ECB_SRSI 0x04
@@ -685,11 +683,28 @@ struct kvm_s390_crypto {
__u8 dea_kw;
};
+#define APCB0_MASK_SIZE 1
+struct kvm_s390_apcb0 {
+ __u64 apm[APCB0_MASK_SIZE]; /* 0x0000 */
+ __u64 aqm[APCB0_MASK_SIZE]; /* 0x0008 */
+ __u64 adm[APCB0_MASK_SIZE]; /* 0x0010 */
+ __u64 reserved18; /* 0x0018 */
+};
+
+#define APCB1_MASK_SIZE 4
+struct kvm_s390_apcb1 {
+ __u64 apm[APCB1_MASK_SIZE]; /* 0x0000 */
+ __u64 aqm[APCB1_MASK_SIZE]; /* 0x0020 */
+ __u64 adm[APCB1_MASK_SIZE]; /* 0x0040 */
+ __u64 reserved60[4]; /* 0x0060 */
+};
+
struct kvm_s390_crypto_cb {
- __u8 reserved00[72]; /* 0x0000 */
- __u8 dea_wrapping_key_mask[24]; /* 0x0048 */
- __u8 aes_wrapping_key_mask[32]; /* 0x0060 */
- __u8 reserved80[128]; /* 0x0080 */
+ struct kvm_s390_apcb0 apcb0; /* 0x0000 */
+ __u8 reserved20[0x0048 - 0x0020]; /* 0x0020 */
+ __u8 dea_wrapping_key_mask[24]; /* 0x0048 */
+ __u8 aes_wrapping_key_mask[32]; /* 0x0060 */
+ struct kvm_s390_apcb1 apcb1; /* 0x0080 */
};
/*
@@ -736,7 +751,6 @@ struct kvm_arch{
wait_queue_head_t ipte_wq;
int ipte_lock_count;
struct mutex ipte_mutex;
- struct ratelimit_state sthyi_limit;
spinlock_t start_stop_lock;
struct sie_page2 *sie_page2;
struct kvm_s390_cpu_model model;
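
The reworked crypto control block keeps the original 256-byte footprint; the new AP mask structures only give names to previously reserved ranges. A compile-time check of that invariant, illustrative and not part of the patch:

	#include <linux/build_bug.h>
	#include <asm/kvm_host.h>

	static inline void crycb_layout_example(void)
	{
		/* 0x20 + 0x28 + 0x18 + 0x20 + 0x80 = 0x100 bytes, as before */
		BUILD_BUG_ON(sizeof(struct kvm_s390_apcb0) != 32);
		BUILD_BUG_ON(sizeof(struct kvm_s390_apcb1) != 128);
		BUILD_BUG_ON(sizeof(struct kvm_s390_crypto_cb) != 256);
	}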
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index 41393052ac57..74eeec9c0a80 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* definition for paravirtual devices on s390
*
* Copyright IBM Corp. 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*/
/*
@@ -20,8 +17,6 @@
*
* Copyright IBM Corp. 2007,2008
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#ifndef __S390_KVM_PARA_H
#define __S390_KVM_PARA_H
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 6de5c6cb0061..672f95b12d40 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* livepatch.h - s390-specific Kernel Live Patching Core
*
@@ -7,13 +8,6 @@
* Jiri Slaby
*/
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
#ifndef ASM_LIVEPATCH_H
#define ASM_LIVEPATCH_H
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 917f7344cab6..ec6592e8ba36 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -115,32 +115,28 @@ struct lowcore {
/* Address space pointer. */
__u64 kernel_asce; /* 0x0378 */
__u64 user_asce; /* 0x0380 */
+ __u64 vdso_asce; /* 0x0388 */
/*
* The lpp and current_pid fields form a
* 64-bit value that is set as program
* parameter with the LPP instruction.
*/
- __u32 lpp; /* 0x0388 */
- __u32 current_pid; /* 0x038c */
+ __u32 lpp; /* 0x0390 */
+ __u32 current_pid; /* 0x0394 */
/* SMP info area */
- __u32 cpu_nr; /* 0x0390 */
- __u32 softirq_pending; /* 0x0394 */
- __u64 percpu_offset; /* 0x0398 */
- __u64 vdso_per_cpu_data; /* 0x03a0 */
- __u64 machine_flags; /* 0x03a8 */
- __u32 preempt_count; /* 0x03b0 */
- __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
- __u64 gmap; /* 0x03b8 */
- __u32 spinlock_lockval; /* 0x03c0 */
- __u32 fpu_flags; /* 0x03c4 */
- __u8 pad_0x03c8[0x0400-0x03c8]; /* 0x03c8 */
-
- /* Per cpu primary space access list */
- __u32 paste[16]; /* 0x0400 */
-
- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
+ __u32 cpu_nr; /* 0x0398 */
+ __u32 softirq_pending; /* 0x039c */
+ __u32 preempt_count; /* 0x03a0 */
+ __u32 spinlock_lockval; /* 0x03a4 */
+ __u32 spinlock_index; /* 0x03a8 */
+ __u32 fpu_flags; /* 0x03ac */
+ __u64 percpu_offset; /* 0x03b0 */
+ __u64 vdso_per_cpu_data; /* 0x03b8 */
+ __u64 machine_flags; /* 0x03c0 */
+ __u64 gmap; /* 0x03c8 */
+ __u8 pad_0x03d0[0x0e00-0x03d0]; /* 0x03d0 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -192,14 +188,14 @@ extern struct lowcore *lowcore_ptr[];
static inline void set_prefix(__u32 address)
{
- asm volatile("spx %0" : : "m" (address) : "memory");
+ asm volatile("spx %0" : : "Q" (address) : "memory");
}
static inline __u32 store_prefix(void)
{
__u32 address;
- asm volatile("stpx %0" : "=m" (address));
+ asm volatile("stpx %0" : "=Q" (address));
return address;
}
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 43607bb12cc2..65154eaa3714 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||
- current->mm->context.alloc_pgste;
+ (current->mm && current->mm->context.alloc_pgste);
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
mm->context.use_cmma = 0;
@@ -44,6 +44,8 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.asce_limit = STACK_TOP_MAX;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ /* pgd_alloc() did not account this pud */
+ mm_inc_nr_puds(mm);
break;
case -PAGE_SIZE:
/* forked 5-level task, set new asce with new_mm->pgd */
@@ -59,7 +61,7 @@ static inline int init_new_context(struct task_struct *tsk,
/* forked 2-level compat task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- /* pgd_alloc() did not increase mm->nr_pmds */
+ /* pgd_alloc() did not account this pmd */
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -71,41 +73,38 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void set_user_asce(struct mm_struct *mm)
{
S390_lowcore.user_asce = mm->context.asce;
- if (current->thread.mm_segment.ar4)
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- set_cpu_flag(CIF_ASCE_PRIMARY);
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
}
static inline void clear_user_asce(void)
{
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
-
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- __ctl_load(S390_lowcore.user_asce, 7, 7);
-}
-
-static inline void load_kernel_asce(void)
-{
- unsigned long asce;
-
- __ctl_store(asce, 1, 1);
- if (asce != S390_lowcore.kernel_asce)
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
set_cpu_flag(CIF_ASCE_PRIMARY);
}
+mm_segment_t enable_sacf_uaccess(void);
+void disable_sacf_uaccess(mm_segment_t old_fs);
+
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
+ S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
- /* Clear old ASCE by loading the kernel ASCE. */
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ /* Clear previous user-ASCE from CR1 and CR7 */
+ if (!test_cpu_flag(CIF_ASCE_PRIMARY)) {
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ if (test_cpu_flag(CIF_ASCE_SECONDARY)) {
+ __ctl_load(S390_lowcore.vdso_asce, 7, 7);
+ clear_cpu_flag(CIF_ASCE_SECONDARY);
+ }
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -115,7 +114,6 @@ static inline void finish_arch_post_lock_switch(void)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- load_kernel_asce();
if (mm) {
preempt_disable();
while (atomic_read(&mm->context.flush_count))
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index c8a7beadd3d4..1e5dc4537bf2 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -26,12 +26,9 @@
#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
-
-#define MCCK_CR14_CR_PENDING_SUB_MASK (1 << 28)
-#define MCCK_CR14_RECOVERY_SUB_MASK (1 << 27)
-#define MCCK_CR14_DEGRAD_SUB_MASK (1 << 26)
-#define MCCK_CR14_EXT_DAMAGE_SUB_MASK (1 << 25)
-#define MCCK_CR14_WARN_SUB_MASK (1 << 24)
+#define MCCK_CODE_CR_VALID _BITUL(63 - 29)
+#define MCCK_CODE_GS_VALID _BITUL(63 - 36)
+#define MCCK_CODE_FC_VALID _BITUL(63 - 43)
#ifndef __ASSEMBLY__
@@ -87,6 +84,8 @@ union mci {
#define MCESA_ORIGIN_MASK (~0x3ffUL)
#define MCESA_LC_MASK (0xfUL)
+#define MCESA_MIN_SIZE (1024)
+#define MCESA_MAX_SIZE (2048)
struct mcesa {
u8 vector_save_area[1024];
@@ -95,8 +94,12 @@ struct mcesa {
struct pt_regs;
-extern void s390_handle_mcck(void);
-extern void s390_do_machine_check(struct pt_regs *regs);
+void nmi_alloc_boot_cpu(struct lowcore *lc);
+int nmi_alloc_per_cpu(struct lowcore *lc);
+void nmi_free_per_cpu(struct lowcore *lc);
+
+void s390_handle_mcck(void);
+void s390_do_machine_check(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
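
The new MCESA_MIN_SIZE/MCESA_MAX_SIZE constants bound the machine-check extended save area handled by the nmi_alloc_*() helpers: 1 KB covers the vector save area, and the full 2 KB is presumably only needed once guarded storage is in use. A hedged sketch of that size selection (the helper name is made up; MACHINE_HAS_GS is the existing machine-flag test from asm/setup.h):

	static inline unsigned long mcesa_size_example(void)
	{
		return MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE;
	}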
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6c2c38060f8b..5dfe47588277 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -19,11 +19,7 @@ extern debug_info_t *pci_debug_err_id;
static inline void zpci_err_hex(void *addr, int len)
{
- while (len > 0) {
- debug_event(pci_debug_err_id, 0, (void *) addr, len);
- len -= pci_debug_err_id->buf_size;
- addr += pci_debug_err_id->buf_size;
- }
+ debug_event(pci_debug_err_id, 0, addr, len);
}
#endif
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 419e83fa4721..ba22a6ea51a1 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 79aa6421fedb..b9c0e361748b 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -40,6 +40,7 @@ struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
/* Perf pt_regs extension for sample-data-entry indicators */
struct perf_sf_sde_regs {
@@ -64,27 +65,10 @@ struct perf_sf_sde_regs {
#define REG_OVERFLOW 1
#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
-#define RAWSAMPLE_REG(hwc) ((hwc)->config)
#define TEAR_REG(hwc) ((hwc)->last_tag)
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
-/* Structure for sampling data entries to be passed as perf raw sample data
- * to user space. Note that raw sample data must be aligned and, thus, might
- * be padded with zeros.
- */
-struct sf_raw_sample {
-#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE
-#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE
- u64 format;
- u32 size; /* Size of sf_raw_sample */
- u16 bsdes; /* Basic-sampling data entry size */
- u16 dsdes; /* Diagnostic-sampling data entry size */
- struct hws_basic_entry basic; /* Basic-sampling data entry */
- struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
- u8 padding[]; /* Padding to next multiple of 8 */
-} __packed;
-
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index bbe99cb8219d..c7b4333d1de0 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -13,6 +13,7 @@
#define _S390_PGALLOC_H
#include <linux/threads.h>
+#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>
@@ -28,24 +29,9 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
-static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
-{
- struct addrtype { char _[256]; };
- int i;
-
- for (i = 0; i < n; i += 256) {
- *s = val;
- asm volatile(
- "mvc 8(248,%[s]),0(%[s])\n"
- : "+m" (*(struct addrtype *) s)
- : [s] "a" (s));
- s += 256 / sizeof(long);
- }
-}
-
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
- clear_table(crst, entry, _CRST_TABLE_SIZE);
+ memset64((u64 *)crst, entry, _CRST_ENTRIES);
}
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index d7fe9838084d..0a6b0286c32e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -709,7 +709,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9cf92abe23c3..bfbfad482289 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -22,6 +22,7 @@
#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
+#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
@@ -31,6 +32,7 @@
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST)
+#define _CIF_DEDICATED_CPU _BITUL(CIF_DEDICATED_CPU)
#ifndef __ASSEMBLY__
@@ -107,9 +109,7 @@ extern void execve_tail(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT
-typedef struct {
- __u32 ar4;
-} mm_segment_t;
+typedef unsigned int mm_segment_t;
/*
* Thread structure
@@ -219,10 +219,10 @@ void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
+static inline void release_thread(struct task_struct *tsk) { }
-/* Free guarded storage control block for current */
-void exit_thread_gs(void);
+/* Free guarded storage control block */
+void guarded_storage_release(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
@@ -245,7 +245,7 @@ static inline unsigned short stap(void)
{
unsigned short cpu_address;
- asm volatile("stap %0" : "=m" (cpu_address));
+ asm volatile("stap %0" : "=Q" (cpu_address));
return cpu_address;
}
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 2f84e77f1f1b..6f70d81c40f2 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -13,10 +13,12 @@
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
#define PIF_SYSCALL_RESTART 2 /* restart the current system call */
+#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define _PIF_SYSCALL _BITUL(PIF_SYSCALL)
#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP)
#define _PIF_SYSCALL_RESTART _BITUL(PIF_SYSCALL_RESTART)
+#define _PIF_GUEST_FAULT _BITUL(PIF_GUEST_FAULT)
#ifndef __ASSEMBLY__
@@ -72,9 +74,14 @@ enum {
*/
struct pt_regs
{
- unsigned long args[1];
- psw_t psw;
- unsigned long gprs[NUM_GPRS];
+ union {
+ user_pt_regs user_regs;
+ struct {
+ unsigned long args[1];
+ psw_t psw;
+ unsigned long gprs[NUM_GPRS];
+ };
+ };
unsigned long orig_gpr2;
unsigned int int_code;
unsigned int int_parm;
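
Wrapping the existing fields in an anonymous union keeps every offset unchanged while letting perf/BPF hand out only the user-visible user_pt_regs part (see the perf_arch_bpf_user_pt_regs() hook in the perf_event.h hunk above). A hypothetical compile-time check, assuming user_pt_regs mirrors the args/psw/gprs layout as its use here implies:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>
	#include <asm/ptrace.h>

	static inline void pt_regs_layout_example(void)
	{
		BUILD_BUG_ON(offsetof(struct pt_regs, psw) !=
			     offsetof(struct pt_regs, user_regs.psw));
		BUILD_BUG_ON(offsetof(struct pt_regs, gprs) !=
			     offsetof(struct pt_regs, user_regs.gprs));
	}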
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index ea8896ba5afc..6b1540337ed6 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -6,55 +6,55 @@
#define S390_RUNTIME_INSTR_STOP 0x2
struct runtime_instr_cb {
- __u64 buf_current;
- __u64 buf_origin;
- __u64 buf_limit;
+ __u64 rca;
+ __u64 roa;
+ __u64 rla;
- __u32 valid : 1;
- __u32 pstate : 1;
- __u32 pstate_set_buf : 1;
- __u32 home_space : 1;
- __u32 altered : 1;
- __u32 : 3;
- __u32 pstate_sample : 1;
- __u32 sstate_sample : 1;
- __u32 pstate_collect : 1;
- __u32 sstate_collect : 1;
- __u32 : 1;
- __u32 halted_int : 1;
- __u32 int_requested : 1;
- __u32 buffer_full_int : 1;
+ __u32 v : 1;
+ __u32 s : 1;
+ __u32 k : 1;
+ __u32 h : 1;
+ __u32 a : 1;
+ __u32 reserved1 : 3;
+ __u32 ps : 1;
+ __u32 qs : 1;
+ __u32 pc : 1;
+ __u32 qc : 1;
+ __u32 reserved2 : 1;
+ __u32 g : 1;
+ __u32 u : 1;
+ __u32 l : 1;
__u32 key : 4;
- __u32 : 9;
+ __u32 reserved3 : 8;
+ __u32 t : 1;
__u32 rgs : 3;
- __u32 mode : 4;
- __u32 next : 1;
+ __u32 m : 4;
+ __u32 n : 1;
__u32 mae : 1;
- __u32 : 2;
- __u32 call_type_br : 1;
- __u32 return_type_br : 1;
- __u32 other_type_br : 1;
- __u32 bc_other_type : 1;
- __u32 emit : 1;
- __u32 tx_abort : 1;
- __u32 : 2;
- __u32 bp_xn : 1;
- __u32 bp_xt : 1;
- __u32 bp_ti : 1;
- __u32 bp_ni : 1;
- __u32 suppr_y : 1;
- __u32 suppr_z : 1;
+ __u32 reserved4 : 2;
+ __u32 c : 1;
+ __u32 r : 1;
+ __u32 b : 1;
+ __u32 j : 1;
+ __u32 e : 1;
+ __u32 x : 1;
+ __u32 reserved5 : 2;
+ __u32 bpxn : 1;
+ __u32 bpxt : 1;
+ __u32 bpti : 1;
+ __u32 bpni : 1;
+ __u32 reserved6 : 2;
- __u32 dc_miss_extra : 1;
- __u32 lat_lev_ignore : 1;
- __u32 ic_lat_lev : 4;
- __u32 dc_lat_lev : 4;
+ __u32 d : 1;
+ __u32 f : 1;
+ __u32 ic : 4;
+ __u32 dc : 4;
- __u64 reserved1;
- __u64 scaling_factor;
+ __u64 reserved7;
+ __u64 sf;
__u64 rsic;
- __u64 reserved2;
+ __u64 reserved8;
} __packed __aligned(8);
extern struct runtime_instr_cb runtime_instr_empty_cb;
@@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
load_runtime_instr_cb(&runtime_instr_empty_cb);
}
-void exit_thread_runtime_instr(void);
+struct task_struct;
+
+void runtime_instr_release(struct task_struct *tsk);
#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
deleted file mode 100644
index f731b7b518bd..000000000000
--- a/arch/s390/include/asm/rwsem.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _S390_RWSEM_H
-#define _S390_RWSEM_H
-
-/*
- * S390 version
- * Copyright IBM Corp. 2002
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-/*
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
-#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- if (old < 0)
- rwsem_down_read_failed(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: ltgr %1,%0\n"
- " jm 1f\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- return old >= 0 ? 1 : 0;
-}
-
-/*
- * lock for writing
- */
-static inline long ___down_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = RWSEM_ACTIVE_WRITE_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
-
- return old;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- signed long old;
-
- asm volatile(
- " lg %0,%1\n"
- "0: ltgr %0,%0\n"
- " jnz 1f\n"
- " csg %0,%3,%1\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old), "=Q" (sem->count)
- : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
- : "cc", "memory");
- return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- if (new < 0)
- if ((new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = -RWSEM_ACTIVE_WRITE_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
- if (new < 0)
- if ((new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = -RWSEM_WAITING_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
- if (new > 1)
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 0ac3e8166e85..54f81f8ed662 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,6 @@
#include <asm-generic/sections.h>
-extern char _eshared[], _ehead[];
+extern char _ehead[];
#endif
diff --git a/arch/s390/include/asm/segment.h b/arch/s390/include/asm/segment.h
index 8bfce3475b1c..97a0582b8d0f 100644
--- a/arch/s390/include/asm/segment.h
+++ b/arch/s390/include/asm/segment.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SEGMENT_H
#define _ASM_SEGMENT_H
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f2c2b7cd9099..2eb0c8a7b664 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -36,7 +36,7 @@
#define MACHINE_FLAG_SCC _BITUL(17)
#define LPP_MAGIC _BITUL(31)
-#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
+#define LPP_PID_MASK _AC(0xffffffff, UL)
#ifndef __ASSEMBLY__
@@ -98,9 +98,6 @@ extern char vmpoff_cmd[];
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
-#define NSS_NAME_SIZE 8
-extern char kernel_nss_name[];
-
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index babe83ed416c..3907ead27ffa 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -28,6 +28,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
+extern void smp_emergency_stop(void);
extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
@@ -53,6 +54,10 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
func(data);
}
+static inline void smp_emergency_stop(void)
+{
+}
+
static inline int smp_find_processor_id(u16 address) { return 0; }
static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f3f5e0155b10..0a29588aa00b 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -14,6 +14,7 @@
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
+#include <asm/alternative.h>
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
@@ -36,20 +37,16 @@ bool arch_vcpu_is_preempted(int cpu);
* (the type definitions are in asm/spinlock_types.h)
*/
-void arch_lock_relax(int cpu);
+void arch_spin_relax(arch_spinlock_t *lock);
+#define arch_spin_relax arch_spin_relax
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-
-static inline void arch_spin_relax(arch_spinlock_t *lock)
-{
- arch_lock_relax(lock->lock);
-}
+void arch_spin_lock_setup(int cpu);
static inline u32 arch_spin_lockval(int cpu)
{
- return ~cpu;
+ return cpu + 1;
}
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -65,8 +62,7 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
barrier();
- return likely(arch_spin_value_unlocked(*lp) &&
- __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+ return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -79,8 +75,9 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
if (!arch_spin_trylock_once(lp))
- arch_spin_lock_wait_flags(lp, flags);
+ arch_spin_lock_wait(lp);
}
+#define arch_spin_lock_flags arch_spin_lock_flags
static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
@@ -93,11 +90,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- " .long 0xb2fa0070\n" /* NIAI 7 */
-#endif
- " st %1,%0\n"
- : "=Q" (lp->lock) : "d" (0) : "cc", "memory");
+ ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
+ " sth %1,%0\n"
+ : "=Q" (((unsigned short *) &lp->lock)[1])
+ : "d" (0) : "cc", "memory");
}
/*
@@ -111,168 +107,53 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
-extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
-extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-static inline int arch_read_trylock_once(arch_rwlock_t *rw)
-{
- int old = ACCESS_ONCE(rw->lock);
- return likely(old >= 0 &&
- __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
-}
-
-static inline int arch_write_trylock_once(arch_rwlock_t *rw)
-{
- int old = ACCESS_ONCE(rw->lock);
- return likely(old == 0 &&
- __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
-}
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __RAW_OP_OR "lao"
-#define __RAW_OP_AND "lan"
-#define __RAW_OP_ADD "laa"
-
-#define __RAW_LOCK(ptr, op_val, op_string) \
-({ \
- int old_val; \
- \
- typecheck(int *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- "bcr 14,0\n" \
- : "=d" (old_val), "+Q" (*ptr) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#define __RAW_UNLOCK(ptr, op_val, op_string) \
-({ \
- int old_val; \
- \
- typecheck(int *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- : "=d" (old_val), "+Q" (*ptr) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
+#define arch_read_relax(rw) barrier()
+#define arch_write_relax(rw) barrier()
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
+void arch_read_lock_wait(arch_rwlock_t *lp);
+void arch_write_lock_wait(arch_rwlock_t *lp);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
int old;
- old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
- if (old < 0)
- _raw_read_lock_wait(rw);
+ old = __atomic_add(1, &rw->cnts);
+ if (old & 0xffff0000)
+ arch_read_lock_wait(rw);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+ __atomic_add_const_barrier(-1, &rw->cnts);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- int old;
-
- old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
- if (old != 0)
- _raw_write_lock_wait(rw, old);
- rw->owner = SPINLOCK_LOCKVAL;
+ if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+ arch_write_lock_wait(rw);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- rw->owner = 0;
- __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+ __atomic_add_barrier(-0x30000, &rw->cnts);
}
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- if (!arch_read_trylock_once(rw))
- _raw_read_lock_wait(rw);
-}
-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int old;
- do {
- old = ACCESS_ONCE(rw->lock);
- } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- if (!arch_write_trylock_once(rw))
- _raw_write_lock_wait(rw);
- rw->owner = SPINLOCK_LOCKVAL;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- typecheck(int, rw->lock);
-
- rw->owner = 0;
- asm volatile(
- "st %1,%0\n"
- : "+Q" (rw->lock)
- : "d" (0)
- : "cc", "memory");
-}
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- if (!arch_read_trylock_once(rw))
- return _raw_read_trylock_retry(rw);
- return 1;
+ old = READ_ONCE(rw->cnts);
+ return (!(old & 0xffff0000) &&
+ __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
- return 0;
- rw->owner = SPINLOCK_LOCKVAL;
- return 1;
-}
-
-static inline void arch_read_relax(arch_rwlock_t *rw)
-{
- arch_lock_relax(rw->owner);
-}
+ int old;
-static inline void arch_write_relax(arch_rwlock_t *rw)
-{
- arch_lock_relax(rw->owner);
+ old = READ_ONCE(rw->cnts);
+ return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}
#endif /* __ASM_SPINLOCK_H */
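
The rewritten rwlock folds everything into the single cnts word: each reader adds 1 in the low halfword, a writer claims the lock by swapping in 0x30000, and any bit set in the upper halfword tells a reader it has to wait. A plain-C sketch of how the fast paths above interpret that layout (no atomics, purely illustrative):

	static inline int rwlock_reader_must_wait(int cnts)
	{
		return (cnts & 0xffff0000) != 0;	/* writer present or pending */
	}

	static inline int rwlock_reader_count(int cnts)
	{
		return cnts & 0xffff;			/* active readers */
	}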
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 1861a0c5dd47..cfed272e4fd5 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
typedef struct {
- int lock;
- int owner;
+ int cnts;
+ arch_spinlock_t wait;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 27ce494198f5..50f26fc9acb2 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -18,6 +18,9 @@
#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMSET16 /* arch function */
+#define __HAVE_ARCH_MEMSET32 /* arch function */
+#define __HAVE_ARCH_MEMSET64 /* arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
#define __HAVE_ARCH_STRCMP /* arch function */
#define __HAVE_ARCH_STRCPY /* inline & arch function */
@@ -31,17 +34,17 @@
#define __HAVE_ARCH_STRSTR /* arch function */
/* Prototypes for non-inlined arch strings functions. */
-extern int memcmp(const void *, const void *, size_t);
-extern void *memcpy(void *, const void *, size_t);
-extern void *memset(void *, int, size_t);
-extern void *memmove(void *, const void *, size_t);
-extern int strcmp(const char *,const char *);
-extern size_t strlcat(char *, const char *, size_t);
-extern size_t strlcpy(char *, const char *, size_t);
-extern char *strncat(char *, const char *, size_t);
-extern char *strncpy(char *, const char *, size_t);
-extern char *strrchr(const char *, int);
-extern char *strstr(const char *, const char *);
+int memcmp(const void *s1, const void *s2, size_t n);
+void *memcpy(void *dest, const void *src, size_t n);
+void *memset(void *s, int c, size_t n);
+void *memmove(void *dest, const void *src, size_t n);
+int strcmp(const char *s1, const char *s2);
+size_t strlcat(char *dest, const char *src, size_t n);
+size_t strlcpy(char *dest, const char *src, size_t size);
+char *strncat(char *dest, const char *src, size_t n);
+char *strncpy(char *dest, const char *src, size_t n);
+char *strrchr(const char *s, int c);
+char *strstr(const char *s1, const char *s2);
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
@@ -50,7 +53,26 @@ extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
-#if !defined(IN_ARCH_STRING_C)
+void *__memset16(uint16_t *s, uint16_t v, size_t count);
+void *__memset32(uint32_t *s, uint32_t v, size_t count);
+void *__memset64(uint64_t *s, uint64_t v, size_t count);
+
+static inline void *memset16(uint16_t *s, uint16_t v, size_t count)
+{
+ return __memset16(s, v, count * sizeof(v));
+}
+
+static inline void *memset32(uint32_t *s, uint32_t v, size_t count)
+{
+ return __memset32(s, v, count * sizeof(v));
+}
+
+static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
+{
+ return __memset64(s, v, count * sizeof(v));
+}
+
+#if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY))
static inline void *memchr(const void * s, int c, size_t n)
{
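
The new memset16/32/64 wrappers take an element count and scale it to the byte length the underlying __memsetNN primitives expect. A small usage sketch with made-up data:

	#include <linux/kernel.h>
	#include <linux/string.h>

	static void memsetNN_example(void)
	{
		u16 halfwords[8];
		u64 doublewords[4];

		/* fill 8 halfwords (16 bytes) with 0xabcd */
		memset16(halfwords, 0xabcd, ARRAY_SIZE(halfwords));
		/* fill 4 doublewords (32 bytes) with a 64-bit pattern */
		memset64(doublewords, 0x0123456789abcdefULL, ARRAY_SIZE(doublewords));
	}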
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index c21fe1d57c00..c61b2cc1a8a8 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}
-#define switch_to(prev,next,last) do { \
- if (prev->mm) { \
- save_fpu_regs(); \
- save_access_regs(&prev->thread.acrs[0]); \
- save_ri_cb(prev->thread.ri_cb); \
- save_gs_cb(prev->thread.gs_cb); \
- } \
- if (next->mm) { \
- update_cr_regs(next); \
- set_cpu_flag(CIF_FPU); \
- restore_access_regs(&next->thread.acrs[0]); \
- restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
- restore_gs_cb(next->thread.gs_cb); \
- } \
- prev = __switch_to(prev,next); \
+#define switch_to(prev, next, last) do { \
+ /* save_fpu_regs() sets the CIF_FPU flag, which enforces \
+ * a restore of the floating point / vector registers as \
+ * soon as the next task returns to user space \
+ */ \
+ save_fpu_regs(); \
+ save_access_regs(&prev->thread.acrs[0]); \
+ save_ri_cb(prev->thread.ri_cb); \
+ save_gs_cb(prev->thread.gs_cb); \
+ update_cr_regs(next); \
+ restore_access_regs(&next->thread.acrs[0]); \
+ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
+ restore_gs_cb(next->thread.gs_cb); \
+ prev = __switch_to(prev, next); \
} while (0)
#endif /* __ASM_SWITCH_TO_H */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6bc941be6921..96f9a9151fde 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Access to user system call parameters and results
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#ifndef _ASM_SYSCALL_H
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2b498e58b914..25057c118d56 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* definition for store system information stsi
*
* Copyright IBM Corp. 2001, 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Ulrich Weigand <weigand@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
@@ -156,7 +153,8 @@ static inline unsigned char topology_mnest_limit(void)
struct topology_core {
unsigned char nl;
unsigned char reserved0[3];
- unsigned char :6;
+ unsigned char :5;
+ unsigned char d:1;
unsigned char pp:2;
unsigned char reserved1;
unsigned short origin;
@@ -198,4 +196,5 @@ struct service_level {
int register_service_level(struct service_level *);
int unregister_service_level(struct service_level *);
+int sthyi_fill(void *dst, u64 *rc);
#endif /* __ASM_S390_SYSINFO_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 55de4eb73604..cca406fdbe51 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -17,6 +17,7 @@ struct cpu_topology_s390 {
unsigned short book_id;
unsigned short drawer_id;
unsigned short node_id;
+ unsigned short dedicated : 1;
cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
@@ -35,6 +36,7 @@ extern cpumask_t cpus_with_topology;
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
+#define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated)
#define mc_capable() 1
@@ -51,6 +53,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
static inline void topology_expect_change(void) { }
#endif /* CONFIG_SCHED_TOPOLOGY */
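A short, hedged sketch of how the new topology_cpu_dedicated() accessor could be consumed; the counting helper itself is an assumption for illustration, not part of the patch.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Count online CPUs that the hypervisor reports as dedicated. */
static unsigned int count_dedicated_cpus(void)
{
	unsigned int cpu, dedicated = 0;

	for_each_online_cpu(cpu)
		if (topology_cpu_dedicated(cpu))
			dedicated++;
	return dedicated;
}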
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cdd0f0d999e2..ad6b91013a05 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -16,7 +16,7 @@
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
-
+#include <asm/facility.h>
/*
* The fs value determines whether argument validity checking should be
@@ -26,27 +26,16 @@
* For historical reasons, these macros are grossly misnamed.
*/
-#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
-
-
-#define KERNEL_DS MAKE_MM_SEG(0)
-#define USER_DS MAKE_MM_SEG(1)
+#define KERNEL_DS (0)
+#define KERNEL_DS_SACF (1)
+#define USER_DS (2)
+#define USER_DS_SACF (3)
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
-#define segment_eq(a,b) ((a).ar4 == (b).ar4)
+#define segment_eq(a,b) (((a) & 2) == ((b) & 2))
-static inline void set_fs(mm_segment_t fs)
-{
- current->thread.mm_segment = fs;
- if (uaccess_kernel()) {
- set_cpu_flag(CIF_ASCE_SECONDARY);
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
- } else {
- clear_cpu_flag(CIF_ASCE_SECONDARY);
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- }
-}
+void set_fs(mm_segment_t fs);
static inline int __range_ok(unsigned long addr, unsigned long size)
{
@@ -95,7 +84,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x810000UL;
+ unsigned long spec = 0x010000UL;
int rc;
switch (size) {
@@ -125,7 +114,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x81UL;
+ unsigned long spec = 0x01UL;
int rc;
switch (size) {
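With the new address-space constants above, segment_eq() compares only bit 1, so each SACF variant compares equal to its base constant while kernel and user space remain distinct. A small standalone check of that arithmetic (plain userspace C, mirroring the macro definitions for illustration):

#include <assert.h>

#define KERNEL_DS	(0)
#define KERNEL_DS_SACF	(1)
#define USER_DS		(2)
#define USER_DS_SACF	(3)
#define segment_eq(a, b) (((a) & 2) == ((b) & 2))

int main(void)
{
	assert(segment_eq(KERNEL_DS, KERNEL_DS_SACF));	/* 0 == 0 */
	assert(segment_eq(USER_DS, USER_DS_SACF));	/* 2 == 2 */
	assert(!segment_eq(KERNEL_DS, USER_DS));	/* 0 != 2 */
	return 0;
}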
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bb2ce72300b0..169d7604eb80 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -46,7 +46,9 @@ struct vdso_per_cpu_data {
};
extern struct vdso_data *vdso_data;
+extern struct vdso_data boot_vdso_data;
+void vdso_alloc_boot_cpu(struct lowcore *lowcore);
int vdso_alloc_per_cpu(struct lowcore *lowcore);
void vdso_free_per_cpu(struct lowcore *lowcore);
diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h
index d375526c261f..605dc46bac5e 100644
--- a/arch/s390/include/asm/vga.h
+++ b/arch/s390/include/asm/vga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_VGA_H
#define _ASM_S390_VGA_H
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 098f28778a13..92b7c9b3e641 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..cefe7c7cd4f6
--- /dev/null
+++ b/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
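bpf_user_pt_regs_t is a typedef of the new user_pt_regs structure exported in the ptrace.h hunk further below (PSW plus general registers). A hedged userspace sketch of reading fields through it; how the regs pointer is obtained is left out and the helper name is an assumption.

#include <stdio.h>
#include <asm/bpf_perf_event.h>	/* the header added above */

/* Hypothetical helper: print PSW address and gpr 15 (stack pointer). */
static void dump_user_regs(const bpf_user_pt_regs_t *regs)
{
	printf("psw.addr=%lx gpr15=%lx\n",
	       regs->psw.addr, regs->gprs[15]);
}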
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9ad172dcd912..4cdaa55fabfe 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -6,10 +6,6 @@
*
* Copyright IBM Corp. 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
@@ -228,6 +224,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
#define KVM_SYNC_GSCB (1UL << 9)
+#define KVM_SYNC_BPBC (1UL << 10)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
@@ -251,7 +248,9 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding1[52]; /* riccb needs to be 64byte aligned */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
+ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
__u8 padding2[192]; /* sdnx needs to be 256byte aligned */
union {
diff --git a/arch/s390/include/uapi/asm/kvm_para.h b/arch/s390/include/uapi/asm/kvm_para.h
index 0dc86b3a7cb0..b9ab584adf43 100644
--- a/arch/s390/include/uapi/asm/kvm_para.h
+++ b/arch/s390/include/uapi/asm/kvm_para.h
@@ -4,9 +4,5 @@
*
* Copyright IBM Corp. 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*/
diff --git a/arch/s390/include/uapi/asm/kvm_perf.h b/arch/s390/include/uapi/asm/kvm_perf.h
index c36c97ffdc6f..84606b8cc49e 100644
--- a/arch/s390/include/uapi/asm/kvm_perf.h
+++ b/arch/s390/include/uapi/asm/kvm_perf.h
@@ -4,10 +4,6 @@
*
* Copyright 2014 IBM Corp.
* Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#ifndef __LINUX_KVM_PERF_S390_H
diff --git a/arch/s390/include/uapi/asm/kvm_virtio.h b/arch/s390/include/uapi/asm/kvm_virtio.h
deleted file mode 100644
index 73283677a132..000000000000
--- a/arch/s390/include/uapi/asm/kvm_virtio.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * definition for virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- */
-
-#ifndef __KVM_S390_VIRTIO_H
-#define __KVM_S390_VIRTIO_H
-
-#include <linux/types.h>
-
-struct kvm_device_desc {
- /* The device type: console, network, disk etc. Type 0 terminates. */
- __u8 type;
- /* The number of virtqueues (first in config array) */
- __u8 num_vq;
- /*
- * The number of bytes of feature bits. Multiply by 2: one for host
- * features and one for guest acknowledgements.
- */
- __u8 feature_len;
- /* The number of bytes of the config array after virtqueues. */
- __u8 config_len;
- /* A status byte, written by the Guest. */
- __u8 status;
- __u8 config[0];
-};
-
-/*
- * This is how we expect the device configuration field for a virtqueue
- * to be laid out in config space.
- */
-struct kvm_vqconfig {
- /* The token returned with an interrupt. Set by the guest */
- __u64 token;
- /* The address of the virtio ring */
- __u64 address;
- /* The number of entries in the virtio_ring */
- __u16 num;
-
-};
-
-#define KVM_S390_VIRTIO_NOTIFY 0
-#define KVM_S390_VIRTIO_RESET 1
-#define KVM_S390_VIRTIO_SET_STATUS 2
-
-/* The alignment to use between consumer and producer parts of vring.
- * This is pagesize for historical reasons. */
-#define KVM_S390_VIRTIO_RING_ALIGN 4096
-
-
-/* These values are supposed to be in ext_params on an interrupt */
-#define VIRTIO_PARAM_MASK 0xff
-#define VIRTIO_PARAM_VRING_INTERRUPT 0x0
-#define VIRTIO_PARAM_CONFIG_CHANGED 0x1
-#define VIRTIO_PARAM_DEV_ADD 0x2
-
-#endif
diff --git a/arch/s390/include/uapi/asm/perf_regs.h b/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d17dd9e5d516
--- /dev/null
+++ b/arch/s390/include/uapi/asm/perf_regs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_PERF_REGS_H
+#define _ASM_S390_PERF_REGS_H
+
+enum perf_event_s390_regs {
+ PERF_REG_S390_R0,
+ PERF_REG_S390_R1,
+ PERF_REG_S390_R2,
+ PERF_REG_S390_R3,
+ PERF_REG_S390_R4,
+ PERF_REG_S390_R5,
+ PERF_REG_S390_R6,
+ PERF_REG_S390_R7,
+ PERF_REG_S390_R8,
+ PERF_REG_S390_R9,
+ PERF_REG_S390_R10,
+ PERF_REG_S390_R11,
+ PERF_REG_S390_R12,
+ PERF_REG_S390_R13,
+ PERF_REG_S390_R14,
+ PERF_REG_S390_R15,
+ PERF_REG_S390_FP0,
+ PERF_REG_S390_FP1,
+ PERF_REG_S390_FP2,
+ PERF_REG_S390_FP3,
+ PERF_REG_S390_FP4,
+ PERF_REG_S390_FP5,
+ PERF_REG_S390_FP6,
+ PERF_REG_S390_FP7,
+ PERF_REG_S390_FP8,
+ PERF_REG_S390_FP9,
+ PERF_REG_S390_FP10,
+ PERF_REG_S390_FP11,
+ PERF_REG_S390_FP12,
+ PERF_REG_S390_FP13,
+ PERF_REG_S390_FP14,
+ PERF_REG_S390_FP15,
+ PERF_REG_S390_MASK,
+ PERF_REG_S390_PC,
+
+ PERF_REG_S390_MAX
+};
+
+#endif /* _ASM_S390_PERF_REGS_H */
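A hedged sketch of how a sample-register mask might be built from the new enum, e.g. for perf_event_attr.sample_regs_user; the mask construction is illustrative and not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <asm/perf_regs.h>	/* the header added above */

int main(void)
{
	/* request gpr 15 and the PSW address in user register samples */
	uint64_t sample_regs = (1ULL << PERF_REG_S390_R15) |
			       (1ULL << PERF_REG_S390_PC);

	printf("sample_regs_user mask: 0x%llx\n",
	       (unsigned long long)sample_regs);
	return 0;
}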
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 0d23c8ff2900..543dd70e12c8 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -162,7 +162,7 @@
#define GPR_SIZE 8
#define CR_SIZE 8
-#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
+#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
#endif /* __s390x__ */
@@ -179,17 +179,16 @@
#define ACR_SIZE 4
-#define PTRACE_OLDSETOPTIONS 21
+#define PTRACE_OLDSETOPTIONS 21
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
-typedef union
-{
- float f;
- double d;
- __u64 ui;
+typedef union {
+ float f;
+ double d;
+ __u64 ui;
struct
{
__u32 hi;
@@ -197,23 +196,21 @@ typedef union
} fp;
} freg_t;
-typedef struct
-{
- __u32 fpc;
+typedef struct {
+ __u32 fpc;
__u32 pad;
- freg_t fprs[NUM_FPRS];
+ freg_t fprs[NUM_FPRS];
} s390_fp_regs;
-#define FPC_EXCEPTION_MASK 0xF8000000
-#define FPC_FLAGS_MASK 0x00F80000
-#define FPC_DXC_MASK 0x0000FF00
-#define FPC_RM_MASK 0x00000003
+#define FPC_EXCEPTION_MASK 0xF8000000
+#define FPC_FLAGS_MASK 0x00F80000
+#define FPC_DXC_MASK 0x0000FF00
+#define FPC_RM_MASK 0x00000003
/* this typedef defines how a Program Status Word looks like */
-typedef struct
-{
- unsigned long mask;
- unsigned long addr;
+typedef struct {
+ unsigned long mask;
+ unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
#ifndef __s390x__
@@ -282,8 +279,7 @@ typedef struct
/*
* The s390_regs structure is used to define the elf_gregset_t.
*/
-typedef struct
-{
+typedef struct {
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned int acrs[NUM_ACRS];
@@ -291,24 +287,32 @@ typedef struct
} s390_regs;
/*
+ * The user_pt_regs structure exports the beginning of
+ * the in-kernel pt_regs structure to user space.
+ */
+typedef struct {
+ unsigned long args[1];
+ psw_t psw;
+ unsigned long gprs[NUM_GPRS];
+} user_pt_regs;
+
+/*
* Now for the user space program event recording (trace) definitions.
* The following structures are used only for the ptrace interface, don't
* touch or even look at it if you don't want to modify the user-space
* ptrace interface. In particular stay away from it for in-kernel PER.
*/
-typedef struct
-{
+typedef struct {
unsigned long cr[NUM_CR_WORDS];
} per_cr_words;
#define PER_EM_MASK 0xE8000000UL
-typedef struct
-{
+typedef struct {
#ifdef __s390x__
- unsigned : 32;
+ unsigned : 32;
#endif /* __s390x__ */
- unsigned em_branching : 1;
+ unsigned em_branching : 1;
unsigned em_instruction_fetch : 1;
/*
* Switching on storage alteration automatically fixes
@@ -317,44 +321,41 @@ typedef struct
unsigned em_storage_alteration : 1;
unsigned em_gpr_alt_unused : 1;
unsigned em_store_real_address : 1;
- unsigned : 3;
+ unsigned : 3;
unsigned branch_addr_ctl : 1;
- unsigned : 1;
+ unsigned : 1;
unsigned storage_alt_space_ctl : 1;
- unsigned : 21;
+ unsigned : 21;
unsigned long starting_addr;
unsigned long ending_addr;
} per_cr_bits;
-typedef struct
-{
+typedef struct {
unsigned short perc_atmid;
unsigned long address;
unsigned char access_id;
} per_lowcore_words;
-typedef struct
-{
- unsigned perc_branching : 1;
+typedef struct {
+ unsigned perc_branching : 1;
unsigned perc_instruction_fetch : 1;
unsigned perc_storage_alteration : 1;
- unsigned perc_gpr_alt_unused : 1;
+ unsigned perc_gpr_alt_unused : 1;
unsigned perc_store_real_address : 1;
- unsigned : 3;
- unsigned atmid_psw_bit_31 : 1;
- unsigned atmid_validity_bit : 1;
- unsigned atmid_psw_bit_32 : 1;
- unsigned atmid_psw_bit_5 : 1;
- unsigned atmid_psw_bit_16 : 1;
- unsigned atmid_psw_bit_17 : 1;
- unsigned si : 2;
+ unsigned : 3;
+ unsigned atmid_psw_bit_31 : 1;
+ unsigned atmid_validity_bit : 1;
+ unsigned atmid_psw_bit_32 : 1;
+ unsigned atmid_psw_bit_5 : 1;
+ unsigned atmid_psw_bit_16 : 1;
+ unsigned atmid_psw_bit_17 : 1;
+ unsigned si : 2;
unsigned long address;
- unsigned : 4;
- unsigned access_id : 4;
+ unsigned : 4;
+ unsigned access_id : 4;
} per_lowcore_bits;
-typedef struct
-{
+typedef struct {
union {
per_cr_words words;
per_cr_bits bits;
@@ -364,9 +365,9 @@ typedef struct
* the kernel always sets them to zero. To enable single
* stepping use ptrace(PTRACE_SINGLESTEP) instead.
*/
- unsigned single_step : 1;
+ unsigned single_step : 1;
unsigned instruction_fetch : 1;
- unsigned : 30;
+ unsigned : 30;
/*
* These addresses are copied into cr10 & cr11 if single
* stepping is switched off
@@ -376,11 +377,10 @@ typedef struct
union {
per_lowcore_words words;
per_lowcore_bits bits;
- } lowcore;
+ } lowcore;
} per_struct;
-typedef struct
-{
+typedef struct {
unsigned int len;
unsigned long kernel_addr;
unsigned long process_addr;
@@ -390,12 +390,12 @@ typedef struct
* S/390 specific non posix ptrace requests. I chose unusual values so
* they are unlikely to clash with future ptrace definitions.
*/
-#define PTRACE_PEEKUSR_AREA 0x5000
-#define PTRACE_POKEUSR_AREA 0x5001
+#define PTRACE_PEEKUSR_AREA 0x5000
+#define PTRACE_POKEUSR_AREA 0x5001
#define PTRACE_PEEKTEXT_AREA 0x5002
#define PTRACE_PEEKDATA_AREA 0x5003
#define PTRACE_POKETEXT_AREA 0x5004
-#define PTRACE_POKEDATA_AREA 0x5005
+#define PTRACE_POKEDATA_AREA 0x5005
#define PTRACE_GET_LAST_BREAK 0x5006
#define PTRACE_PEEK_SYSTEM_CALL 0x5007
#define PTRACE_POKE_SYSTEM_CALL 0x5008
@@ -413,21 +413,19 @@ typedef struct
* PT_PROT definition is loosely based on hppa bsd definition in
* gdb/hppab-nat.c
*/
-#define PTRACE_PROT 21
+#define PTRACE_PROT 21
-typedef enum
-{
+typedef enum {
ptprot_set_access_watchpoint,
ptprot_set_write_watchpoint,
ptprot_disable_watchpoint
} ptprot_flags;
-typedef struct
-{
+typedef struct {
unsigned long lowaddr;
unsigned long hiaddr;
ptprot_flags prot;
-} ptprot_area;
+} ptprot_area;
/* Sequence of bytes for breakpoint illegal instruction. */
#define S390_BREAKPOINT {0x0,0x1}
@@ -439,8 +437,7 @@ typedef struct
* The user_regs_struct defines the way the user registers are
* store on the stack for signal handling.
*/
-struct user_regs_struct
-{
+struct user_regs_struct {
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned int acrs[NUM_ACRS];
diff --git a/arch/s390/include/uapi/asm/sthyi.h b/arch/s390/include/uapi/asm/sthyi.h
new file mode 100644
index 000000000000..b1b022316983
--- /dev/null
+++ b/arch/s390/include/uapi/asm/sthyi.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_STHYI_H
+#define _UAPI_ASM_STHYI_H
+
+#define STHYI_FC_CP_IFL_CAP 0
+
+#endif /* _UAPI_ASM_STHYI_H */
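The STHYI_FC_CP_IFL_CAP function code pairs with the s390_sthyi system call added in the unistd.h hunk below (__NR_s390_sthyi, 380). A hedged userspace sketch of invoking it; the one-page response buffer and the error handling are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

#define STHYI_FC_CP_IFL_CAP	0
#ifndef __NR_s390_sthyi
#define __NR_s390_sthyi		380
#endif

int main(void)
{
	uint64_t rc = 0;
	void *buf;

	/* assumption: a 4K-aligned, 4K-sized response buffer */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	if (syscall(__NR_s390_sthyi, STHYI_FC_CP_IFL_CAP, buf, &rc, 0) != 0) {
		perror("s390_sthyi");
		free(buf);
		return 1;
	}
	printf("sthyi response code: %llu\n", (unsigned long long)rc);
	free(buf);
	return 0;
}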
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index b52bce8ee941..725120939051 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -316,7 +316,8 @@
#define __NR_pwritev2 377
#define __NR_s390_guarded_storage 378
#define __NR_statx 379
-#define NR_syscalls 380
+#define __NR_s390_sthyi 380
+#define NR_syscalls 381
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/include/uapi/asm/virtio-ccw.h b/arch/s390/include/uapi/asm/virtio-ccw.h
index 967aad390105..2b605f7e8483 100644
--- a/arch/s390/include/uapi/asm/virtio-ccw.h
+++ b/arch/s390/include/uapi/asm/virtio-ccw.h
@@ -1,13 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* Definitions for virtio-ccw devices.
*
* Copyright IBM Corp. 2013
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*/
#ifndef __KVM_VIRTIO_CCW_H
diff --git a/arch/s390/include/uapi/asm/vmcp.h b/arch/s390/include/uapi/asm/vmcp.h
index 4caf71714a55..aeaaa030030e 100644
--- a/arch/s390/include/uapi/asm/vmcp.h
+++ b/arch/s390/include/uapi/asm/vmcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright IBM Corp. 2004, 2005
* Interface implementation for communication with the z/VM control program
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 137ef473584e..d568307321fc 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -9,20 +9,6 @@
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ASM_S390_ZCRYPT_H
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4ce2d05929a7..909bce65cb2b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -34,6 +34,8 @@ AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
endif
+CFLAGS_als.o += -D__NO_FORTIFY
+
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
@@ -56,8 +58,8 @@ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o
-obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o
+obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
+obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
extra-y += head.o head64.o vmlinux.lds
@@ -77,7 +79,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
-obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
+obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 000000000000..574e77622c04
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+ alt_instr_disabled = 1;
+ return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+ 0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+ &nop16,
+ &nop32,
+ &nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+ struct brcl_insn brcl = {
+ 0xc0f4,
+ len / 2
+ };
+
+ memcpy(insns, &brcl, sizeof(brcl));
+ insns += sizeof(brcl);
+ len -= sizeof(brcl);
+
+ while (len > 0) {
+ memcpy(insns, &nop16, 2);
+ insns += 2;
+ len -= 2;
+ }
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+ if (len > 6)
+ add_jump_padding(insns, len);
+ else if (len >= 2)
+ memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ struct alt_instr *a;
+ u8 *instr, *replacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+ /*
+ * The scan order should be from start to end. A later scanned
+ * alternative code can overwrite previously scanned alternative code.
+ */
+ for (a = start; a < end; a++) {
+ int insnbuf_sz = 0;
+
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+ if (!test_facility(a->facility))
+ continue;
+
+ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+ WARN_ONCE(1, "cpu alternatives instructions length is "
+ "odd, skipping patching\n");
+ continue;
+ }
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+
+ if (a->instrlen > a->replacementlen) {
+ add_padding(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+
+ s390_kernel_write(instr, insnbuf, insnbuf_sz);
+ }
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ if (!alt_instr_disabled)
+ __apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
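A hedged, standalone mirror of the padding rule in add_padding()/add_jump_padding() above: two-, four- and six-byte gaps get the matching nop, anything longer gets an unconditional brcl over the gap followed by two-byte nops. This re-implements the decision in userspace for illustration rather than reusing the kernel code.

#include <stdio.h>

static const char *pad_strategy(unsigned int len)
{
	if (len > 6)
		return "brcl 15,<skip> over the gap, then 2-byte nops";
	if (len == 6)
		return "6-byte nop (brcl 0,0)";
	if (len == 4)
		return "4-byte nop (0x47000000)";
	if (len == 2)
		return "2-byte nop (0x0700)";
	return "no padding";
}

int main(void)
{
	for (unsigned int len = 0; len <= 10; len += 2)
		printf("pad %2u bytes -> %s\n", len, pad_strategy(len));
	return 0;
}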
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 0e6d2b032484..587b195b588d 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <asm/vdso.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
+#include <asm/nmi.h>
/*
* Make sure that the compiler is new enough. We want a compiler that
@@ -159,6 +160,7 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
+ OFFSET(__LC_CLOCK_COMPARATOR, lowcore, clock_comparator);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
@@ -169,6 +171,7 @@ int main(void)
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
+ OFFSET(__LC_VDSO_ASCE, lowcore, vdso_asce);
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
@@ -176,7 +179,6 @@ int main(void)
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
- OFFSET(__LC_PASTE, lowcore, paste);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
@@ -194,6 +196,9 @@ int main(void)
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
BLANK();
+ /* extended machine check save area */
+ OFFSET(__MCESA_GS_SAVE_AREA, mcesa, guarded_storage_save_area);
+ BLANK();
/* gmap/sie offsets */
OFFSET(__GMAP_ASCE, gmap, asce);
OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index f04db3779b34..59eea9c65d3e 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
return retval;
}
+ groups_sort(group_info);
retval = set_current_groups(group_info);
put_group_info(group_info);
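The added groups_sort() call ensures set_current_groups() receives a sorted list, since later group-membership checks binary-search it. A hedged sketch of the general alloc, fill, sort, commit pattern; the two gid values are made up.

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/uidgid.h>

/* Illustrative only: install a two-entry supplementary group list. */
static int install_example_groups(void)
{
	struct group_info *gi;
	int ret;

	gi = groups_alloc(2);
	if (!gi)
		return -ENOMEM;
	gi->gid[0] = KGIDT_INIT(1000);
	gi->gid[1] = KGIDT_INIT(50);

	groups_sort(gi);		/* required before committing */
	ret = set_current_groups(gi);
	put_group_info(gi);
	return ret;
}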
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a4a1208e3df3..ef246940b44c 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -50,19 +50,6 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;
-static inline void sigset_to_sigset32(unsigned long *set64,
- compat_sigset_word *set32)
-{
- set32[0] = (compat_sigset_word) set64[0];
- set32[1] = (compat_sigset_word)(set64[0] >> 32);
-}
-
-static inline void sigset32_to_sigset(compat_sigset_word *set32,
- unsigned long *set64)
-{
- set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
-}
-
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -294,12 +281,10 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
- compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
goto badframe;
- sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
save_fpu_regs();
if (restore_sigregs32(regs, &frame->sregs))
@@ -317,12 +302,10 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
- compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
+ if (get_compat_sigset(&set, &frame->uc.uc_sigmask))
goto badframe;
- sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -372,7 +355,6 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
{
int sig = ksig->sig;
sigframe32 __user *frame;
- struct sigcontext32 sc;
unsigned long restorer;
size_t frame_size;
@@ -394,9 +376,10 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
/* Create struct sigcontext32 on the signal stack */
- sigset_to_sigset32(set->sig, sc.oldmask);
- sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
- if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
+ if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
+ set, sizeof(compat_sigset_t)))
+ return -EFAULT;
+ if (__put_user(ptr_to_compat(&frame->sc), &frame->sc.sregs))
return -EFAULT;
/* Store registers needed to create the signal frame */
@@ -455,7 +438,6 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
- compat_sigset_t cset;
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
@@ -502,12 +484,11 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
store_sigregs();
/* Create ucontext on the signal stack. */
- sigset_to_sigset32(set->sig, cset.sig);
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
+ put_compat_sigset(&frame->uc.uc_sigmask, set, sizeof(compat_sigset_t)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index d04918583971..11e9d8b5c1b0 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -181,3 +181,4 @@ COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
+COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 05a9cf4ae9c2..80e974adb9e8 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -5,7 +5,7 @@
* Copyright IBM Corp. 1999, 2012
*
* Author(s): Michael Holzheu (holzheu@de.ibm.com),
- * Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ * Holger Smolinski (Holger.Smolinski@de.ibm.com)
*
* Bugreports to: <Linux390@de.ibm.com>
*/
@@ -37,69 +37,67 @@
typedef struct file_private_info {
loff_t offset; /* offset of last read in file */
- int act_area; /* number of last formated area */
- int act_page; /* act page in given area */
- int act_entry; /* last formated entry (offset */
- /* relative to beginning of last */
- /* formated page) */
- size_t act_entry_offset; /* up to this offset we copied */
+ int act_area; /* number of last formated area */
+ int act_page; /* act page in given area */
+ int act_entry; /* last formated entry (offset */
+ /* relative to beginning of last */
+ /* formated page) */
+ size_t act_entry_offset; /* up to this offset we copied */
/* in last read the last formated */
/* entry to userland */
char temp_buf[2048]; /* buffer for output */
- debug_info_t *debug_info_org; /* original debug information */
+ debug_info_t *debug_info_org; /* original debug information */
debug_info_t *debug_info_snap; /* snapshot of debug information */
struct debug_view *view; /* used view of debug info */
} file_private_info_t;
-typedef struct
-{
+typedef struct {
char *string;
- /*
- * This assumes that all args are converted into longs
- * on L/390 this is the case for all types of parameter
- * except of floats, and long long (32 bit)
+ /*
+ * This assumes that all args are converted into longs
+ * on L/390 this is the case for all types of parameter
+ * except of floats, and long long (32 bit)
*
*/
long args[0];
} debug_sprintf_entry_t;
-
/* internal function prototyes */
static int debug_init(void);
static ssize_t debug_output(struct file *file, char __user *user_buf,
- size_t user_len, loff_t * offset);
+ size_t user_len, loff_t *offset);
static ssize_t debug_input(struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset);
+ size_t user_len, loff_t *offset);
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
static debug_info_t *debug_info_create(const char *name, int pages_per_area,
- int nr_areas, int buf_size, umode_t mode);
+ int nr_areas, int buf_size, umode_t mode);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
-static int debug_prolog_level_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf);
-static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_buf_size, loff_t * offset);
-static int debug_prolog_pages_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf);
-static int debug_input_pages_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_buf_size, loff_t * offset);
-static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_buf_size, loff_t * offset);
-static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, const char *in_buf);
-static int debug_raw_format_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf,
- const char *in_buf);
-static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
- int area, debug_entry_t * entry, char *out_buf);
-
-static int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, debug_sprintf_entry_t *curr_event);
+static int debug_prolog_level_fn(debug_info_t *id,
+ struct debug_view *view, char *out_buf);
+static int debug_input_level_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_buf_size, loff_t *offset);
+static int debug_prolog_pages_fn(debug_info_t *id,
+ struct debug_view *view, char *out_buf);
+static int debug_input_pages_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_buf_size, loff_t *offset);
+static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_buf_size, loff_t *offset);
+static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, const char *in_buf);
+static int debug_raw_format_fn(debug_info_t *id,
+ struct debug_view *view, char *out_buf,
+ const char *in_buf);
+static int debug_raw_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf);
+
+static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, debug_sprintf_entry_t *curr_event);
/* globals */
@@ -142,19 +140,19 @@ static struct debug_view debug_pages_view = {
};
static struct debug_view debug_flush_view = {
- "flush",
- NULL,
- NULL,
- NULL,
- &debug_input_flush_fn,
- NULL
+ "flush",
+ NULL,
+ NULL,
+ NULL,
+ &debug_input_flush_fn,
+ NULL
};
struct debug_view debug_sprintf_view = {
"sprintf",
NULL,
&debug_dflt_header_fn,
- (debug_format_proc_t*)&debug_sprintf_format_fn,
+ (debug_format_proc_t *)&debug_sprintf_format_fn,
NULL,
NULL
};
@@ -165,18 +163,18 @@ static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
/* static globals */
-static debug_info_t *debug_area_first = NULL;
-static debug_info_t *debug_area_last = NULL;
+static debug_info_t *debug_area_first;
+static debug_info_t *debug_area_last;
static DEFINE_MUTEX(debug_mutex);
static int initialized;
static int debug_critical;
static const struct file_operations debug_file_ops = {
- .owner = THIS_MODULE,
- .read = debug_output,
- .write = debug_input,
- .open = debug_open,
+ .owner = THIS_MODULE,
+ .read = debug_output,
+ .write = debug_input,
+ .open = debug_open,
.release = debug_close,
.llseek = no_llseek,
};
@@ -191,29 +189,23 @@ static struct dentry *debug_debugfs_root_entry;
* areas[areanumber][pagenumber][pageoffset]
*/
-static debug_entry_t***
-debug_areas_alloc(int pages_per_area, int nr_areas)
+static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
{
- debug_entry_t*** areas;
- int i,j;
+ debug_entry_t ***areas;
+ int i, j;
- areas = kmalloc(nr_areas *
- sizeof(debug_entry_t**),
- GFP_KERNEL);
+ areas = kmalloc(nr_areas * sizeof(debug_entry_t **), GFP_KERNEL);
if (!areas)
goto fail_malloc_areas;
for (i = 0; i < nr_areas; i++) {
- areas[i] = kmalloc(pages_per_area *
- sizeof(debug_entry_t*),GFP_KERNEL);
- if (!areas[i]) {
+ areas[i] = kmalloc(pages_per_area * sizeof(debug_entry_t *), GFP_KERNEL);
+ if (!areas[i])
goto fail_malloc_areas2;
- }
- for(j = 0; j < pages_per_area; j++) {
+ for (j = 0; j < pages_per_area; j++) {
areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if(!areas[i][j]) {
- for(j--; j >=0 ; j--) {
+ if (!areas[i][j]) {
+ for (j--; j >= 0 ; j--)
kfree(areas[i][j]);
- }
kfree(areas[i]);
goto fail_malloc_areas2;
}
@@ -222,62 +214,55 @@ debug_areas_alloc(int pages_per_area, int nr_areas)
return areas;
fail_malloc_areas2:
- for(i--; i >= 0; i--){
- for(j=0; j < pages_per_area;j++){
+ for (i--; i >= 0; i--) {
+ for (j = 0; j < pages_per_area; j++)
kfree(areas[i][j]);
- }
kfree(areas[i]);
}
kfree(areas);
fail_malloc_areas:
return NULL;
-
}
-
/*
* debug_info_alloc
* - alloc new debug-info
*/
-
-static debug_info_t*
-debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
- int buf_size, int level, int mode)
+static debug_info_t *debug_info_alloc(const char *name, int pages_per_area,
+ int nr_areas, int buf_size, int level,
+ int mode)
{
- debug_info_t* rc;
+ debug_info_t *rc;
/* alloc everything */
-
rc = kmalloc(sizeof(debug_info_t), GFP_KERNEL);
- if(!rc)
+ if (!rc)
goto fail_malloc_rc;
rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
- if(!rc->active_entries)
+ if (!rc->active_entries)
goto fail_malloc_active_entries;
rc->active_pages = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
- if(!rc->active_pages)
+ if (!rc->active_pages)
goto fail_malloc_active_pages;
- if((mode == ALL_AREAS) && (pages_per_area != 0)){
+ if ((mode == ALL_AREAS) && (pages_per_area != 0)) {
rc->areas = debug_areas_alloc(pages_per_area, nr_areas);
- if(!rc->areas)
+ if (!rc->areas)
goto fail_malloc_areas;
} else {
rc->areas = NULL;
}
/* initialize members */
-
spin_lock_init(&rc->lock);
rc->pages_per_area = pages_per_area;
- rc->nr_areas = nr_areas;
+ rc->nr_areas = nr_areas;
rc->active_area = 0;
- rc->level = level;
- rc->buf_size = buf_size;
- rc->entry_size = sizeof(debug_entry_t) + buf_size;
+ rc->level = level;
+ rc->buf_size = buf_size;
+ rc->entry_size = sizeof(debug_entry_t) + buf_size;
strlcpy(rc->name, name, sizeof(rc->name));
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
- memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
- sizeof(struct dentry*));
+ memset(rc->debugfs_entries, 0, DEBUG_MAX_VIEWS * sizeof(struct dentry *));
refcount_set(&(rc->ref_count), 0);
return rc;
@@ -296,18 +281,15 @@ fail_malloc_rc:
* debug_areas_free
* - free all debug areas
*/
-
-static void
-debug_areas_free(debug_info_t* db_info)
+static void debug_areas_free(debug_info_t *db_info)
{
- int i,j;
+ int i, j;
- if(!db_info->areas)
+ if (!db_info->areas)
return;
for (i = 0; i < db_info->nr_areas; i++) {
- for(j = 0; j < db_info->pages_per_area; j++) {
+ for (j = 0; j < db_info->pages_per_area; j++)
kfree(db_info->areas[i][j]);
- }
kfree(db_info->areas[i]);
}
kfree(db_info->areas);
@@ -318,9 +300,8 @@ debug_areas_free(debug_info_t* db_info)
* debug_info_free
* - free memory debug-info
*/
-
-static void
-debug_info_free(debug_info_t* db_info){
+static void debug_info_free(debug_info_t *db_info)
+{
debug_areas_free(db_info);
kfree(db_info->active_entries);
kfree(db_info->active_pages);
@@ -332,35 +313,34 @@ debug_info_free(debug_info_t* db_info){
* - create new debug-info
*/
-static debug_info_t*
-debug_info_create(const char *name, int pages_per_area, int nr_areas,
- int buf_size, umode_t mode)
+static debug_info_t *debug_info_create(const char *name, int pages_per_area,
+ int nr_areas, int buf_size, umode_t mode)
{
- debug_info_t* rc;
+ debug_info_t *rc;
- rc = debug_info_alloc(name, pages_per_area, nr_areas, buf_size,
- DEBUG_DEFAULT_LEVEL, ALL_AREAS);
- if(!rc)
+ rc = debug_info_alloc(name, pages_per_area, nr_areas, buf_size,
+ DEBUG_DEFAULT_LEVEL, ALL_AREAS);
+ if (!rc)
goto out;
rc->mode = mode & ~S_IFMT;
/* create root directory */
- rc->debugfs_root_entry = debugfs_create_dir(rc->name,
- debug_debugfs_root_entry);
+ rc->debugfs_root_entry = debugfs_create_dir(rc->name,
+ debug_debugfs_root_entry);
/* append new element to linked list */
- if (!debug_area_first) {
- /* first element in list */
- debug_area_first = rc;
- rc->prev = NULL;
- } else {
- /* append element to end of list */
- debug_area_last->next = rc;
- rc->prev = debug_area_last;
- }
- debug_area_last = rc;
- rc->next = NULL;
+ if (!debug_area_first) {
+ /* first element in list */
+ debug_area_first = rc;
+ rc->prev = NULL;
+ } else {
+ /* append element to end of list */
+ debug_area_last->next = rc;
+ rc->prev = debug_area_last;
+ }
+ debug_area_last = rc;
+ rc->next = NULL;
refcount_set(&rc->ref_count, 1);
out:
@@ -371,24 +351,22 @@ out:
* debug_info_copy
* - copy debug-info
*/
-
-static debug_info_t*
-debug_info_copy(debug_info_t* in, int mode)
+static debug_info_t *debug_info_copy(debug_info_t *in, int mode)
{
- int i,j;
- debug_info_t* rc;
- unsigned long flags;
+ unsigned long flags;
+ debug_info_t *rc;
+ int i, j;
/* get a consistent copy of the debug areas */
do {
rc = debug_info_alloc(in->name, in->pages_per_area,
in->nr_areas, in->buf_size, in->level, mode);
spin_lock_irqsave(&in->lock, flags);
- if(!rc)
+ if (!rc)
goto out;
/* has something changed in the meantime ? */
- if((rc->pages_per_area == in->pages_per_area) &&
- (rc->nr_areas == in->nr_areas)) {
+ if ((rc->pages_per_area == in->pages_per_area) &&
+ (rc->nr_areas == in->nr_areas)) {
break;
}
spin_unlock_irqrestore(&in->lock, flags);
@@ -396,25 +374,22 @@ debug_info_copy(debug_info_t* in, int mode)
} while (1);
if (mode == NO_AREAS)
- goto out;
+ goto out;
- for(i = 0; i < in->nr_areas; i++){
- for(j = 0; j < in->pages_per_area; j++) {
- memcpy(rc->areas[i][j], in->areas[i][j],PAGE_SIZE);
- }
- }
+ for (i = 0; i < in->nr_areas; i++) {
+ for (j = 0; j < in->pages_per_area; j++)
+ memcpy(rc->areas[i][j], in->areas[i][j], PAGE_SIZE);
+ }
out:
- spin_unlock_irqrestore(&in->lock, flags);
- return rc;
+ spin_unlock_irqrestore(&in->lock, flags);
+ return rc;
}
/*
* debug_info_get
* - increments reference count for debug-info
*/
-
-static void
-debug_info_get(debug_info_t * db_info)
+static void debug_info_get(debug_info_t *db_info)
{
if (db_info)
refcount_inc(&db_info->ref_count);
@@ -424,9 +399,7 @@ debug_info_get(debug_info_t * db_info)
* debug_info_put:
* - decreases reference count for debug-info and frees it if necessary
*/
-
-static void
-debug_info_put(debug_info_t *db_info)
+static void debug_info_put(debug_info_t *db_info)
{
int i;
@@ -439,12 +412,14 @@ debug_info_put(debug_info_t *db_info)
debugfs_remove(db_info->debugfs_entries[i]);
}
debugfs_remove(db_info->debugfs_root_entry);
- if(db_info == debug_area_first)
+ if (db_info == debug_area_first)
debug_area_first = db_info->next;
- if(db_info == debug_area_last)
+ if (db_info == debug_area_last)
debug_area_last = db_info->prev;
- if(db_info->prev) db_info->prev->next = db_info->next;
- if(db_info->next) db_info->next->prev = db_info->prev;
+ if (db_info->prev)
+ db_info->prev->next = db_info->next;
+ if (db_info->next)
+ db_info->next->prev = db_info->prev;
debug_info_free(db_info);
}
}
@@ -453,71 +428,68 @@ debug_info_put(debug_info_t *db_info)
* debug_format_entry:
* - format one debug entry and return size of formated data
*/
-
-static int
-debug_format_entry(file_private_info_t *p_info)
+static int debug_format_entry(file_private_info_t *p_info)
{
- debug_info_t *id_snap = p_info->debug_info_snap;
+ debug_info_t *id_snap = p_info->debug_info_snap;
struct debug_view *view = p_info->view;
debug_entry_t *act_entry;
size_t len = 0;
- if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+
+ if (p_info->act_entry == DEBUG_PROLOG_ENTRY) {
/* print prolog */
- if (view->prolog_proc)
- len += view->prolog_proc(id_snap,view,p_info->temp_buf);
+ if (view->prolog_proc)
+ len += view->prolog_proc(id_snap, view, p_info->temp_buf);
goto out;
}
if (!id_snap->areas) /* this is true, if we have a prolog only view */
goto out; /* or if 'pages_per_area' is 0 */
- act_entry = (debug_entry_t *) ((char*)id_snap->areas[p_info->act_area]
- [p_info->act_page] + p_info->act_entry);
-
+ act_entry = (debug_entry_t *) ((char *)id_snap->areas[p_info->act_area]
+ [p_info->act_page] + p_info->act_entry);
+
if (act_entry->id.stck == 0LL)
- goto out; /* empty entry */
+ goto out; /* empty entry */
if (view->header_proc)
len += view->header_proc(id_snap, view, p_info->act_area,
- act_entry, p_info->temp_buf + len);
+ act_entry, p_info->temp_buf + len);
if (view->format_proc)
len += view->format_proc(id_snap, view, p_info->temp_buf + len,
- DEBUG_DATA(act_entry));
+ DEBUG_DATA(act_entry));
out:
- return len;
+ return len;
}
/*
* debug_next_entry:
* - goto next entry in p_info
*/
-
-static inline int
-debug_next_entry(file_private_info_t *p_info)
+static inline int debug_next_entry(file_private_info_t *p_info)
{
debug_info_t *id;
id = p_info->debug_info_snap;
- if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+ if (p_info->act_entry == DEBUG_PROLOG_ENTRY) {
p_info->act_entry = 0;
p_info->act_page = 0;
goto out;
}
- if(!id->areas)
+ if (!id->areas)
return 1;
p_info->act_entry += id->entry_size;
/* switch to next page, if we reached the end of the page */
- if (p_info->act_entry > (PAGE_SIZE - id->entry_size)){
+ if (p_info->act_entry > (PAGE_SIZE - id->entry_size)) {
/* next page */
p_info->act_entry = 0;
p_info->act_page += 1;
- if((p_info->act_page % id->pages_per_area) == 0) {
+ if ((p_info->act_page % id->pages_per_area) == 0) {
/* next area */
- p_info->act_area++;
- p_info->act_page=0;
+ p_info->act_area++;
+ p_info->act_page = 0;
}
- if(p_info->act_area >= id->nr_areas)
+ if (p_info->act_area >= id->nr_areas)
return 1;
}
out:
- return 0;
+ return 0;
}
/*
@@ -525,26 +497,24 @@ out:
* - called for user read()
* - copies formated debug entries to the user buffer
*/
-
-static ssize_t
-debug_output(struct file *file, /* file descriptor */
- char __user *user_buf, /* user buffer */
- size_t len, /* length of buffer */
- loff_t *offset) /* offset in the file */
+static ssize_t debug_output(struct file *file, /* file descriptor */
+ char __user *user_buf, /* user buffer */
+ size_t len, /* length of buffer */
+ loff_t *offset) /* offset in the file */
{
size_t count = 0;
size_t entry_offset;
file_private_info_t *p_info;
- p_info = ((file_private_info_t *) file->private_data);
- if (*offset != p_info->offset)
+ p_info = (file_private_info_t *) file->private_data;
+ if (*offset != p_info->offset)
return -EPIPE;
- if(p_info->act_area >= p_info->debug_info_snap->nr_areas)
+ if (p_info->act_area >= p_info->debug_info_snap->nr_areas)
return 0;
entry_offset = p_info->act_entry_offset;
- while(count < len){
- int formatted_line_size;
+ while (count < len) {
int formatted_line_residue;
+ int formatted_line_size;
int user_buf_residue;
size_t copy_size;
@@ -552,21 +522,21 @@ debug_output(struct file *file, /* file descriptor */
formatted_line_residue = formatted_line_size - entry_offset;
user_buf_residue = len-count;
copy_size = min(user_buf_residue, formatted_line_residue);
- if(copy_size){
+ if (copy_size) {
if (copy_to_user(user_buf + count, p_info->temp_buf
- + entry_offset, copy_size))
+ + entry_offset, copy_size))
return -EFAULT;
count += copy_size;
entry_offset += copy_size;
}
- if(copy_size == formatted_line_residue){
+ if (copy_size == formatted_line_residue) {
entry_offset = 0;
- if(debug_next_entry(p_info))
+ if (debug_next_entry(p_info))
goto out;
}
}
out:
- p_info->offset = *offset + count;
+ p_info->offset = *offset + count;
p_info->act_entry_offset = entry_offset;
*offset = p_info->offset;
return count;
@@ -577,24 +547,23 @@ out:
* - called for user write()
* - calls input function of view
*/
-
-static ssize_t
-debug_input(struct file *file, const char __user *user_buf, size_t length,
- loff_t *offset)
+static ssize_t debug_input(struct file *file, const char __user *user_buf,
+ size_t length, loff_t *offset)
{
- int rc = 0;
file_private_info_t *p_info;
+ int rc = 0;
mutex_lock(&debug_mutex);
p_info = ((file_private_info_t *) file->private_data);
- if (p_info->view->input_proc)
+ if (p_info->view->input_proc) {
rc = p_info->view->input_proc(p_info->debug_info_org,
p_info->view, file, user_buf,
length, offset);
- else
+ } else {
rc = -EPERM;
+ }
mutex_unlock(&debug_mutex);
- return rc; /* number of input characters */
+ return rc; /* number of input characters */
}
/*
@@ -603,13 +572,11 @@ debug_input(struct file *file, const char __user *user_buf, size_t length,
* - copies formated output to private_data area of the file
* handle
*/
-
-static int
-debug_open(struct inode *inode, struct file *file)
+static int debug_open(struct inode *inode, struct file *file)
{
- int i, rc = 0;
- file_private_info_t *p_info;
debug_info_t *debug_info, *debug_info_snapshot;
+ file_private_info_t *p_info;
+ int i, rc = 0;
mutex_lock(&debug_mutex);
debug_info = file_inode(file)->i_private;
@@ -617,10 +584,8 @@ debug_open(struct inode *inode, struct file *file)
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!debug_info->views[i])
continue;
- else if (debug_info->debugfs_entries[i] ==
- file->f_path.dentry) {
- goto found; /* found view ! */
- }
+ else if (debug_info->debugfs_entries[i] == file->f_path.dentry)
+ goto found; /* found view ! */
}
/* no entry found */
rc = -EINVAL;
@@ -628,31 +593,28 @@ debug_open(struct inode *inode, struct file *file)
found:
- /* Make snapshot of current debug areas to get it consistent. */
+ /* Make snapshot of current debug areas to get it consistent. */
/* To copy all the areas is only needed, if we have a view which */
/* formats the debug areas. */
- if(!debug_info->views[i]->format_proc &&
- !debug_info->views[i]->header_proc){
+ if (!debug_info->views[i]->format_proc && !debug_info->views[i]->header_proc)
debug_info_snapshot = debug_info_copy(debug_info, NO_AREAS);
- } else {
+ else
debug_info_snapshot = debug_info_copy(debug_info, ALL_AREAS);
- }
- if(!debug_info_snapshot){
+ if (!debug_info_snapshot) {
rc = -ENOMEM;
goto out;
}
- p_info = kmalloc(sizeof(file_private_info_t),
- GFP_KERNEL);
- if(!p_info){
+ p_info = kmalloc(sizeof(file_private_info_t), GFP_KERNEL);
+ if (!p_info) {
debug_info_free(debug_info_snapshot);
rc = -ENOMEM;
goto out;
}
p_info->offset = 0;
p_info->debug_info_snap = debug_info_snapshot;
- p_info->debug_info_org = debug_info;
+ p_info->debug_info_org = debug_info;
p_info->view = debug_info->views[i];
p_info->act_area = 0;
p_info->act_page = 0;
@@ -671,17 +633,16 @@ out:
* - called for user close()
* - deletes private_data area of the file handle
*/
-
-static int
-debug_close(struct inode *inode, struct file *file)
+static int debug_close(struct inode *inode, struct file *file)
{
file_private_info_t *p_info;
+
p_info = (file_private_info_t *) file->private_data;
- if(p_info->debug_info_snap)
+ if (p_info->debug_info_snap)
debug_info_free(p_info->debug_info_snap);
debug_info_put(p_info->debug_info_org);
kfree(file->private_data);
- return 0; /* success */
+ return 0; /* success */
}
/*
@@ -690,7 +651,6 @@ debug_close(struct inode *inode, struct file *file)
* The mode parameter allows to specify access rights for the s390dbf files
* - Returns handle for debug area
*/
-
debug_info_t *debug_register_mode(const char *name, int pages_per_area,
int nr_areas, int buf_size, umode_t mode,
uid_t uid, gid_t gid)
@@ -704,18 +664,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
BUG_ON(!initialized);
mutex_lock(&debug_mutex);
- /* create new debug_info */
-
+ /* create new debug_info */
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
- if(!rc)
+ if (!rc)
goto out;
debug_register_view(rc, &debug_level_view);
- debug_register_view(rc, &debug_flush_view);
+ debug_register_view(rc, &debug_flush_view);
debug_register_view(rc, &debug_pages_view);
out:
- if (!rc){
+ if (!rc)
pr_err("Registering debug feature %s failed\n", name);
- }
mutex_unlock(&debug_mutex);
return rc;
}
@@ -726,7 +684,6 @@ EXPORT_SYMBOL(debug_register_mode);
* - creates and initializes debug area for the caller
* - returns handle for debug area
*/
-
debug_info_t *debug_register(const char *name, int pages_per_area,
int nr_areas, int buf_size)
{
@@ -739,18 +696,13 @@ EXPORT_SYMBOL(debug_register);
* debug_unregister:
* - give back debug area
*/
-
-void
-debug_unregister(debug_info_t * id)
+void debug_unregister(debug_info_t *id)
{
if (!id)
- goto out;
+ return;
mutex_lock(&debug_mutex);
debug_info_put(id);
mutex_unlock(&debug_mutex);
-
-out:
- return;
}
EXPORT_SYMBOL(debug_unregister);
@@ -758,18 +710,17 @@ EXPORT_SYMBOL(debug_unregister);
* debug_set_size:
* - set area size (number of pages) and number of areas
*/
-static int
-debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
+static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
{
+ debug_entry_t ***new_areas;
unsigned long flags;
- debug_entry_t *** new_areas;
- int rc=0;
+ int rc = 0;
- if(!id || (nr_areas <= 0) || (pages_per_area < 0))
+ if (!id || (nr_areas <= 0) || (pages_per_area < 0))
return -EINVAL;
- if(pages_per_area > 0){
+ if (pages_per_area > 0) {
new_areas = debug_areas_alloc(pages_per_area, nr_areas);
- if(!new_areas) {
+ if (!new_areas) {
pr_info("Allocating memory for %i pages failed\n",
pages_per_area);
rc = -ENOMEM;
@@ -778,16 +729,16 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
} else {
new_areas = NULL;
}
- spin_lock_irqsave(&id->lock,flags);
+ spin_lock_irqsave(&id->lock, flags);
debug_areas_free(id);
id->areas = new_areas;
id->nr_areas = nr_areas;
id->pages_per_area = pages_per_area;
id->active_area = 0;
- memset(id->active_entries,0,sizeof(int)*id->nr_areas);
+ memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
- spin_unlock_irqrestore(&id->lock,flags);
- pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area);
+ spin_unlock_irqrestore(&id->lock, flags);
+ pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
out:
return rc;
}
@@ -796,24 +747,23 @@ out:
* debug_set_level:
* - set actual debug level
*/
-
-void
-debug_set_level(debug_info_t* id, int new_level)
+void debug_set_level(debug_info_t *id, int new_level)
{
unsigned long flags;
- if(!id)
- return;
- spin_lock_irqsave(&id->lock,flags);
- if(new_level == DEBUG_OFF_LEVEL){
- id->level = DEBUG_OFF_LEVEL;
- pr_info("%s: switched off\n",id->name);
- } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
+
+ if (!id)
+ return;
+ spin_lock_irqsave(&id->lock, flags);
+ if (new_level == DEBUG_OFF_LEVEL) {
+ id->level = DEBUG_OFF_LEVEL;
+ pr_info("%s: switched off\n", id->name);
+ } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
pr_info("%s: level %i is out of range (%i - %i)\n",
- id->name, new_level, 0, DEBUG_MAX_LEVEL);
- } else {
- id->level = new_level;
- }
- spin_unlock_irqrestore(&id->lock,flags);
+ id->name, new_level, 0, DEBUG_MAX_LEVEL);
+ } else {
+ id->level = new_level;
+ }
+ spin_unlock_irqrestore(&id->lock, flags);
}
EXPORT_SYMBOL(debug_set_level);
@@ -821,12 +771,10 @@ EXPORT_SYMBOL(debug_set_level);
* proceed_active_entry:
* - set active entry to next in the ring buffer
*/
-
-static inline void
-proceed_active_entry(debug_info_t * id)
+static inline void proceed_active_entry(debug_info_t *id)
{
if ((id->active_entries[id->active_area] += id->entry_size)
- > (PAGE_SIZE - id->entry_size)){
+ > (PAGE_SIZE - id->entry_size)) {
id->active_entries[id->active_area] = 0;
id->active_pages[id->active_area] =
(id->active_pages[id->active_area] + 1) %
@@ -838,9 +786,7 @@ proceed_active_entry(debug_info_t * id)
* proceed_active_area:
* - set active area to next in the ring buffer
*/
-
-static inline void
-proceed_active_area(debug_info_t * id)
+static inline void proceed_active_area(debug_info_t *id)
{
id->active_area++;
id->active_area = id->active_area % id->nr_areas;
@@ -849,13 +795,11 @@ proceed_active_area(debug_info_t * id)
/*
* get_active_entry:
*/
-
-static inline debug_entry_t*
-get_active_entry(debug_info_t * id)
+static inline debug_entry_t *get_active_entry(debug_info_t *id)
{
return (debug_entry_t *) (((char *) id->areas[id->active_area]
- [id->active_pages[id->active_area]]) +
- id->active_entries[id->active_area]);
+ [id->active_pages[id->active_area]]) +
+ id->active_entries[id->active_area]);
}
/*
@@ -863,23 +807,22 @@ get_active_entry(debug_info_t * id)
* - set timestamp, caller address, cpu number etc.
*/
-static inline void
-debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
- int exception)
+static inline void debug_finish_entry(debug_info_t *id, debug_entry_t *active,
+ int level, int exception)
{
active->id.stck = get_tod_clock_fast() -
*(unsigned long long *) &tod_clock_base[1];
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;
- active->id.fields.level = level;
+ active->id.fields.level = level;
proceed_active_entry(id);
- if(exception)
+ if (exception)
proceed_active_area(id);
}
-static int debug_stoppable=1;
-static int debug_active=1;
+static int debug_stoppable = 1;
+static int debug_active = 1;
#define CTL_S390DBF_STOPPABLE 5678
#define CTL_S390DBF_ACTIVE 5679
@@ -889,9 +832,8 @@ static int debug_active=1;
* always allow read, allow write only if debug_stoppable is set or
* if debug_active is already off
*/
-static int
-s390dbf_procactive(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int s390dbf_procactive(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
if (!write || debug_stoppable || !debug_active)
return proc_dointvec(table, write, buffer, lenp, ppos);
@@ -899,39 +841,37 @@ s390dbf_procactive(struct ctl_table *table, int write,
return 0;
}
-
static struct ctl_table s390dbf_table[] = {
{
- .procname = "debug_stoppable",
+ .procname = "debug_stoppable",
.data = &debug_stoppable,
.maxlen = sizeof(int),
- .mode = S_IRUGO | S_IWUSR,
- .proc_handler = proc_dointvec,
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = proc_dointvec,
},
- {
- .procname = "debug_active",
+ {
+ .procname = "debug_active",
.data = &debug_active,
.maxlen = sizeof(int),
- .mode = S_IRUGO | S_IWUSR,
- .proc_handler = s390dbf_procactive,
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = s390dbf_procactive,
},
{ }
};
static struct ctl_table s390dbf_dir_table[] = {
{
- .procname = "s390dbf",
- .maxlen = 0,
- .mode = S_IRUGO | S_IXUGO,
- .child = s390dbf_table,
+ .procname = "s390dbf",
+ .maxlen = 0,
+ .mode = S_IRUGO | S_IXUGO,
+ .child = s390dbf_table,
},
{ }
};
static struct ctl_table_header *s390dbf_sysctl_header;
-void
-debug_stop_all(void)
+void debug_stop_all(void)
{
if (debug_stoppable)
debug_active = 0;
@@ -947,26 +887,31 @@ void debug_set_critical(void)
* debug_event_common:
* - write debug entry with given size
*/
-
-debug_entry_t*
-debug_event_common(debug_info_t * id, int level, const void *buf, int len)
+debug_entry_t *debug_event_common(debug_info_t *id, int level, const void *buf,
+ int len)
{
- unsigned long flags;
debug_entry_t *active;
+ unsigned long flags;
if (!debug_active || !id->areas)
return NULL;
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
- active = get_active_entry(id);
- memset(DEBUG_DATA(active), 0, id->buf_size);
- memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
- debug_finish_entry(id, active, level, 0);
- spin_unlock_irqrestore(&id->lock, flags);
+ }
+ do {
+ active = get_active_entry(id);
+ memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+ if (len < id->buf_size)
+ memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
+ debug_finish_entry(id, active, level, 0);
+ len -= id->buf_size;
+ buf += id->buf_size;
+ } while (len > 0);
+ spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(debug_event_common);
@@ -975,26 +920,31 @@ EXPORT_SYMBOL(debug_event_common);
* debug_exception_common:
* - write debug entry with given size and switch to next debug area
*/
-
-debug_entry_t
-*debug_exception_common(debug_info_t * id, int level, const void *buf, int len)
+debug_entry_t *debug_exception_common(debug_info_t *id, int level,
+ const void *buf, int len)
{
- unsigned long flags;
debug_entry_t *active;
+ unsigned long flags;
if (!debug_active || !id->areas)
return NULL;
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
- active = get_active_entry(id);
- memset(DEBUG_DATA(active), 0, id->buf_size);
- memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
- debug_finish_entry(id, active, level, 1);
- spin_unlock_irqrestore(&id->lock, flags);
+ }
+ do {
+ active = get_active_entry(id);
+ memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+ if (len < id->buf_size)
+ memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
+ debug_finish_entry(id, active, level, len <= id->buf_size);
+ len -= id->buf_size;
+ buf += id->buf_size;
+ } while (len > 0);
+ spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(debug_exception_common);
@@ -1002,47 +952,44 @@ EXPORT_SYMBOL(debug_exception_common);
/*
* counts arguments in format string for sprintf view
*/
-
-static inline int
-debug_count_numargs(char *string)
+static inline int debug_count_numargs(char *string)
{
- int numargs=0;
+ int numargs = 0;
- while(*string) {
- if(*string++=='%')
+ while (*string) {
+ if (*string++ == '%')
numargs++;
}
- return(numargs);
+ return numargs;
}
/*
* debug_sprintf_event:
*/
-
-debug_entry_t*
-__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
+debug_entry_t *__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
{
- va_list ap;
- int numargs,idx;
- unsigned long flags;
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
+ unsigned long flags;
+ int numargs, idx;
+ va_list ap;
if (!debug_active || !id->areas)
return NULL;
- numargs=debug_count_numargs(string);
+ numargs = debug_count_numargs(string);
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
+ }
active = get_active_entry(id);
- curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
- va_start(ap,string);
- curr_event->string=string;
- for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
- curr_event->args[idx]=va_arg(ap,long);
+ curr_event = (debug_sprintf_entry_t *) DEBUG_DATA(active);
+ va_start(ap, string);
+ curr_event->string = string;
+ for (idx = 0; idx < min(numargs, (int)(id->buf_size / sizeof(long)) - 1); idx++)
+ curr_event->args[idx] = va_arg(ap, long);
va_end(ap);
debug_finish_entry(id, active, level, 0);
spin_unlock_irqrestore(&id->lock, flags);
@@ -1054,32 +1001,31 @@ EXPORT_SYMBOL(__debug_sprintf_event);
/*
* debug_sprintf_exception:
*/
-
-debug_entry_t*
-__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
+debug_entry_t *__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
{
- va_list ap;
- int numargs,idx;
- unsigned long flags;
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
+ unsigned long flags;
+ int numargs, idx;
+ va_list ap;
if (!debug_active || !id->areas)
return NULL;
- numargs=debug_count_numargs(string);
+ numargs = debug_count_numargs(string);
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
+ }
active = get_active_entry(id);
- curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
- va_start(ap,string);
- curr_event->string=string;
- for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
- curr_event->args[idx]=va_arg(ap,long);
+ curr_event = (debug_sprintf_entry_t *)DEBUG_DATA(active);
+ va_start(ap, string);
+ curr_event->string = string;
+ for (idx = 0; idx < min(numargs, (int)(id->buf_size / sizeof(long)) - 1); idx++)
+ curr_event->args[idx] = va_arg(ap, long);
va_end(ap);
debug_finish_entry(id, active, level, 1);
spin_unlock_irqrestore(&id->lock, flags);
@@ -1091,15 +1037,13 @@ EXPORT_SYMBOL(__debug_sprintf_exception);
/*
* debug_register_view:
*/
-
-int
-debug_register_view(debug_info_t * id, struct debug_view *view)
+int debug_register_view(debug_info_t *id, struct debug_view *view)
{
- int rc = 0;
- int i;
unsigned long flags;
- umode_t mode;
struct dentry *pde;
+ umode_t mode;
+ int rc = 0;
+ int i;
if (!id)
goto out;
@@ -1109,10 +1053,10 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
if (!view->input_proc)
mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
- id , &debug_file_ops);
- if (!pde){
+ id, &debug_file_ops);
+ if (!pde) {
pr_err("Registering view %s/%s failed due to out of "
- "memory\n", id->name,view->name);
+ "memory\n", id->name, view->name);
rc = -1;
goto out;
}
@@ -1140,9 +1084,7 @@ EXPORT_SYMBOL(debug_register_view);
/*
* debug_unregister_view:
*/
-
-int
-debug_unregister_view(debug_info_t * id, struct debug_view *view)
+int debug_unregister_view(debug_info_t *id, struct debug_view *view)
{
struct dentry *dentry = NULL;
unsigned long flags;
@@ -1155,9 +1097,9 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
if (id->views[i] == view)
break;
}
- if (i == DEBUG_MAX_VIEWS)
+ if (i == DEBUG_MAX_VIEWS) {
rc = -1;
- else {
+ } else {
dentry = id->debugfs_entries[i];
id->views[i] = NULL;
id->debugfs_entries[i] = NULL;
@@ -1169,10 +1111,10 @@ out:
}
EXPORT_SYMBOL(debug_unregister_view);
-static inline char *
-debug_get_user_string(const char __user *user_buf, size_t user_len)
+static inline char *debug_get_user_string(const char __user *user_buf,
+ size_t user_len)
{
- char* buffer;
+ char *buffer;
buffer = kmalloc(user_len + 1, GFP_KERNEL);
if (!buffer)
@@ -1186,19 +1128,17 @@ debug_get_user_string(const char __user *user_buf, size_t user_len)
buffer[user_len - 1] = 0;
else
buffer[user_len] = 0;
- return buffer;
+ return buffer;
}
-static inline int
-debug_get_uint(char *buf)
+static inline int debug_get_uint(char *buf)
{
int rc;
buf = skip_spaces(buf);
rc = simple_strtoul(buf, &buf, 10);
- if(*buf){
+ if (*buf)
rc = -EINVAL;
- }
return rc;
}
@@ -1211,9 +1151,8 @@ debug_get_uint(char *buf)
* prints out actual debug level
*/
-static int
-debug_prolog_pages_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf)
+static int debug_prolog_pages_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf)
{
return sprintf(out_buf, "%i\n", id->pages_per_area);
}
@@ -1222,32 +1161,31 @@ debug_prolog_pages_fn(debug_info_t * id,
* reads new size (number of pages per debug area)
*/
-static int
-debug_input_pages_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset)
+static int debug_input_pages_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
+ int rc, new_pages;
char *str;
- int rc,new_pages;
if (user_len > 0x10000)
- user_len = 0x10000;
- if (*offset != 0){
+ user_len = 0x10000;
+ if (*offset != 0) {
rc = -EPIPE;
goto out;
}
- str = debug_get_user_string(user_buf,user_len);
- if(IS_ERR(str)){
+ str = debug_get_user_string(user_buf, user_len);
+ if (IS_ERR(str)) {
rc = PTR_ERR(str);
goto out;
}
new_pages = debug_get_uint(str);
- if(new_pages < 0){
+ if (new_pages < 0) {
rc = -EINVAL;
goto free_str;
}
- rc = debug_set_size(id,id->nr_areas, new_pages);
- if(rc != 0){
+ rc = debug_set_size(id, id->nr_areas, new_pages);
+ if (rc != 0) {
rc = -EINVAL;
goto free_str;
}
@@ -1262,52 +1200,47 @@ out:
/*
* prints out actual debug level
*/
-
-static int
-debug_prolog_level_fn(debug_info_t * id, struct debug_view *view, char *out_buf)
+static int debug_prolog_level_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf)
{
int rc = 0;
- if(id->level == DEBUG_OFF_LEVEL) {
- rc = sprintf(out_buf,"-\n");
- }
- else {
+ if (id->level == DEBUG_OFF_LEVEL)
+ rc = sprintf(out_buf, "-\n");
+ else
rc = sprintf(out_buf, "%i\n", id->level);
- }
return rc;
}
/*
* reads new debug level
*/
-
-static int
-debug_input_level_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset)
+static int debug_input_level_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
+ int rc, new_level;
char *str;
- int rc,new_level;
if (user_len > 0x10000)
- user_len = 0x10000;
- if (*offset != 0){
+ user_len = 0x10000;
+ if (*offset != 0) {
rc = -EPIPE;
goto out;
}
- str = debug_get_user_string(user_buf,user_len);
- if(IS_ERR(str)){
+ str = debug_get_user_string(user_buf, user_len);
+ if (IS_ERR(str)) {
rc = PTR_ERR(str);
goto out;
}
- if(str[0] == '-'){
+ if (str[0] == '-') {
debug_set_level(id, DEBUG_OFF_LEVEL);
rc = user_len;
goto free_str;
} else {
new_level = debug_get_uint(str);
}
- if(new_level < 0) {
+ if (new_level < 0) {
pr_warn("%s is not a valid level for a debug feature\n", str);
rc = -EINVAL;
} else {
@@ -1321,99 +1254,90 @@ out:
return rc; /* number of input characters */
}
-
/*
* flushes debug areas
*/
-
-static void debug_flush(debug_info_t* id, int area)
+static void debug_flush(debug_info_t *id, int area)
{
- unsigned long flags;
- int i,j;
-
- if(!id || !id->areas)
- return;
- spin_lock_irqsave(&id->lock,flags);
- if(area == DEBUG_FLUSH_ALL){
- id->active_area = 0;
- memset(id->active_entries, 0, id->nr_areas * sizeof(int));
- for (i = 0; i < id->nr_areas; i++) {
+ unsigned long flags;
+ int i, j;
+
+ if (!id || !id->areas)
+ return;
+ spin_lock_irqsave(&id->lock, flags);
+ if (area == DEBUG_FLUSH_ALL) {
+ id->active_area = 0;
+ memset(id->active_entries, 0, id->nr_areas * sizeof(int));
+ for (i = 0; i < id->nr_areas; i++) {
id->active_pages[i] = 0;
- for(j = 0; j < id->pages_per_area; j++) {
- memset(id->areas[i][j], 0, PAGE_SIZE);
- }
+ for (j = 0; j < id->pages_per_area; j++)
+ memset(id->areas[i][j], 0, PAGE_SIZE);
}
- } else if(area >= 0 && area < id->nr_areas) {
- id->active_entries[area] = 0;
+ } else if (area >= 0 && area < id->nr_areas) {
+ id->active_entries[area] = 0;
id->active_pages[area] = 0;
- for(i = 0; i < id->pages_per_area; i++) {
- memset(id->areas[area][i],0,PAGE_SIZE);
- }
- }
- spin_unlock_irqrestore(&id->lock,flags);
+ for (i = 0; i < id->pages_per_area; i++)
+ memset(id->areas[area][i], 0, PAGE_SIZE);
+ }
+ spin_unlock_irqrestore(&id->lock, flags);
}
/*
- * view function: flushes debug areas
+ * view function: flushes debug areas
*/
-
-static int
-debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset)
+static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
- char input_buf[1];
- int rc = user_len;
+ char input_buf[1];
+ int rc = user_len;
if (user_len > 0x10000)
- user_len = 0x10000;
- if (*offset != 0){
+ user_len = 0x10000;
+ if (*offset != 0) {
rc = -EPIPE;
- goto out;
+ goto out;
+ }
+ if (copy_from_user(input_buf, user_buf, 1)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ if (input_buf[0] == '-') {
+ debug_flush(id, DEBUG_FLUSH_ALL);
+ goto out;
+ }
+ if (isdigit(input_buf[0])) {
+ int area = ((int) input_buf[0] - (int) '0');
+
+ debug_flush(id, area);
+ goto out;
}
- if (copy_from_user(input_buf, user_buf, 1)){
- rc = -EFAULT;
- goto out;
- }
- if(input_buf[0] == '-') {
- debug_flush(id, DEBUG_FLUSH_ALL);
- goto out;
- }
- if (isdigit(input_buf[0])) {
- int area = ((int) input_buf[0] - (int) '0');
- debug_flush(id, area);
- goto out;
- }
pr_info("Flushing debug data failed because %c is not a valid "
"area\n", input_buf[0]);
out:
- *offset += user_len;
- return rc; /* number of input characters */
+ *offset += user_len;
+ return rc; /* number of input characters */
}
/*
* prints debug header in raw format
*/
-
-static int
-debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
- int area, debug_entry_t * entry, char *out_buf)
+static int debug_raw_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf)
{
- int rc;
+ int rc;
rc = sizeof(debug_entry_t);
- memcpy(out_buf,entry,sizeof(debug_entry_t));
- return rc;
+ memcpy(out_buf, entry, sizeof(debug_entry_t));
+ return rc;
}
/*
* prints debug data in raw format
*/
-
-static int
-debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
+static int debug_raw_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf)
{
int rc;
@@ -1426,20 +1350,17 @@ debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
/*
* prints debug data in hex/ascii format
*/
-
-static int
-debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, const char *in_buf)
+static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
{
int i, rc = 0;
- for (i = 0; i < id->buf_size; i++) {
- rc += sprintf(out_buf + rc, "%02x ",
- ((unsigned char *) in_buf)[i]);
- }
+ for (i = 0; i < id->buf_size; i++)
+ rc += sprintf(out_buf + rc, "%02x ", ((unsigned char *) in_buf)[i]);
rc += sprintf(out_buf + rc, "| ");
for (i = 0; i < id->buf_size; i++) {
unsigned char c = in_buf[i];
+
if (isascii(c) && isprint(c))
rc += sprintf(out_buf + rc, "%c", c);
else
@@ -1452,16 +1373,14 @@ debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
/*
* prints header for debug entry
*/
-
-int
-debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
- int area, debug_entry_t * entry, char *out_buf)
+int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf)
{
unsigned long base, sec, usec;
- char *except_str;
unsigned long caller;
- int rc = 0;
unsigned int level;
+ char *except_str;
+ int rc = 0;
level = entry->id.fields.level;
base = (*(unsigned long *) &tod_clock_base[0]) >> 4;
@@ -1473,7 +1392,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
else
except_str = "-";
caller = (unsigned long) entry->caller;
- rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p ",
+ rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK ",
area, sec, usec, level, except_str,
entry->id.fields.cpuid, (void *)caller);
return rc;
@@ -1487,19 +1406,18 @@ EXPORT_SYMBOL(debug_dflt_header_fn);
#define DEBUG_SPRINTF_MAX_ARGS 10
-static int
-debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, debug_sprintf_entry_t *curr_event)
+static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, debug_sprintf_entry_t *curr_event)
{
- int num_longs, num_used_args = 0,i, rc = 0;
+ int num_longs, num_used_args = 0, i, rc = 0;
int index[DEBUG_SPRINTF_MAX_ARGS];
/* count of longs fit into one entry */
- num_longs = id->buf_size / sizeof(long);
+ num_longs = id->buf_size / sizeof(long);
- if(num_longs < 1)
+ if (num_longs < 1)
goto out; /* bufsize of entry too small */
- if(num_longs == 1) {
+ if (num_longs == 1) {
/* no args, we use only the string */
strcpy(out_buf, curr_event->string);
rc = strlen(curr_event->string);
@@ -1507,22 +1425,20 @@ debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
}
/* number of arguments used for sprintf (without the format string) */
- num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
+ num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
- memset(index,0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));
+ memset(index, 0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));
- for(i = 0; i < num_used_args; i++)
+ for (i = 0; i < num_used_args; i++)
index[i] = i;
- rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
- curr_event->args[index[1]], curr_event->args[index[2]],
- curr_event->args[index[3]], curr_event->args[index[4]],
- curr_event->args[index[5]], curr_event->args[index[6]],
- curr_event->args[index[7]], curr_event->args[index[8]],
- curr_event->args[index[9]]);
-
+ rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
+ curr_event->args[index[1]], curr_event->args[index[2]],
+ curr_event->args[index[3]], curr_event->args[index[4]],
+ curr_event->args[index[5]], curr_event->args[index[6]],
+ curr_event->args[index[7]], curr_event->args[index[8]],
+ curr_event->args[index[9]]);
out:
-
return rc;
}
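
Editorial note (not part of the patch): the reworked debug_event_common()/debug_exception_common() above now split an event larger than the per-entry buffer into consecutive entries instead of truncating it. Below is a minimal, hypothetical usage sketch of the s390 debug feature API declared in asm/debug.h; the driver name "mydrv" and the sizes are placeholders chosen purely for illustration.

#include <linux/init.h>
#include <linux/module.h>
#include <asm/debug.h>

static debug_info_t *mydrv_dbf;

static int __init mydrv_init(void)
{
	char payload[64] = "event data longer than one entry";

	/* 1 page per area, 4 areas, 16 data bytes per entry */
	mydrv_dbf = debug_register("mydrv", 1, 4, 16);
	if (!mydrv_dbf)
		return -ENOMEM;
	debug_register_view(mydrv_dbf, &debug_hex_ascii_view);
	debug_set_level(mydrv_dbf, 3);

	/*
	 * With the chunked write loop introduced above, this 64-byte
	 * event is stored as four consecutive 16-byte entries rather
	 * than being cut off after the first 16 bytes.
	 */
	debug_event(mydrv_dbf, 2, payload, sizeof(payload));
	return 0;
}

static void __exit mydrv_exit(void)
{
	debug_unregister(mydrv_dbf);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
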
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f7e82302a71e..b2c68fbf2634 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Disassemble s390 instructions.
*
@@ -21,52 +22,91 @@
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-
#include <linux/uaccess.h>
+#include <linux/atomic.h>
#include <asm/dis.h>
#include <asm/io.h>
-#include <linux/atomic.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/irq.h>
+/* Type of operand */
+#define OPERAND_GPR 0x1 /* Operand printed as %rx */
+#define OPERAND_FPR 0x2 /* Operand printed as %fx */
+#define OPERAND_AR 0x4 /* Operand printed as %ax */
+#define OPERAND_CR 0x8 /* Operand printed as %cx */
+#define OPERAND_VR 0x10 /* Operand printed as %vx */
+#define OPERAND_DISP 0x20 /* Operand printed as displacement */
+#define OPERAND_BASE 0x40 /* Operand printed as base register */
+#define OPERAND_INDEX 0x80 /* Operand printed as index register */
+#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
+
+struct s390_operand {
+ unsigned char bits; /* The number of bits in the operand. */
+ unsigned char shift; /* The number of bits to shift. */
+ unsigned short flags; /* One bit syntax flags. */
+};
+
+struct s390_insn {
+ union {
+ const char name[5];
+ struct {
+ unsigned char zero;
+ unsigned int offset;
+ } __packed;
+ };
+ unsigned char opfrag;
+ unsigned char format;
+};
+
+struct s390_opcode_offset {
+ unsigned char opcode;
+ unsigned char mask;
+ unsigned char byte;
+ unsigned short offset;
+ unsigned short count;
+} __packed;
+
enum {
- UNUSED, /* Indicates the end of the operand list */
- R_8, /* GPR starting at position 8 */
- R_12, /* GPR starting at position 12 */
- R_16, /* GPR starting at position 16 */
- R_20, /* GPR starting at position 20 */
- R_24, /* GPR starting at position 24 */
- R_28, /* GPR starting at position 28 */
- R_32, /* GPR starting at position 32 */
- F_8, /* FPR starting at position 8 */
- F_12, /* FPR starting at position 12 */
- F_16, /* FPR starting at position 16 */
- F_20, /* FPR starting at position 16 */
- F_24, /* FPR starting at position 24 */
- F_28, /* FPR starting at position 28 */
- F_32, /* FPR starting at position 32 */
+ UNUSED,
A_8, /* Access reg. starting at position 8 */
A_12, /* Access reg. starting at position 12 */
A_24, /* Access reg. starting at position 24 */
A_28, /* Access reg. starting at position 28 */
- C_8, /* Control reg. starting at position 8 */
- C_12, /* Control reg. starting at position 12 */
- V_8, /* Vector reg. starting at position 8, extension bit at 36 */
- V_12, /* Vector reg. starting at position 12, extension bit at 37 */
- V_16, /* Vector reg. starting at position 16, extension bit at 38 */
- V_32, /* Vector reg. starting at position 32, extension bit at 39 */
- W_12, /* Vector reg. at bit 12, extension at bit 37, used as index */
B_16, /* Base register starting at position 16 */
B_32, /* Base register starting at position 32 */
- X_12, /* Index register starting at position 12 */
+ C_8, /* Control reg. starting at position 8 */
+ C_12, /* Control reg. starting at position 12 */
+ D20_20, /* 20 bit displacement starting at 20 */
D_20, /* Displacement starting at position 20 */
D_36, /* Displacement starting at position 36 */
- D20_20, /* 20 bit displacement starting at 20 */
+ F_8, /* FPR starting at position 8 */
+ F_12, /* FPR starting at position 12 */
+ F_16, /* FPR starting at position 16 */
+ F_24, /* FPR starting at position 24 */
+ F_28, /* FPR starting at position 28 */
+ F_32, /* FPR starting at position 32 */
+ I8_8, /* 8 bit signed value starting at 8 */
+ I8_32, /* 8 bit signed value starting at 32 */
+ I16_16, /* 16 bit signed value starting at 16 */
+ I16_32, /* 16 bit signed value starting at 32 */
+ I32_16, /* 32 bit signed value starting at 16 */
+ J12_12, /* 12 bit PC relative offset at 12 */
+ J16_16, /* 16 bit PC relative offset at 16 */
+ J16_32, /* 16 bit PC relative offset at 32 */
+ J24_24, /* 24 bit PC relative offset at 24 */
+ J32_16, /* 32 bit PC relative offset at 16 */
L4_8, /* 4 bit length starting at position 8 */
L4_12, /* 4 bit length starting at position 12 */
L8_8, /* 8 bit length starting at position 8 */
+ R_8, /* GPR starting at position 8 */
+ R_12, /* GPR starting at position 12 */
+ R_16, /* GPR starting at position 16 */
+ R_24, /* GPR starting at position 24 */
+ R_28, /* GPR starting at position 28 */
U4_8, /* 4 bit unsigned value starting at 8 */
U4_12, /* 4 bit unsigned value starting at 12 */
U4_16, /* 4 bit unsigned value starting at 16 */
@@ -78,1651 +118,226 @@ enum {
U8_8, /* 8 bit unsigned value starting at 8 */
U8_16, /* 8 bit unsigned value starting at 16 */
U8_24, /* 8 bit unsigned value starting at 24 */
+ U8_28, /* 8 bit unsigned value starting at 28 */
U8_32, /* 8 bit unsigned value starting at 32 */
- I8_8, /* 8 bit signed value starting at 8 */
- I8_16, /* 8 bit signed value starting at 16 */
- I8_24, /* 8 bit signed value starting at 24 */
- I8_32, /* 8 bit signed value starting at 32 */
- J12_12, /* PC relative offset at 12 */
- I16_16, /* 16 bit signed value starting at 16 */
- I16_32, /* 32 bit signed value starting at 16 */
- U16_16, /* 16 bit unsigned value starting at 16 */
- U16_32, /* 32 bit unsigned value starting at 16 */
- J16_16, /* PC relative jump offset at 16 */
- J16_32, /* PC relative offset at 16 */
- I24_24, /* 24 bit signed value starting at 24 */
- J32_16, /* PC relative long offset at 16 */
- I32_16, /* 32 bit signed value starting at 16 */
- U32_16, /* 32 bit unsigned value starting at 16 */
- M_16, /* 4 bit optional mask starting at 16 */
- M_20, /* 4 bit optional mask starting at 20 */
- M_24, /* 4 bit optional mask starting at 24 */
- M_28, /* 4 bit optional mask starting at 28 */
- M_32, /* 4 bit optional mask starting at 32 */
- RO_28, /* optional GPR starting at position 28 */
-};
-
-/*
- * Enumeration of the different instruction formats.
- * For details consult the principles of operation.
- */
-enum {
- INSTR_INVALID,
- INSTR_E,
- INSTR_IE_UU,
- INSTR_MII_UPI,
- INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
- INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
- INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
- INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
- INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
- INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
- INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
- INSTR_RRE_RR, INSTR_RRE_RR_OPT,
- INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
- INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_FUFF2, INSTR_RRF_M0RR,
- INSTR_RRF_R0RR, INSTR_RRF_R0RR2, INSTR_RRF_RMRR, INSTR_RRF_RURR,
- INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, INSTR_RRF_UUFF,
- INSTR_RRF_UUFR, INSTR_RRF_UURF,
- INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
- INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
- INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
- INSTR_RSI_RRP,
- INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
- INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
- INSTR_RSY_RDRM, INSTR_RSY_RMRD,
- INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
- INSTR_RS_RURD,
- INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
- INSTR_RXF_FRRDF,
- INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
- INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
- INSTR_SIL_RDI, INSTR_SIL_RDU,
- INSTR_SIY_IRD, INSTR_SIY_URD,
- INSTR_SI_URD,
- INSTR_SMI_U0RDP,
- INSTR_SSE_RDRD,
- INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
- INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
- INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
- INSTR_S_00, INSTR_S_RD,
- INSTR_VRI_V0IM, INSTR_VRI_V0I0, INSTR_VRI_V0IIM, INSTR_VRI_VVIM,
- INSTR_VRI_VVV0IM, INSTR_VRI_VVV0I0, INSTR_VRI_VVIMM,
- INSTR_VRR_VV00MMM, INSTR_VRR_VV000MM, INSTR_VRR_VV0000M,
- INSTR_VRR_VV00000, INSTR_VRR_VVV0M0M, INSTR_VRR_VV00M0M,
- INSTR_VRR_VVV000M, INSTR_VRR_VVV000V, INSTR_VRR_VVV0000,
- INSTR_VRR_VVV0MMM, INSTR_VRR_VVV00MM, INSTR_VRR_VVVMM0V,
- INSTR_VRR_VVVM0MV, INSTR_VRR_VVVM00V, INSTR_VRR_VRR0000,
- INSTR_VRS_VVRDM, INSTR_VRS_VVRD0, INSTR_VRS_VRRDM, INSTR_VRS_VRRD0,
- INSTR_VRS_RVRDM,
- INSTR_VRV_VVRDM, INSTR_VRV_VWRDM,
- INSTR_VRX_VRRDM, INSTR_VRX_VRRD0,
+ U12_16, /* 12 bit unsigned value starting at 16 */
+ U16_16, /* 16 bit unsigned value starting at 16 */
+ U16_32, /* 16 bit unsigned value starting at 32 */
+ U32_16, /* 32 bit unsigned value starting at 16 */
+ VX_12, /* Vector index register starting at position 12 */
+ V_8, /* Vector reg. starting at position 8 */
+ V_12, /* Vector reg. starting at position 12 */
+ V_16, /* Vector reg. starting at position 16 */
+ V_32, /* Vector reg. starting at position 32 */
+ X_12, /* Index register starting at position 12 */
};
-static const struct s390_operand operands[] =
-{
- [UNUSED] = { 0, 0, 0 },
- [R_8] = { 4, 8, OPERAND_GPR },
- [R_12] = { 4, 12, OPERAND_GPR },
- [R_16] = { 4, 16, OPERAND_GPR },
- [R_20] = { 4, 20, OPERAND_GPR },
- [R_24] = { 4, 24, OPERAND_GPR },
- [R_28] = { 4, 28, OPERAND_GPR },
- [R_32] = { 4, 32, OPERAND_GPR },
- [F_8] = { 4, 8, OPERAND_FPR },
- [F_12] = { 4, 12, OPERAND_FPR },
- [F_16] = { 4, 16, OPERAND_FPR },
- [F_20] = { 4, 16, OPERAND_FPR },
- [F_24] = { 4, 24, OPERAND_FPR },
- [F_28] = { 4, 28, OPERAND_FPR },
- [F_32] = { 4, 32, OPERAND_FPR },
+static const struct s390_operand operands[] = {
+ [UNUSED] = { 0, 0, 0 },
[A_8] = { 4, 8, OPERAND_AR },
[A_12] = { 4, 12, OPERAND_AR },
[A_24] = { 4, 24, OPERAND_AR },
[A_28] = { 4, 28, OPERAND_AR },
- [C_8] = { 4, 8, OPERAND_CR },
- [C_12] = { 4, 12, OPERAND_CR },
- [V_8] = { 4, 8, OPERAND_VR },
- [V_12] = { 4, 12, OPERAND_VR },
- [V_16] = { 4, 16, OPERAND_VR },
- [V_32] = { 4, 32, OPERAND_VR },
- [W_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
[B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
[B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
- [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
+ [C_8] = { 4, 8, OPERAND_CR },
+ [C_12] = { 4, 12, OPERAND_CR },
+ [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
[D_20] = { 12, 20, OPERAND_DISP },
[D_36] = { 12, 36, OPERAND_DISP },
- [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
+ [F_8] = { 4, 8, OPERAND_FPR },
+ [F_12] = { 4, 12, OPERAND_FPR },
+ [F_16] = { 4, 16, OPERAND_FPR },
+ [F_24] = { 4, 24, OPERAND_FPR },
+ [F_28] = { 4, 28, OPERAND_FPR },
+ [F_32] = { 4, 32, OPERAND_FPR },
+ [I8_8] = { 8, 8, OPERAND_SIGNED },
+ [I8_32] = { 8, 32, OPERAND_SIGNED },
+ [I16_16] = { 16, 16, OPERAND_SIGNED },
+ [I16_32] = { 16, 32, OPERAND_SIGNED },
+ [I32_16] = { 32, 16, OPERAND_SIGNED },
+ [J12_12] = { 12, 12, OPERAND_PCREL },
+ [J16_16] = { 16, 16, OPERAND_PCREL },
+ [J16_32] = { 16, 32, OPERAND_PCREL },
+ [J24_24] = { 24, 24, OPERAND_PCREL },
+ [J32_16] = { 32, 16, OPERAND_PCREL },
[L4_8] = { 4, 8, OPERAND_LENGTH },
- [L4_12] = { 4, 12, OPERAND_LENGTH },
+ [L4_12] = { 4, 12, OPERAND_LENGTH },
[L8_8] = { 8, 8, OPERAND_LENGTH },
+ [R_8] = { 4, 8, OPERAND_GPR },
+ [R_12] = { 4, 12, OPERAND_GPR },
+ [R_16] = { 4, 16, OPERAND_GPR },
+ [R_24] = { 4, 24, OPERAND_GPR },
+ [R_28] = { 4, 28, OPERAND_GPR },
[U4_8] = { 4, 8, 0 },
- [U4_12] = { 4, 12, 0 },
- [U4_16] = { 4, 16, 0 },
- [U4_20] = { 4, 20, 0 },
- [U4_24] = { 4, 24, 0 },
- [U4_28] = { 4, 28, 0 },
- [U4_32] = { 4, 32, 0 },
- [U4_36] = { 4, 36, 0 },
+ [U4_12] = { 4, 12, 0 },
+ [U4_16] = { 4, 16, 0 },
+ [U4_20] = { 4, 20, 0 },
+ [U4_24] = { 4, 24, 0 },
+ [U4_28] = { 4, 28, 0 },
+ [U4_32] = { 4, 32, 0 },
+ [U4_36] = { 4, 36, 0 },
[U8_8] = { 8, 8, 0 },
- [U8_16] = { 8, 16, 0 },
- [U8_24] = { 8, 24, 0 },
- [U8_32] = { 8, 32, 0 },
- [J12_12] = { 12, 12, OPERAND_PCREL },
- [I8_8] = { 8, 8, OPERAND_SIGNED },
- [I8_16] = { 8, 16, OPERAND_SIGNED },
- [I8_24] = { 8, 24, OPERAND_SIGNED },
- [I8_32] = { 8, 32, OPERAND_SIGNED },
- [I16_32] = { 16, 32, OPERAND_SIGNED },
- [I16_16] = { 16, 16, OPERAND_SIGNED },
+ [U8_16] = { 8, 16, 0 },
+ [U8_24] = { 8, 24, 0 },
+ [U8_28] = { 8, 28, 0 },
+ [U8_32] = { 8, 32, 0 },
+ [U12_16] = { 12, 16, 0 },
[U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 },
- [J16_16] = { 16, 16, OPERAND_PCREL },
- [J16_32] = { 16, 32, OPERAND_PCREL },
- [I24_24] = { 24, 24, OPERAND_SIGNED },
- [J32_16] = { 32, 16, OPERAND_PCREL },
- [I32_16] = { 32, 16, OPERAND_SIGNED },
[U32_16] = { 32, 16, 0 },
- [M_16] = { 4, 16, 0 },
- [M_20] = { 4, 20, 0 },
- [M_24] = { 4, 24, 0 },
- [M_28] = { 4, 28, 0 },
- [M_32] = { 4, 32, 0 },
- [RO_28] = { 4, 28, OPERAND_GPR }
-};
-
-static const unsigned char formats[][7] = {
- [INSTR_E] = { 0xff, 0,0,0,0,0,0 },
- [INSTR_IE_UU] = { 0xff, U4_24,U4_28,0,0,0,0 },
- [INSTR_MII_UPI] = { 0xff, U4_8,J12_12,I24_24 },
- [INSTR_RIE_R0IU] = { 0xff, R_8,I16_16,U4_32,0,0,0 },
- [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
- [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
- [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
- [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
- [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
- [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
- [INSTR_RIE_RUPU] = { 0xff, R_8,U8_32,U4_12,J16_16,0,0 },
- [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
- [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
- [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
- [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 },
- [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 },
- [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 },
- [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 },
- [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 },
- [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 },
- [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 },
- [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 },
- [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 },
- [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 },
- [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 },
- [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 },
- [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 },
- [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 },
- [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 },
- [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 },
- [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 },
- [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 },
- [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 },
- [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },
- [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 },
- [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 },
- [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },
- [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
- [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
- [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
- [INSTR_RRF_FUFF2] = { 0xff, F_24,F_28,F_16,U4_20,0,0 },
- [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
- [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
- [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
- [INSTR_RRF_RMRR] = { 0xff, R_24,R_16,R_28,M_20,0,0 },
- [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
- [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
- [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
- [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
- [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
- [INSTR_RRF_UUFR] = { 0xff, F_24,U4_16,R_28,U4_20,0,0 },
- [INSTR_RRF_UURF] = { 0xff, R_24,U4_16,F_28,U4_20,0,0 },
- [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
- [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
- [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
- [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 },
- [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 },
- [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 },
- [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 },
- [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
- [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
- [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
- [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
- [INSTR_RSL_LRDFU] = { 0xff, F_32,D_20,L4_8,B_16,U4_36,0 },
- [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
- [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
- [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
- [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
- [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
- [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
- [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
- [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
- [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
- [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
- [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
- [INSTR_RXE_RRRDM] = { 0xff, R_8,D_20,X_12,B_16,M_32,0 },
- [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
- [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
- [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
- [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 },
- [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
- [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
- [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 },
- [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 },
- [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 },
- [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
- [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
- [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
- [INSTR_SMI_U0RDP] = { 0xff, U4_8,J16_32,D_20,B_16,0,0 },
- [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
- [INSTR_SSF_RRDRD] = { 0x0f, D_20,B_16,D_36,B_32,R_8,0 },
- [INSTR_SSF_RRDRD2]= { 0x0f, R_8,D_20,B_16,D_36,B_32,0 },
- [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
- [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
- [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
- [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
- [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
- [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
- [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
- [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
- [INSTR_VRI_V0IM] = { 0xff, V_8,I16_16,M_32,0,0,0 },
- [INSTR_VRI_V0I0] = { 0xff, V_8,I16_16,0,0,0,0 },
- [INSTR_VRI_V0IIM] = { 0xff, V_8,I8_16,I8_24,M_32,0,0 },
- [INSTR_VRI_VVIM] = { 0xff, V_8,I16_16,V_12,M_32,0,0 },
- [INSTR_VRI_VVV0IM]= { 0xff, V_8,V_12,V_16,I8_24,M_32,0 },
- [INSTR_VRI_VVV0I0]= { 0xff, V_8,V_12,V_16,I8_24,0,0 },
- [INSTR_VRI_VVIMM] = { 0xff, V_8,V_12,I16_16,M_32,M_28,0 },
- [INSTR_VRR_VV00MMM]={ 0xff, V_8,V_12,M_32,M_28,M_24,0 },
- [INSTR_VRR_VV000MM]={ 0xff, V_8,V_12,M_32,M_28,0,0 },
- [INSTR_VRR_VV0000M]={ 0xff, V_8,V_12,M_32,0,0,0 },
- [INSTR_VRR_VV00000]={ 0xff, V_8,V_12,0,0,0,0 },
- [INSTR_VRR_VVV0M0M]={ 0xff, V_8,V_12,V_16,M_32,M_24,0 },
- [INSTR_VRR_VV00M0M]={ 0xff, V_8,V_12,M_32,M_24,0,0 },
- [INSTR_VRR_VVV000M]={ 0xff, V_8,V_12,V_16,M_32,0,0 },
- [INSTR_VRR_VVV000V]={ 0xff, V_8,V_12,V_16,V_32,0,0 },
- [INSTR_VRR_VVV0000]={ 0xff, V_8,V_12,V_16,0,0,0 },
- [INSTR_VRR_VVV0MMM]={ 0xff, V_8,V_12,V_16,M_32,M_28,M_24 },
- [INSTR_VRR_VVV00MM]={ 0xff, V_8,V_12,V_16,M_32,M_28,0 },
- [INSTR_VRR_VVVMM0V]={ 0xff, V_8,V_12,V_16,V_32,M_20,M_24 },
- [INSTR_VRR_VVVM0MV]={ 0xff, V_8,V_12,V_16,V_32,M_28,M_20 },
- [INSTR_VRR_VVVM00V]={ 0xff, V_8,V_12,V_16,V_32,M_20,0 },
- [INSTR_VRR_VRR0000]={ 0xff, V_8,R_12,R_16,0,0,0 },
- [INSTR_VRS_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
- [INSTR_VRS_VVRD0] = { 0xff, V_8,V_12,D_20,B_16,0,0 },
- [INSTR_VRS_VRRDM] = { 0xff, V_8,R_12,D_20,B_16,M_32,0 },
- [INSTR_VRS_VRRD0] = { 0xff, V_8,R_12,D_20,B_16,0,0 },
- [INSTR_VRS_RVRDM] = { 0xff, R_8,V_12,D_20,B_16,M_32,0 },
- [INSTR_VRV_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
- [INSTR_VRV_VWRDM] = { 0xff, V_8,D_20,W_12,B_16,M_32,0 },
- [INSTR_VRX_VRRDM] = { 0xff, V_8,D_20,X_12,B_16,M_32,0 },
- [INSTR_VRX_VRRD0] = { 0xff, V_8,D_20,X_12,B_16,0,0 },
-};
-
-enum {
- LONG_INSN_ALGHSIK,
- LONG_INSN_ALHHHR,
- LONG_INSN_ALHHLR,
- LONG_INSN_ALHSIK,
- LONG_INSN_ALSIHN,
- LONG_INSN_CDFBRA,
- LONG_INSN_CDGBRA,
- LONG_INSN_CDGTRA,
- LONG_INSN_CDLFBR,
- LONG_INSN_CDLFTR,
- LONG_INSN_CDLGBR,
- LONG_INSN_CDLGTR,
- LONG_INSN_CEFBRA,
- LONG_INSN_CEGBRA,
- LONG_INSN_CELFBR,
- LONG_INSN_CELGBR,
- LONG_INSN_CFDBRA,
- LONG_INSN_CFEBRA,
- LONG_INSN_CFXBRA,
- LONG_INSN_CGDBRA,
- LONG_INSN_CGDTRA,
- LONG_INSN_CGEBRA,
- LONG_INSN_CGXBRA,
- LONG_INSN_CGXTRA,
- LONG_INSN_CLFDBR,
- LONG_INSN_CLFDTR,
- LONG_INSN_CLFEBR,
- LONG_INSN_CLFHSI,
- LONG_INSN_CLFXBR,
- LONG_INSN_CLFXTR,
- LONG_INSN_CLGDBR,
- LONG_INSN_CLGDTR,
- LONG_INSN_CLGEBR,
- LONG_INSN_CLGFRL,
- LONG_INSN_CLGHRL,
- LONG_INSN_CLGHSI,
- LONG_INSN_CLGXBR,
- LONG_INSN_CLGXTR,
- LONG_INSN_CLHHSI,
- LONG_INSN_CXFBRA,
- LONG_INSN_CXGBRA,
- LONG_INSN_CXGTRA,
- LONG_INSN_CXLFBR,
- LONG_INSN_CXLFTR,
- LONG_INSN_CXLGBR,
- LONG_INSN_CXLGTR,
- LONG_INSN_FIDBRA,
- LONG_INSN_FIEBRA,
- LONG_INSN_FIXBRA,
- LONG_INSN_LDXBRA,
- LONG_INSN_LEDBRA,
- LONG_INSN_LEXBRA,
- LONG_INSN_LLGFAT,
- LONG_INSN_LLGFRL,
- LONG_INSN_LLGHRL,
- LONG_INSN_LLGTAT,
- LONG_INSN_POPCNT,
- LONG_INSN_RIEMIT,
- LONG_INSN_RINEXT,
- LONG_INSN_RISBGN,
- LONG_INSN_RISBHG,
- LONG_INSN_RISBLG,
- LONG_INSN_SLHHHR,
- LONG_INSN_SLHHLR,
- LONG_INSN_TABORT,
- LONG_INSN_TBEGIN,
- LONG_INSN_TBEGINC,
- LONG_INSN_PCISTG,
- LONG_INSN_MPCIFC,
- LONG_INSN_STPCIFC,
- LONG_INSN_PCISTB,
- LONG_INSN_VPOPCT,
- LONG_INSN_VERLLV,
- LONG_INSN_VESRAV,
- LONG_INSN_VESRLV,
- LONG_INSN_VSBCBI,
- LONG_INSN_STCCTM
-};
-
-static char *long_insn_name[] = {
- [LONG_INSN_ALGHSIK] = "alghsik",
- [LONG_INSN_ALHHHR] = "alhhhr",
- [LONG_INSN_ALHHLR] = "alhhlr",
- [LONG_INSN_ALHSIK] = "alhsik",
- [LONG_INSN_ALSIHN] = "alsihn",
- [LONG_INSN_CDFBRA] = "cdfbra",
- [LONG_INSN_CDGBRA] = "cdgbra",
- [LONG_INSN_CDGTRA] = "cdgtra",
- [LONG_INSN_CDLFBR] = "cdlfbr",
- [LONG_INSN_CDLFTR] = "cdlftr",
- [LONG_INSN_CDLGBR] = "cdlgbr",
- [LONG_INSN_CDLGTR] = "cdlgtr",
- [LONG_INSN_CEFBRA] = "cefbra",
- [LONG_INSN_CEGBRA] = "cegbra",
- [LONG_INSN_CELFBR] = "celfbr",
- [LONG_INSN_CELGBR] = "celgbr",
- [LONG_INSN_CFDBRA] = "cfdbra",
- [LONG_INSN_CFEBRA] = "cfebra",
- [LONG_INSN_CFXBRA] = "cfxbra",
- [LONG_INSN_CGDBRA] = "cgdbra",
- [LONG_INSN_CGDTRA] = "cgdtra",
- [LONG_INSN_CGEBRA] = "cgebra",
- [LONG_INSN_CGXBRA] = "cgxbra",
- [LONG_INSN_CGXTRA] = "cgxtra",
- [LONG_INSN_CLFDBR] = "clfdbr",
- [LONG_INSN_CLFDTR] = "clfdtr",
- [LONG_INSN_CLFEBR] = "clfebr",
- [LONG_INSN_CLFHSI] = "clfhsi",
- [LONG_INSN_CLFXBR] = "clfxbr",
- [LONG_INSN_CLFXTR] = "clfxtr",
- [LONG_INSN_CLGDBR] = "clgdbr",
- [LONG_INSN_CLGDTR] = "clgdtr",
- [LONG_INSN_CLGEBR] = "clgebr",
- [LONG_INSN_CLGFRL] = "clgfrl",
- [LONG_INSN_CLGHRL] = "clghrl",
- [LONG_INSN_CLGHSI] = "clghsi",
- [LONG_INSN_CLGXBR] = "clgxbr",
- [LONG_INSN_CLGXTR] = "clgxtr",
- [LONG_INSN_CLHHSI] = "clhhsi",
- [LONG_INSN_CXFBRA] = "cxfbra",
- [LONG_INSN_CXGBRA] = "cxgbra",
- [LONG_INSN_CXGTRA] = "cxgtra",
- [LONG_INSN_CXLFBR] = "cxlfbr",
- [LONG_INSN_CXLFTR] = "cxlftr",
- [LONG_INSN_CXLGBR] = "cxlgbr",
- [LONG_INSN_CXLGTR] = "cxlgtr",
- [LONG_INSN_FIDBRA] = "fidbra",
- [LONG_INSN_FIEBRA] = "fiebra",
- [LONG_INSN_FIXBRA] = "fixbra",
- [LONG_INSN_LDXBRA] = "ldxbra",
- [LONG_INSN_LEDBRA] = "ledbra",
- [LONG_INSN_LEXBRA] = "lexbra",
- [LONG_INSN_LLGFAT] = "llgfat",
- [LONG_INSN_LLGFRL] = "llgfrl",
- [LONG_INSN_LLGHRL] = "llghrl",
- [LONG_INSN_LLGTAT] = "llgtat",
- [LONG_INSN_POPCNT] = "popcnt",
- [LONG_INSN_RIEMIT] = "riemit",
- [LONG_INSN_RINEXT] = "rinext",
- [LONG_INSN_RISBGN] = "risbgn",
- [LONG_INSN_RISBHG] = "risbhg",
- [LONG_INSN_RISBLG] = "risblg",
- [LONG_INSN_SLHHHR] = "slhhhr",
- [LONG_INSN_SLHHLR] = "slhhlr",
- [LONG_INSN_TABORT] = "tabort",
- [LONG_INSN_TBEGIN] = "tbegin",
- [LONG_INSN_TBEGINC] = "tbeginc",
- [LONG_INSN_PCISTG] = "pcistg",
- [LONG_INSN_MPCIFC] = "mpcifc",
- [LONG_INSN_STPCIFC] = "stpcifc",
- [LONG_INSN_PCISTB] = "pcistb",
- [LONG_INSN_VPOPCT] = "vpopct",
- [LONG_INSN_VERLLV] = "verllv",
- [LONG_INSN_VESRAV] = "vesrav",
- [LONG_INSN_VESRLV] = "vesrlv",
- [LONG_INSN_VSBCBI] = "vsbcbi",
- [LONG_INSN_STCCTM] = "stcctm",
-};
-
-static struct s390_insn opcode[] = {
- { "bprp", 0xc5, INSTR_MII_UPI },
- { "bpp", 0xc7, INSTR_SMI_U0RDP },
- { "trtr", 0xd0, INSTR_SS_L0RDRD },
- { "lmd", 0xef, INSTR_SS_RRRDRD3 },
- { "spm", 0x04, INSTR_RR_R0 },
- { "balr", 0x05, INSTR_RR_RR },
- { "bctr", 0x06, INSTR_RR_RR },
- { "bcr", 0x07, INSTR_RR_UR },
- { "svc", 0x0a, INSTR_RR_U0 },
- { "bsm", 0x0b, INSTR_RR_RR },
- { "bassm", 0x0c, INSTR_RR_RR },
- { "basr", 0x0d, INSTR_RR_RR },
- { "mvcl", 0x0e, INSTR_RR_RR },
- { "clcl", 0x0f, INSTR_RR_RR },
- { "lpr", 0x10, INSTR_RR_RR },
- { "lnr", 0x11, INSTR_RR_RR },
- { "ltr", 0x12, INSTR_RR_RR },
- { "lcr", 0x13, INSTR_RR_RR },
- { "nr", 0x14, INSTR_RR_RR },
- { "clr", 0x15, INSTR_RR_RR },
- { "or", 0x16, INSTR_RR_RR },
- { "xr", 0x17, INSTR_RR_RR },
- { "lr", 0x18, INSTR_RR_RR },
- { "cr", 0x19, INSTR_RR_RR },
- { "ar", 0x1a, INSTR_RR_RR },
- { "sr", 0x1b, INSTR_RR_RR },
- { "mr", 0x1c, INSTR_RR_RR },
- { "dr", 0x1d, INSTR_RR_RR },
- { "alr", 0x1e, INSTR_RR_RR },
- { "slr", 0x1f, INSTR_RR_RR },
- { "lpdr", 0x20, INSTR_RR_FF },
- { "lndr", 0x21, INSTR_RR_FF },
- { "ltdr", 0x22, INSTR_RR_FF },
- { "lcdr", 0x23, INSTR_RR_FF },
- { "hdr", 0x24, INSTR_RR_FF },
- { "ldxr", 0x25, INSTR_RR_FF },
- { "mxr", 0x26, INSTR_RR_FF },
- { "mxdr", 0x27, INSTR_RR_FF },
- { "ldr", 0x28, INSTR_RR_FF },
- { "cdr", 0x29, INSTR_RR_FF },
- { "adr", 0x2a, INSTR_RR_FF },
- { "sdr", 0x2b, INSTR_RR_FF },
- { "mdr", 0x2c, INSTR_RR_FF },
- { "ddr", 0x2d, INSTR_RR_FF },
- { "awr", 0x2e, INSTR_RR_FF },
- { "swr", 0x2f, INSTR_RR_FF },
- { "lper", 0x30, INSTR_RR_FF },
- { "lner", 0x31, INSTR_RR_FF },
- { "lter", 0x32, INSTR_RR_FF },
- { "lcer", 0x33, INSTR_RR_FF },
- { "her", 0x34, INSTR_RR_FF },
- { "ledr", 0x35, INSTR_RR_FF },
- { "axr", 0x36, INSTR_RR_FF },
- { "sxr", 0x37, INSTR_RR_FF },
- { "ler", 0x38, INSTR_RR_FF },
- { "cer", 0x39, INSTR_RR_FF },
- { "aer", 0x3a, INSTR_RR_FF },
- { "ser", 0x3b, INSTR_RR_FF },
- { "mder", 0x3c, INSTR_RR_FF },
- { "der", 0x3d, INSTR_RR_FF },
- { "aur", 0x3e, INSTR_RR_FF },
- { "sur", 0x3f, INSTR_RR_FF },
- { "sth", 0x40, INSTR_RX_RRRD },
- { "la", 0x41, INSTR_RX_RRRD },
- { "stc", 0x42, INSTR_RX_RRRD },
- { "ic", 0x43, INSTR_RX_RRRD },
- { "ex", 0x44, INSTR_RX_RRRD },
- { "bal", 0x45, INSTR_RX_RRRD },
- { "bct", 0x46, INSTR_RX_RRRD },
- { "bc", 0x47, INSTR_RX_URRD },
- { "lh", 0x48, INSTR_RX_RRRD },
- { "ch", 0x49, INSTR_RX_RRRD },
- { "ah", 0x4a, INSTR_RX_RRRD },
- { "sh", 0x4b, INSTR_RX_RRRD },
- { "mh", 0x4c, INSTR_RX_RRRD },
- { "bas", 0x4d, INSTR_RX_RRRD },
- { "cvd", 0x4e, INSTR_RX_RRRD },
- { "cvb", 0x4f, INSTR_RX_RRRD },
- { "st", 0x50, INSTR_RX_RRRD },
- { "lae", 0x51, INSTR_RX_RRRD },
- { "n", 0x54, INSTR_RX_RRRD },
- { "cl", 0x55, INSTR_RX_RRRD },
- { "o", 0x56, INSTR_RX_RRRD },
- { "x", 0x57, INSTR_RX_RRRD },
- { "l", 0x58, INSTR_RX_RRRD },
- { "c", 0x59, INSTR_RX_RRRD },
- { "a", 0x5a, INSTR_RX_RRRD },
- { "s", 0x5b, INSTR_RX_RRRD },
- { "m", 0x5c, INSTR_RX_RRRD },
- { "d", 0x5d, INSTR_RX_RRRD },
- { "al", 0x5e, INSTR_RX_RRRD },
- { "sl", 0x5f, INSTR_RX_RRRD },
- { "std", 0x60, INSTR_RX_FRRD },
- { "mxd", 0x67, INSTR_RX_FRRD },
- { "ld", 0x68, INSTR_RX_FRRD },
- { "cd", 0x69, INSTR_RX_FRRD },
- { "ad", 0x6a, INSTR_RX_FRRD },
- { "sd", 0x6b, INSTR_RX_FRRD },
- { "md", 0x6c, INSTR_RX_FRRD },
- { "dd", 0x6d, INSTR_RX_FRRD },
- { "aw", 0x6e, INSTR_RX_FRRD },
- { "sw", 0x6f, INSTR_RX_FRRD },
- { "ste", 0x70, INSTR_RX_FRRD },
- { "ms", 0x71, INSTR_RX_RRRD },
- { "le", 0x78, INSTR_RX_FRRD },
- { "ce", 0x79, INSTR_RX_FRRD },
- { "ae", 0x7a, INSTR_RX_FRRD },
- { "se", 0x7b, INSTR_RX_FRRD },
- { "mde", 0x7c, INSTR_RX_FRRD },
- { "de", 0x7d, INSTR_RX_FRRD },
- { "au", 0x7e, INSTR_RX_FRRD },
- { "su", 0x7f, INSTR_RX_FRRD },
- { "ssm", 0x80, INSTR_S_RD },
- { "lpsw", 0x82, INSTR_S_RD },
- { "diag", 0x83, INSTR_RS_RRRD },
- { "brxh", 0x84, INSTR_RSI_RRP },
- { "brxle", 0x85, INSTR_RSI_RRP },
- { "bxh", 0x86, INSTR_RS_RRRD },
- { "bxle", 0x87, INSTR_RS_RRRD },
- { "srl", 0x88, INSTR_RS_R0RD },
- { "sll", 0x89, INSTR_RS_R0RD },
- { "sra", 0x8a, INSTR_RS_R0RD },
- { "sla", 0x8b, INSTR_RS_R0RD },
- { "srdl", 0x8c, INSTR_RS_R0RD },
- { "sldl", 0x8d, INSTR_RS_R0RD },
- { "srda", 0x8e, INSTR_RS_R0RD },
- { "slda", 0x8f, INSTR_RS_R0RD },
- { "stm", 0x90, INSTR_RS_RRRD },
- { "tm", 0x91, INSTR_SI_URD },
- { "mvi", 0x92, INSTR_SI_URD },
- { "ts", 0x93, INSTR_S_RD },
- { "ni", 0x94, INSTR_SI_URD },
- { "cli", 0x95, INSTR_SI_URD },
- { "oi", 0x96, INSTR_SI_URD },
- { "xi", 0x97, INSTR_SI_URD },
- { "lm", 0x98, INSTR_RS_RRRD },
- { "trace", 0x99, INSTR_RS_RRRD },
- { "lam", 0x9a, INSTR_RS_AARD },
- { "stam", 0x9b, INSTR_RS_AARD },
- { "mvcle", 0xa8, INSTR_RS_RRRD },
- { "clcle", 0xa9, INSTR_RS_RRRD },
- { "stnsm", 0xac, INSTR_SI_URD },
- { "stosm", 0xad, INSTR_SI_URD },
- { "sigp", 0xae, INSTR_RS_RRRD },
- { "mc", 0xaf, INSTR_SI_URD },
- { "lra", 0xb1, INSTR_RX_RRRD },
- { "stctl", 0xb6, INSTR_RS_CCRD },
- { "lctl", 0xb7, INSTR_RS_CCRD },
- { "cs", 0xba, INSTR_RS_RRRD },
- { "cds", 0xbb, INSTR_RS_RRRD },
- { "clm", 0xbd, INSTR_RS_RURD },
- { "stcm", 0xbe, INSTR_RS_RURD },
- { "icm", 0xbf, INSTR_RS_RURD },
- { "mvn", 0xd1, INSTR_SS_L0RDRD },
- { "mvc", 0xd2, INSTR_SS_L0RDRD },
- { "mvz", 0xd3, INSTR_SS_L0RDRD },
- { "nc", 0xd4, INSTR_SS_L0RDRD },
- { "clc", 0xd5, INSTR_SS_L0RDRD },
- { "oc", 0xd6, INSTR_SS_L0RDRD },
- { "xc", 0xd7, INSTR_SS_L0RDRD },
- { "mvck", 0xd9, INSTR_SS_RRRDRD },
- { "mvcp", 0xda, INSTR_SS_RRRDRD },
- { "mvcs", 0xdb, INSTR_SS_RRRDRD },
- { "tr", 0xdc, INSTR_SS_L0RDRD },
- { "trt", 0xdd, INSTR_SS_L0RDRD },
- { "ed", 0xde, INSTR_SS_L0RDRD },
- { "edmk", 0xdf, INSTR_SS_L0RDRD },
- { "pku", 0xe1, INSTR_SS_L0RDRD },
- { "unpku", 0xe2, INSTR_SS_L0RDRD },
- { "mvcin", 0xe8, INSTR_SS_L0RDRD },
- { "pka", 0xe9, INSTR_SS_L0RDRD },
- { "unpka", 0xea, INSTR_SS_L0RDRD },
- { "plo", 0xee, INSTR_SS_RRRDRD2 },
- { "srp", 0xf0, INSTR_SS_LIRDRD },
- { "mvo", 0xf1, INSTR_SS_LLRDRD },
- { "pack", 0xf2, INSTR_SS_LLRDRD },
- { "unpk", 0xf3, INSTR_SS_LLRDRD },
- { "zap", 0xf8, INSTR_SS_LLRDRD },
- { "cp", 0xf9, INSTR_SS_LLRDRD },
- { "ap", 0xfa, INSTR_SS_LLRDRD },
- { "sp", 0xfb, INSTR_SS_LLRDRD },
- { "mp", 0xfc, INSTR_SS_LLRDRD },
- { "dp", 0xfd, INSTR_SS_LLRDRD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_01[] = {
- { "ptff", 0x04, INSTR_E },
- { "pfpo", 0x0a, INSTR_E },
- { "sam64", 0x0e, INSTR_E },
- { "pr", 0x01, INSTR_E },
- { "upt", 0x02, INSTR_E },
- { "sckpf", 0x07, INSTR_E },
- { "tam", 0x0b, INSTR_E },
- { "sam24", 0x0c, INSTR_E },
- { "sam31", 0x0d, INSTR_E },
- { "trap2", 0xff, INSTR_E },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_a5[] = {
- { "iihh", 0x00, INSTR_RI_RU },
- { "iihl", 0x01, INSTR_RI_RU },
- { "iilh", 0x02, INSTR_RI_RU },
- { "iill", 0x03, INSTR_RI_RU },
- { "nihh", 0x04, INSTR_RI_RU },
- { "nihl", 0x05, INSTR_RI_RU },
- { "nilh", 0x06, INSTR_RI_RU },
- { "nill", 0x07, INSTR_RI_RU },
- { "oihh", 0x08, INSTR_RI_RU },
- { "oihl", 0x09, INSTR_RI_RU },
- { "oilh", 0x0a, INSTR_RI_RU },
- { "oill", 0x0b, INSTR_RI_RU },
- { "llihh", 0x0c, INSTR_RI_RU },
- { "llihl", 0x0d, INSTR_RI_RU },
- { "llilh", 0x0e, INSTR_RI_RU },
- { "llill", 0x0f, INSTR_RI_RU },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_a7[] = {
- { "tmhh", 0x02, INSTR_RI_RU },
- { "tmhl", 0x03, INSTR_RI_RU },
- { "brctg", 0x07, INSTR_RI_RP },
- { "lghi", 0x09, INSTR_RI_RI },
- { "aghi", 0x0b, INSTR_RI_RI },
- { "mghi", 0x0d, INSTR_RI_RI },
- { "cghi", 0x0f, INSTR_RI_RI },
- { "tmlh", 0x00, INSTR_RI_RU },
- { "tmll", 0x01, INSTR_RI_RU },
- { "brc", 0x04, INSTR_RI_UP },
- { "bras", 0x05, INSTR_RI_RP },
- { "brct", 0x06, INSTR_RI_RP },
- { "lhi", 0x08, INSTR_RI_RI },
- { "ahi", 0x0a, INSTR_RI_RI },
- { "mhi", 0x0c, INSTR_RI_RI },
- { "chi", 0x0e, INSTR_RI_RI },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_aa[] = {
- { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
- { "rion", 0x01, INSTR_RI_RI },
- { "tric", 0x02, INSTR_RI_RI },
- { "rioff", 0x03, INSTR_RI_RI },
- { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_b2[] = {
- { "stckf", 0x7c, INSTR_S_RD },
- { "lpp", 0x80, INSTR_S_RD },
- { "lcctl", 0x84, INSTR_S_RD },
- { "lpctl", 0x85, INSTR_S_RD },
- { "qsi", 0x86, INSTR_S_RD },
- { "lsctl", 0x87, INSTR_S_RD },
- { "qctri", 0x8e, INSTR_S_RD },
- { "stfle", 0xb0, INSTR_S_RD },
- { "lpswe", 0xb2, INSTR_S_RD },
- { "srnmb", 0xb8, INSTR_S_RD },
- { "srnmt", 0xb9, INSTR_S_RD },
- { "lfas", 0xbd, INSTR_S_RD },
- { "scctr", 0xe0, INSTR_RRE_RR },
- { "spctr", 0xe1, INSTR_RRE_RR },
- { "ecctr", 0xe4, INSTR_RRE_RR },
- { "epctr", 0xe5, INSTR_RRE_RR },
- { "ppa", 0xe8, INSTR_RRF_U0RR },
- { "etnd", 0xec, INSTR_RRE_R0 },
- { "ecpga", 0xed, INSTR_RRE_RR },
- { "tend", 0xf8, INSTR_S_00 },
- { "niai", 0xfa, INSTR_IE_UU },
- { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
- { "stidp", 0x02, INSTR_S_RD },
- { "sck", 0x04, INSTR_S_RD },
- { "stck", 0x05, INSTR_S_RD },
- { "sckc", 0x06, INSTR_S_RD },
- { "stckc", 0x07, INSTR_S_RD },
- { "spt", 0x08, INSTR_S_RD },
- { "stpt", 0x09, INSTR_S_RD },
- { "spka", 0x0a, INSTR_S_RD },
- { "ipk", 0x0b, INSTR_S_00 },
- { "ptlb", 0x0d, INSTR_S_00 },
- { "spx", 0x10, INSTR_S_RD },
- { "stpx", 0x11, INSTR_S_RD },
- { "stap", 0x12, INSTR_S_RD },
- { "sie", 0x14, INSTR_S_RD },
- { "pc", 0x18, INSTR_S_RD },
- { "sac", 0x19, INSTR_S_RD },
- { "cfc", 0x1a, INSTR_S_RD },
- { "servc", 0x20, INSTR_RRE_RR },
- { "ipte", 0x21, INSTR_RRE_RR },
- { "ipm", 0x22, INSTR_RRE_R0 },
- { "ivsk", 0x23, INSTR_RRE_RR },
- { "iac", 0x24, INSTR_RRE_R0 },
- { "ssar", 0x25, INSTR_RRE_R0 },
- { "epar", 0x26, INSTR_RRE_R0 },
- { "esar", 0x27, INSTR_RRE_R0 },
- { "pt", 0x28, INSTR_RRE_RR },
- { "iske", 0x29, INSTR_RRE_RR },
- { "rrbe", 0x2a, INSTR_RRE_RR },
- { "sske", 0x2b, INSTR_RRF_M0RR },
- { "tb", 0x2c, INSTR_RRE_0R },
- { "dxr", 0x2d, INSTR_RRE_FF },
- { "pgin", 0x2e, INSTR_RRE_RR },
- { "pgout", 0x2f, INSTR_RRE_RR },
- { "csch", 0x30, INSTR_S_00 },
- { "hsch", 0x31, INSTR_S_00 },
- { "msch", 0x32, INSTR_S_RD },
- { "ssch", 0x33, INSTR_S_RD },
- { "stsch", 0x34, INSTR_S_RD },
- { "tsch", 0x35, INSTR_S_RD },
- { "tpi", 0x36, INSTR_S_RD },
- { "sal", 0x37, INSTR_S_00 },
- { "rsch", 0x38, INSTR_S_00 },
- { "stcrw", 0x39, INSTR_S_RD },
- { "stcps", 0x3a, INSTR_S_RD },
- { "rchp", 0x3b, INSTR_S_00 },
- { "schm", 0x3c, INSTR_S_00 },
- { "bakr", 0x40, INSTR_RRE_RR },
- { "cksm", 0x41, INSTR_RRE_RR },
- { "sqdr", 0x44, INSTR_RRE_FF },
- { "sqer", 0x45, INSTR_RRE_FF },
- { "stura", 0x46, INSTR_RRE_RR },
- { "msta", 0x47, INSTR_RRE_R0 },
- { "palb", 0x48, INSTR_RRE_00 },
- { "ereg", 0x49, INSTR_RRE_RR },
- { "esta", 0x4a, INSTR_RRE_RR },
- { "lura", 0x4b, INSTR_RRE_RR },
- { "tar", 0x4c, INSTR_RRE_AR },
- { "cpya", 0x4d, INSTR_RRE_AA },
- { "sar", 0x4e, INSTR_RRE_AR },
- { "ear", 0x4f, INSTR_RRE_RA },
- { "csp", 0x50, INSTR_RRE_RR },
- { "msr", 0x52, INSTR_RRE_RR },
- { "mvpg", 0x54, INSTR_RRE_RR },
- { "mvst", 0x55, INSTR_RRE_RR },
- { "cuse", 0x57, INSTR_RRE_RR },
- { "bsg", 0x58, INSTR_RRE_RR },
- { "bsa", 0x5a, INSTR_RRE_RR },
- { "clst", 0x5d, INSTR_RRE_RR },
- { "srst", 0x5e, INSTR_RRE_RR },
- { "cmpsc", 0x63, INSTR_RRE_RR },
- { "siga", 0x74, INSTR_S_RD },
- { "xsch", 0x76, INSTR_S_00 },
- { "rp", 0x77, INSTR_S_RD },
- { "stcke", 0x78, INSTR_S_RD },
- { "sacf", 0x79, INSTR_S_RD },
- { "stsi", 0x7d, INSTR_S_RD },
- { "srnm", 0x99, INSTR_S_RD },
- { "stfpc", 0x9c, INSTR_S_RD },
- { "lfpc", 0x9d, INSTR_S_RD },
- { "tre", 0xa5, INSTR_RRE_RR },
- { "cuutf", 0xa6, INSTR_RRF_M0RR },
- { "cutfu", 0xa7, INSTR_RRF_M0RR },
- { "stfl", 0xb1, INSTR_S_RD },
- { "trap4", 0xff, INSTR_S_RD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_b3[] = {
- { "maylr", 0x38, INSTR_RRF_F0FF },
- { "mylr", 0x39, INSTR_RRF_F0FF },
- { "mayr", 0x3a, INSTR_RRF_F0FF },
- { "myr", 0x3b, INSTR_RRF_F0FF },
- { "mayhr", 0x3c, INSTR_RRF_F0FF },
- { "myhr", 0x3d, INSTR_RRF_F0FF },
- { "lpdfr", 0x70, INSTR_RRE_FF },
- { "lndfr", 0x71, INSTR_RRE_FF },
- { "cpsdr", 0x72, INSTR_RRF_F0FF2 },
- { "lcdfr", 0x73, INSTR_RRE_FF },
- { "sfasr", 0x85, INSTR_RRE_R0 },
- { { 0, LONG_INSN_CELFBR }, 0x90, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLFBR }, 0x91, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXLFBR }, 0x92, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CEFBRA }, 0x94, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDFBRA }, 0x95, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXFBRA }, 0x96, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CFEBRA }, 0x98, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CFDBRA }, 0x99, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CFXBRA }, 0x9a, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CLFEBR }, 0x9c, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLFDBR }, 0x9d, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLFXBR }, 0x9e, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CELGBR }, 0xa0, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLGBR }, 0xa1, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXLGBR }, 0xa2, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CEGBRA }, 0xa4, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDGBRA }, 0xa5, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXGBRA }, 0xa6, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CGEBRA }, 0xa8, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CGDBRA }, 0xa9, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CGXBRA }, 0xaa, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CLGEBR }, 0xac, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGDBR }, 0xad, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGXBR }, 0xae, INSTR_RRF_UUFR },
- { "ldgr", 0xc1, INSTR_RRE_FR },
- { "cegr", 0xc4, INSTR_RRE_FR },
- { "cdgr", 0xc5, INSTR_RRE_FR },
- { "cxgr", 0xc6, INSTR_RRE_FR },
- { "cger", 0xc8, INSTR_RRF_U0RF },
- { "cgdr", 0xc9, INSTR_RRF_U0RF },
- { "cgxr", 0xca, INSTR_RRF_U0RF },
- { "lgdr", 0xcd, INSTR_RRE_RF },
- { "mdtra", 0xd0, INSTR_RRF_FUFF2 },
- { "ddtra", 0xd1, INSTR_RRF_FUFF2 },
- { "adtra", 0xd2, INSTR_RRF_FUFF2 },
- { "sdtra", 0xd3, INSTR_RRF_FUFF2 },
- { "ldetr", 0xd4, INSTR_RRF_0UFF },
- { "ledtr", 0xd5, INSTR_RRF_UUFF },
- { "ltdtr", 0xd6, INSTR_RRE_FF },
- { "fidtr", 0xd7, INSTR_RRF_UUFF },
- { "mxtra", 0xd8, INSTR_RRF_FUFF2 },
- { "dxtra", 0xd9, INSTR_RRF_FUFF2 },
- { "axtra", 0xda, INSTR_RRF_FUFF2 },
- { "sxtra", 0xdb, INSTR_RRF_FUFF2 },
- { "lxdtr", 0xdc, INSTR_RRF_0UFF },
- { "ldxtr", 0xdd, INSTR_RRF_UUFF },
- { "ltxtr", 0xde, INSTR_RRE_FF },
- { "fixtr", 0xdf, INSTR_RRF_UUFF },
- { "kdtr", 0xe0, INSTR_RRE_FF },
- { { 0, LONG_INSN_CGDTRA }, 0xe1, INSTR_RRF_UURF },
- { "cudtr", 0xe2, INSTR_RRE_RF },
- { "csdtr", 0xe3, INSTR_RRE_RF },
- { "cdtr", 0xe4, INSTR_RRE_FF },
- { "eedtr", 0xe5, INSTR_RRE_RF },
- { "esdtr", 0xe7, INSTR_RRE_RF },
- { "kxtr", 0xe8, INSTR_RRE_FF },
- { { 0, LONG_INSN_CGXTRA }, 0xe9, INSTR_RRF_UUFR },
- { "cuxtr", 0xea, INSTR_RRE_RF },
- { "csxtr", 0xeb, INSTR_RRE_RF },
- { "cxtr", 0xec, INSTR_RRE_FF },
- { "eextr", 0xed, INSTR_RRE_RF },
- { "esxtr", 0xef, INSTR_RRE_RF },
- { { 0, LONG_INSN_CDGTRA }, 0xf1, INSTR_RRF_UUFR },
- { "cdutr", 0xf2, INSTR_RRE_FR },
- { "cdstr", 0xf3, INSTR_RRE_FR },
- { "cedtr", 0xf4, INSTR_RRE_FF },
- { "qadtr", 0xf5, INSTR_RRF_FUFF },
- { "iedtr", 0xf6, INSTR_RRF_F0FR },
- { "rrdtr", 0xf7, INSTR_RRF_FFRU },
- { { 0, LONG_INSN_CXGTRA }, 0xf9, INSTR_RRF_UURF },
- { "cxutr", 0xfa, INSTR_RRE_FR },
- { "cxstr", 0xfb, INSTR_RRE_FR },
- { "cextr", 0xfc, INSTR_RRE_FF },
- { "qaxtr", 0xfd, INSTR_RRF_FUFF },
- { "iextr", 0xfe, INSTR_RRF_F0FR },
- { "rrxtr", 0xff, INSTR_RRF_FFRU },
- { "lpebr", 0x00, INSTR_RRE_FF },
- { "lnebr", 0x01, INSTR_RRE_FF },
- { "ltebr", 0x02, INSTR_RRE_FF },
- { "lcebr", 0x03, INSTR_RRE_FF },
- { "ldebr", 0x04, INSTR_RRE_FF },
- { "lxdbr", 0x05, INSTR_RRE_FF },
- { "lxebr", 0x06, INSTR_RRE_FF },
- { "mxdbr", 0x07, INSTR_RRE_FF },
- { "kebr", 0x08, INSTR_RRE_FF },
- { "cebr", 0x09, INSTR_RRE_FF },
- { "aebr", 0x0a, INSTR_RRE_FF },
- { "sebr", 0x0b, INSTR_RRE_FF },
- { "mdebr", 0x0c, INSTR_RRE_FF },
- { "debr", 0x0d, INSTR_RRE_FF },
- { "maebr", 0x0e, INSTR_RRF_F0FF },
- { "msebr", 0x0f, INSTR_RRF_F0FF },
- { "lpdbr", 0x10, INSTR_RRE_FF },
- { "lndbr", 0x11, INSTR_RRE_FF },
- { "ltdbr", 0x12, INSTR_RRE_FF },
- { "lcdbr", 0x13, INSTR_RRE_FF },
- { "sqebr", 0x14, INSTR_RRE_FF },
- { "sqdbr", 0x15, INSTR_RRE_FF },
- { "sqxbr", 0x16, INSTR_RRE_FF },
- { "meebr", 0x17, INSTR_RRE_FF },
- { "kdbr", 0x18, INSTR_RRE_FF },
- { "cdbr", 0x19, INSTR_RRE_FF },
- { "adbr", 0x1a, INSTR_RRE_FF },
- { "sdbr", 0x1b, INSTR_RRE_FF },
- { "mdbr", 0x1c, INSTR_RRE_FF },
- { "ddbr", 0x1d, INSTR_RRE_FF },
- { "madbr", 0x1e, INSTR_RRF_F0FF },
- { "msdbr", 0x1f, INSTR_RRF_F0FF },
- { "lder", 0x24, INSTR_RRE_FF },
- { "lxdr", 0x25, INSTR_RRE_FF },
- { "lxer", 0x26, INSTR_RRE_FF },
- { "maer", 0x2e, INSTR_RRF_F0FF },
- { "mser", 0x2f, INSTR_RRF_F0FF },
- { "sqxr", 0x36, INSTR_RRE_FF },
- { "meer", 0x37, INSTR_RRE_FF },
- { "madr", 0x3e, INSTR_RRF_F0FF },
- { "msdr", 0x3f, INSTR_RRF_F0FF },
- { "lpxbr", 0x40, INSTR_RRE_FF },
- { "lnxbr", 0x41, INSTR_RRE_FF },
- { "ltxbr", 0x42, INSTR_RRE_FF },
- { "lcxbr", 0x43, INSTR_RRE_FF },
- { { 0, LONG_INSN_LEDBRA }, 0x44, INSTR_RRF_UUFF },
- { { 0, LONG_INSN_LDXBRA }, 0x45, INSTR_RRF_UUFF },
- { { 0, LONG_INSN_LEXBRA }, 0x46, INSTR_RRF_UUFF },
- { { 0, LONG_INSN_FIXBRA }, 0x47, INSTR_RRF_UUFF },
- { "kxbr", 0x48, INSTR_RRE_FF },
- { "cxbr", 0x49, INSTR_RRE_FF },
- { "axbr", 0x4a, INSTR_RRE_FF },
- { "sxbr", 0x4b, INSTR_RRE_FF },
- { "mxbr", 0x4c, INSTR_RRE_FF },
- { "dxbr", 0x4d, INSTR_RRE_FF },
- { "tbedr", 0x50, INSTR_RRF_U0FF },
- { "tbdr", 0x51, INSTR_RRF_U0FF },
- { "diebr", 0x53, INSTR_RRF_FUFF },
- { { 0, LONG_INSN_FIEBRA }, 0x57, INSTR_RRF_UUFF },
- { "thder", 0x58, INSTR_RRE_FF },
- { "thdr", 0x59, INSTR_RRE_FF },
- { "didbr", 0x5b, INSTR_RRF_FUFF },
- { { 0, LONG_INSN_FIDBRA }, 0x5f, INSTR_RRF_UUFF },
- { "lpxr", 0x60, INSTR_RRE_FF },
- { "lnxr", 0x61, INSTR_RRE_FF },
- { "ltxr", 0x62, INSTR_RRE_FF },
- { "lcxr", 0x63, INSTR_RRE_FF },
- { "lxr", 0x65, INSTR_RRE_FF },
- { "lexr", 0x66, INSTR_RRE_FF },
- { "fixr", 0x67, INSTR_RRE_FF },
- { "cxr", 0x69, INSTR_RRE_FF },
- { "lzer", 0x74, INSTR_RRE_F0 },
- { "lzdr", 0x75, INSTR_RRE_F0 },
- { "lzxr", 0x76, INSTR_RRE_F0 },
- { "fier", 0x77, INSTR_RRE_FF },
- { "fidr", 0x7f, INSTR_RRE_FF },
- { "sfpc", 0x84, INSTR_RRE_RR_OPT },
- { "efpc", 0x8c, INSTR_RRE_RR_OPT },
- { "cefbr", 0x94, INSTR_RRE_RF },
- { "cdfbr", 0x95, INSTR_RRE_RF },
- { "cxfbr", 0x96, INSTR_RRE_RF },
- { "cfebr", 0x98, INSTR_RRF_U0RF },
- { "cfdbr", 0x99, INSTR_RRF_U0RF },
- { "cfxbr", 0x9a, INSTR_RRF_U0RF },
- { "cefr", 0xb4, INSTR_RRE_FR },
- { "cdfr", 0xb5, INSTR_RRE_FR },
- { "cxfr", 0xb6, INSTR_RRE_FR },
- { "cfer", 0xb8, INSTR_RRF_U0RF },
- { "cfdr", 0xb9, INSTR_RRF_U0RF },
- { "cfxr", 0xba, INSTR_RRF_U0RF },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_b9[] = {
- { "lpgr", 0x00, INSTR_RRE_RR },
- { "lngr", 0x01, INSTR_RRE_RR },
- { "ltgr", 0x02, INSTR_RRE_RR },
- { "lcgr", 0x03, INSTR_RRE_RR },
- { "lgr", 0x04, INSTR_RRE_RR },
- { "lurag", 0x05, INSTR_RRE_RR },
- { "lgbr", 0x06, INSTR_RRE_RR },
- { "lghr", 0x07, INSTR_RRE_RR },
- { "agr", 0x08, INSTR_RRE_RR },
- { "sgr", 0x09, INSTR_RRE_RR },
- { "algr", 0x0a, INSTR_RRE_RR },
- { "slgr", 0x0b, INSTR_RRE_RR },
- { "msgr", 0x0c, INSTR_RRE_RR },
- { "dsgr", 0x0d, INSTR_RRE_RR },
- { "eregg", 0x0e, INSTR_RRE_RR },
- { "lrvgr", 0x0f, INSTR_RRE_RR },
- { "lpgfr", 0x10, INSTR_RRE_RR },
- { "lngfr", 0x11, INSTR_RRE_RR },
- { "ltgfr", 0x12, INSTR_RRE_RR },
- { "lcgfr", 0x13, INSTR_RRE_RR },
- { "lgfr", 0x14, INSTR_RRE_RR },
- { "llgfr", 0x16, INSTR_RRE_RR },
- { "llgtr", 0x17, INSTR_RRE_RR },
- { "agfr", 0x18, INSTR_RRE_RR },
- { "sgfr", 0x19, INSTR_RRE_RR },
- { "algfr", 0x1a, INSTR_RRE_RR },
- { "slgfr", 0x1b, INSTR_RRE_RR },
- { "msgfr", 0x1c, INSTR_RRE_RR },
- { "dsgfr", 0x1d, INSTR_RRE_RR },
- { "cgr", 0x20, INSTR_RRE_RR },
- { "clgr", 0x21, INSTR_RRE_RR },
- { "sturg", 0x25, INSTR_RRE_RR },
- { "lbr", 0x26, INSTR_RRE_RR },
- { "lhr", 0x27, INSTR_RRE_RR },
- { "cgfr", 0x30, INSTR_RRE_RR },
- { "clgfr", 0x31, INSTR_RRE_RR },
- { "cfdtr", 0x41, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGDTR }, 0x42, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLFDTR }, 0x43, INSTR_RRF_UURF },
- { "bctgr", 0x46, INSTR_RRE_RR },
- { "cfxtr", 0x49, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGXTR }, 0x4a, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CLFXTR }, 0x4b, INSTR_RRF_UUFR },
- { "cdftr", 0x51, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLGTR }, 0x52, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLFTR }, 0x53, INSTR_RRF_UUFR },
- { "cxftr", 0x59, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CXLGTR }, 0x5a, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CXLFTR }, 0x5b, INSTR_RRF_UUFR },
- { "cgrt", 0x60, INSTR_RRF_U0RR },
- { "clgrt", 0x61, INSTR_RRF_U0RR },
- { "crt", 0x72, INSTR_RRF_U0RR },
- { "clrt", 0x73, INSTR_RRF_U0RR },
- { "ngr", 0x80, INSTR_RRE_RR },
- { "ogr", 0x81, INSTR_RRE_RR },
- { "xgr", 0x82, INSTR_RRE_RR },
- { "flogr", 0x83, INSTR_RRE_RR },
- { "llgcr", 0x84, INSTR_RRE_RR },
- { "llghr", 0x85, INSTR_RRE_RR },
- { "mlgr", 0x86, INSTR_RRE_RR },
- { "dlgr", 0x87, INSTR_RRE_RR },
- { "alcgr", 0x88, INSTR_RRE_RR },
- { "slbgr", 0x89, INSTR_RRE_RR },
- { "cspg", 0x8a, INSTR_RRE_RR },
- { "idte", 0x8e, INSTR_RRF_R0RR },
- { "crdte", 0x8f, INSTR_RRF_RMRR },
- { "llcr", 0x94, INSTR_RRE_RR },
- { "llhr", 0x95, INSTR_RRE_RR },
- { "esea", 0x9d, INSTR_RRE_R0 },
- { "ptf", 0xa2, INSTR_RRE_R0 },
- { "lptea", 0xaa, INSTR_RRF_RURR },
- { "rrbm", 0xae, INSTR_RRE_RR },
- { "pfmf", 0xaf, INSTR_RRE_RR },
- { "cu14", 0xb0, INSTR_RRF_M0RR },
- { "cu24", 0xb1, INSTR_RRF_M0RR },
- { "cu41", 0xb2, INSTR_RRE_RR },
- { "cu42", 0xb3, INSTR_RRE_RR },
- { "trtre", 0xbd, INSTR_RRF_M0RR },
- { "srstu", 0xbe, INSTR_RRE_RR },
- { "trte", 0xbf, INSTR_RRF_M0RR },
- { "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
- { "shhhr", 0xc9, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_ALHHHR }, 0xca, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_SLHHHR }, 0xcb, INSTR_RRF_R0RR2 },
- { "chhr", 0xcd, INSTR_RRE_RR },
- { "clhhr", 0xcf, INSTR_RRE_RR },
- { { 0, LONG_INSN_PCISTG }, 0xd0, INSTR_RRE_RR },
- { "pcilg", 0xd2, INSTR_RRE_RR },
- { "rpcit", 0xd3, INSTR_RRE_RR },
- { "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
- { "shhlr", 0xd9, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_ALHHLR }, 0xda, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_SLHHLR }, 0xdb, INSTR_RRF_R0RR2 },
- { "chlr", 0xdd, INSTR_RRE_RR },
- { "clhlr", 0xdf, INSTR_RRE_RR },
- { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
- { "locgr", 0xe2, INSTR_RRF_M0RR },
- { "ngrk", 0xe4, INSTR_RRF_R0RR2 },
- { "ogrk", 0xe6, INSTR_RRF_R0RR2 },
- { "xgrk", 0xe7, INSTR_RRF_R0RR2 },
- { "agrk", 0xe8, INSTR_RRF_R0RR2 },
- { "sgrk", 0xe9, INSTR_RRF_R0RR2 },
- { "algrk", 0xea, INSTR_RRF_R0RR2 },
- { "slgrk", 0xeb, INSTR_RRF_R0RR2 },
- { "locr", 0xf2, INSTR_RRF_M0RR },
- { "nrk", 0xf4, INSTR_RRF_R0RR2 },
- { "ork", 0xf6, INSTR_RRF_R0RR2 },
- { "xrk", 0xf7, INSTR_RRF_R0RR2 },
- { "ark", 0xf8, INSTR_RRF_R0RR2 },
- { "srk", 0xf9, INSTR_RRF_R0RR2 },
- { "alrk", 0xfa, INSTR_RRF_R0RR2 },
- { "slrk", 0xfb, INSTR_RRF_R0RR2 },
- { "kmac", 0x1e, INSTR_RRE_RR },
- { "lrvr", 0x1f, INSTR_RRE_RR },
- { "km", 0x2e, INSTR_RRE_RR },
- { "kmc", 0x2f, INSTR_RRE_RR },
- { "kimd", 0x3e, INSTR_RRE_RR },
- { "klmd", 0x3f, INSTR_RRE_RR },
- { "epsw", 0x8d, INSTR_RRE_RR },
- { "trtt", 0x90, INSTR_RRF_M0RR },
- { "trto", 0x91, INSTR_RRF_M0RR },
- { "trot", 0x92, INSTR_RRF_M0RR },
- { "troo", 0x93, INSTR_RRF_M0RR },
- { "mlr", 0x96, INSTR_RRE_RR },
- { "dlr", 0x97, INSTR_RRE_RR },
- { "alcr", 0x98, INSTR_RRE_RR },
- { "slbr", 0x99, INSTR_RRE_RR },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c0[] = {
- { "lgfi", 0x01, INSTR_RIL_RI },
- { "xihf", 0x06, INSTR_RIL_RU },
- { "xilf", 0x07, INSTR_RIL_RU },
- { "iihf", 0x08, INSTR_RIL_RU },
- { "iilf", 0x09, INSTR_RIL_RU },
- { "nihf", 0x0a, INSTR_RIL_RU },
- { "nilf", 0x0b, INSTR_RIL_RU },
- { "oihf", 0x0c, INSTR_RIL_RU },
- { "oilf", 0x0d, INSTR_RIL_RU },
- { "llihf", 0x0e, INSTR_RIL_RU },
- { "llilf", 0x0f, INSTR_RIL_RU },
- { "larl", 0x00, INSTR_RIL_RP },
- { "brcl", 0x04, INSTR_RIL_UP },
- { "brasl", 0x05, INSTR_RIL_RP },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c2[] = {
- { "msgfi", 0x00, INSTR_RIL_RI },
- { "msfi", 0x01, INSTR_RIL_RI },
- { "slgfi", 0x04, INSTR_RIL_RU },
- { "slfi", 0x05, INSTR_RIL_RU },
- { "agfi", 0x08, INSTR_RIL_RI },
- { "afi", 0x09, INSTR_RIL_RI },
- { "algfi", 0x0a, INSTR_RIL_RU },
- { "alfi", 0x0b, INSTR_RIL_RU },
- { "cgfi", 0x0c, INSTR_RIL_RI },
- { "cfi", 0x0d, INSTR_RIL_RI },
- { "clgfi", 0x0e, INSTR_RIL_RU },
- { "clfi", 0x0f, INSTR_RIL_RU },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c4[] = {
- { "llhrl", 0x02, INSTR_RIL_RP },
- { "lghrl", 0x04, INSTR_RIL_RP },
- { "lhrl", 0x05, INSTR_RIL_RP },
- { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
- { "sthrl", 0x07, INSTR_RIL_RP },
- { "lgrl", 0x08, INSTR_RIL_RP },
- { "stgrl", 0x0b, INSTR_RIL_RP },
- { "lgfrl", 0x0c, INSTR_RIL_RP },
- { "lrl", 0x0d, INSTR_RIL_RP },
- { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
- { "strl", 0x0f, INSTR_RIL_RP },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c6[] = {
- { "exrl", 0x00, INSTR_RIL_RP },
- { "pfdrl", 0x02, INSTR_RIL_UP },
- { "cghrl", 0x04, INSTR_RIL_RP },
- { "chrl", 0x05, INSTR_RIL_RP },
- { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
- { "clhrl", 0x07, INSTR_RIL_RP },
- { "cgrl", 0x08, INSTR_RIL_RP },
- { "clgrl", 0x0a, INSTR_RIL_RP },
- { "cgfrl", 0x0c, INSTR_RIL_RP },
- { "crl", 0x0d, INSTR_RIL_RP },
- { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
- { "clrl", 0x0f, INSTR_RIL_RP },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c8[] = {
- { "mvcos", 0x00, INSTR_SSF_RRDRD },
- { "ectg", 0x01, INSTR_SSF_RRDRD },
- { "csst", 0x02, INSTR_SSF_RRDRD },
- { "lpd", 0x04, INSTR_SSF_RRDRD2 },
- { "lpdg", 0x05, INSTR_SSF_RRDRD2 },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_cc[] = {
- { "brcth", 0x06, INSTR_RIL_RP },
- { "aih", 0x08, INSTR_RIL_RI },
- { "alsih", 0x0a, INSTR_RIL_RI },
- { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
- { "cih", 0x0d, INSTR_RIL_RI },
- { "clih", 0x0f, INSTR_RIL_RI },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_e3[] = {
- { "ltg", 0x02, INSTR_RXY_RRRD },
- { "lrag", 0x03, INSTR_RXY_RRRD },
- { "lg", 0x04, INSTR_RXY_RRRD },
- { "cvby", 0x06, INSTR_RXY_RRRD },
- { "ag", 0x08, INSTR_RXY_RRRD },
- { "sg", 0x09, INSTR_RXY_RRRD },
- { "alg", 0x0a, INSTR_RXY_RRRD },
- { "slg", 0x0b, INSTR_RXY_RRRD },
- { "msg", 0x0c, INSTR_RXY_RRRD },
- { "dsg", 0x0d, INSTR_RXY_RRRD },
- { "cvbg", 0x0e, INSTR_RXY_RRRD },
- { "lrvg", 0x0f, INSTR_RXY_RRRD },
- { "lt", 0x12, INSTR_RXY_RRRD },
- { "lray", 0x13, INSTR_RXY_RRRD },
- { "lgf", 0x14, INSTR_RXY_RRRD },
- { "lgh", 0x15, INSTR_RXY_RRRD },
- { "llgf", 0x16, INSTR_RXY_RRRD },
- { "llgt", 0x17, INSTR_RXY_RRRD },
- { "agf", 0x18, INSTR_RXY_RRRD },
- { "sgf", 0x19, INSTR_RXY_RRRD },
- { "algf", 0x1a, INSTR_RXY_RRRD },
- { "slgf", 0x1b, INSTR_RXY_RRRD },
- { "msgf", 0x1c, INSTR_RXY_RRRD },
- { "dsgf", 0x1d, INSTR_RXY_RRRD },
- { "cg", 0x20, INSTR_RXY_RRRD },
- { "clg", 0x21, INSTR_RXY_RRRD },
- { "stg", 0x24, INSTR_RXY_RRRD },
- { "ntstg", 0x25, INSTR_RXY_RRRD },
- { "cvdy", 0x26, INSTR_RXY_RRRD },
- { "cvdg", 0x2e, INSTR_RXY_RRRD },
- { "strvg", 0x2f, INSTR_RXY_RRRD },
- { "cgf", 0x30, INSTR_RXY_RRRD },
- { "clgf", 0x31, INSTR_RXY_RRRD },
- { "ltgf", 0x32, INSTR_RXY_RRRD },
- { "cgh", 0x34, INSTR_RXY_RRRD },
- { "pfd", 0x36, INSTR_RXY_URRD },
- { "strvh", 0x3f, INSTR_RXY_RRRD },
- { "bctg", 0x46, INSTR_RXY_RRRD },
- { "sty", 0x50, INSTR_RXY_RRRD },
- { "msy", 0x51, INSTR_RXY_RRRD },
- { "ny", 0x54, INSTR_RXY_RRRD },
- { "cly", 0x55, INSTR_RXY_RRRD },
- { "oy", 0x56, INSTR_RXY_RRRD },
- { "xy", 0x57, INSTR_RXY_RRRD },
- { "ly", 0x58, INSTR_RXY_RRRD },
- { "cy", 0x59, INSTR_RXY_RRRD },
- { "ay", 0x5a, INSTR_RXY_RRRD },
- { "sy", 0x5b, INSTR_RXY_RRRD },
- { "mfy", 0x5c, INSTR_RXY_RRRD },
- { "aly", 0x5e, INSTR_RXY_RRRD },
- { "sly", 0x5f, INSTR_RXY_RRRD },
- { "sthy", 0x70, INSTR_RXY_RRRD },
- { "lay", 0x71, INSTR_RXY_RRRD },
- { "stcy", 0x72, INSTR_RXY_RRRD },
- { "icy", 0x73, INSTR_RXY_RRRD },
- { "laey", 0x75, INSTR_RXY_RRRD },
- { "lb", 0x76, INSTR_RXY_RRRD },
- { "lgb", 0x77, INSTR_RXY_RRRD },
- { "lhy", 0x78, INSTR_RXY_RRRD },
- { "chy", 0x79, INSTR_RXY_RRRD },
- { "ahy", 0x7a, INSTR_RXY_RRRD },
- { "shy", 0x7b, INSTR_RXY_RRRD },
- { "mhy", 0x7c, INSTR_RXY_RRRD },
- { "ng", 0x80, INSTR_RXY_RRRD },
- { "og", 0x81, INSTR_RXY_RRRD },
- { "xg", 0x82, INSTR_RXY_RRRD },
- { "lgat", 0x85, INSTR_RXY_RRRD },
- { "mlg", 0x86, INSTR_RXY_RRRD },
- { "dlg", 0x87, INSTR_RXY_RRRD },
- { "alcg", 0x88, INSTR_RXY_RRRD },
- { "slbg", 0x89, INSTR_RXY_RRRD },
- { "stpq", 0x8e, INSTR_RXY_RRRD },
- { "lpq", 0x8f, INSTR_RXY_RRRD },
- { "llgc", 0x90, INSTR_RXY_RRRD },
- { "llgh", 0x91, INSTR_RXY_RRRD },
- { "llc", 0x94, INSTR_RXY_RRRD },
- { "llh", 0x95, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_LLGTAT }, 0x9c, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_LLGFAT }, 0x9d, INSTR_RXY_RRRD },
- { "lat", 0x9f, INSTR_RXY_RRRD },
- { "lbh", 0xc0, INSTR_RXY_RRRD },
- { "llch", 0xc2, INSTR_RXY_RRRD },
- { "stch", 0xc3, INSTR_RXY_RRRD },
- { "lhh", 0xc4, INSTR_RXY_RRRD },
- { "llhh", 0xc6, INSTR_RXY_RRRD },
- { "sthh", 0xc7, INSTR_RXY_RRRD },
- { "lfhat", 0xc8, INSTR_RXY_RRRD },
- { "lfh", 0xca, INSTR_RXY_RRRD },
- { "stfh", 0xcb, INSTR_RXY_RRRD },
- { "chf", 0xcd, INSTR_RXY_RRRD },
- { "clhf", 0xcf, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
- { "lrv", 0x1e, INSTR_RXY_RRRD },
- { "lrvh", 0x1f, INSTR_RXY_RRRD },
- { "strv", 0x3e, INSTR_RXY_RRRD },
- { "ml", 0x96, INSTR_RXY_RRRD },
- { "dl", 0x97, INSTR_RXY_RRRD },
- { "alc", 0x98, INSTR_RXY_RRRD },
- { "slb", 0x99, INSTR_RXY_RRRD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_e5[] = {
- { "strag", 0x02, INSTR_SSE_RDRD },
- { "mvhhi", 0x44, INSTR_SIL_RDI },
- { "mvghi", 0x48, INSTR_SIL_RDI },
- { "mvhi", 0x4c, INSTR_SIL_RDI },
- { "chhsi", 0x54, INSTR_SIL_RDI },
- { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
- { "cghsi", 0x58, INSTR_SIL_RDI },
- { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
- { "chsi", 0x5c, INSTR_SIL_RDI },
- { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
- { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
- { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
- { "lasp", 0x00, INSTR_SSE_RDRD },
- { "tprot", 0x01, INSTR_SSE_RDRD },
- { "mvcsk", 0x0e, INSTR_SSE_RDRD },
- { "mvcdk", 0x0f, INSTR_SSE_RDRD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_e7[] = {
- { "lcbb", 0x27, INSTR_RXE_RRRDM },
- { "vgef", 0x13, INSTR_VRV_VVRDM },
- { "vgeg", 0x12, INSTR_VRV_VVRDM },
- { "vgbm", 0x44, INSTR_VRI_V0I0 },
- { "vgm", 0x46, INSTR_VRI_V0IIM },
- { "vl", 0x06, INSTR_VRX_VRRD0 },
- { "vlr", 0x56, INSTR_VRR_VV00000 },
- { "vlrp", 0x05, INSTR_VRX_VRRDM },
- { "vleb", 0x00, INSTR_VRX_VRRDM },
- { "vleh", 0x01, INSTR_VRX_VRRDM },
- { "vlef", 0x03, INSTR_VRX_VRRDM },
- { "vleg", 0x02, INSTR_VRX_VRRDM },
- { "vleib", 0x40, INSTR_VRI_V0IM },
- { "vleih", 0x41, INSTR_VRI_V0IM },
- { "vleif", 0x43, INSTR_VRI_V0IM },
- { "vleig", 0x42, INSTR_VRI_V0IM },
- { "vlgv", 0x21, INSTR_VRS_RVRDM },
- { "vllez", 0x04, INSTR_VRX_VRRDM },
- { "vlm", 0x36, INSTR_VRS_VVRD0 },
- { "vlbb", 0x07, INSTR_VRX_VRRDM },
- { "vlvg", 0x22, INSTR_VRS_VRRDM },
- { "vlvgp", 0x62, INSTR_VRR_VRR0000 },
- { "vll", 0x37, INSTR_VRS_VRRD0 },
- { "vmrh", 0x61, INSTR_VRR_VVV000M },
- { "vmrl", 0x60, INSTR_VRR_VVV000M },
- { "vpk", 0x94, INSTR_VRR_VVV000M },
- { "vpks", 0x97, INSTR_VRR_VVV0M0M },
- { "vpkls", 0x95, INSTR_VRR_VVV0M0M },
- { "vperm", 0x8c, INSTR_VRR_VVV000V },
- { "vpdi", 0x84, INSTR_VRR_VVV000M },
- { "vrep", 0x4d, INSTR_VRI_VVIM },
- { "vrepi", 0x45, INSTR_VRI_V0IM },
- { "vscef", 0x1b, INSTR_VRV_VWRDM },
- { "vsceg", 0x1a, INSTR_VRV_VWRDM },
- { "vsel", 0x8d, INSTR_VRR_VVV000V },
- { "vseg", 0x5f, INSTR_VRR_VV0000M },
- { "vst", 0x0e, INSTR_VRX_VRRD0 },
- { "vsteb", 0x08, INSTR_VRX_VRRDM },
- { "vsteh", 0x09, INSTR_VRX_VRRDM },
- { "vstef", 0x0b, INSTR_VRX_VRRDM },
- { "vsteg", 0x0a, INSTR_VRX_VRRDM },
- { "vstm", 0x3e, INSTR_VRS_VVRD0 },
- { "vstl", 0x3f, INSTR_VRS_VRRD0 },
- { "vuph", 0xd7, INSTR_VRR_VV0000M },
- { "vuplh", 0xd5, INSTR_VRR_VV0000M },
- { "vupl", 0xd6, INSTR_VRR_VV0000M },
- { "vupll", 0xd4, INSTR_VRR_VV0000M },
- { "va", 0xf3, INSTR_VRR_VVV000M },
- { "vacc", 0xf1, INSTR_VRR_VVV000M },
- { "vac", 0xbb, INSTR_VRR_VVVM00V },
- { "vaccc", 0xb9, INSTR_VRR_VVVM00V },
- { "vn", 0x68, INSTR_VRR_VVV0000 },
- { "vnc", 0x69, INSTR_VRR_VVV0000 },
- { "vavg", 0xf2, INSTR_VRR_VVV000M },
- { "vavgl", 0xf0, INSTR_VRR_VVV000M },
- { "vcksm", 0x66, INSTR_VRR_VVV0000 },
- { "vec", 0xdb, INSTR_VRR_VV0000M },
- { "vecl", 0xd9, INSTR_VRR_VV0000M },
- { "vceq", 0xf8, INSTR_VRR_VVV0M0M },
- { "vch", 0xfb, INSTR_VRR_VVV0M0M },
- { "vchl", 0xf9, INSTR_VRR_VVV0M0M },
- { "vclz", 0x53, INSTR_VRR_VV0000M },
- { "vctz", 0x52, INSTR_VRR_VV0000M },
- { "vx", 0x6d, INSTR_VRR_VVV0000 },
- { "vgfm", 0xb4, INSTR_VRR_VVV000M },
- { "vgfma", 0xbc, INSTR_VRR_VVVM00V },
- { "vlc", 0xde, INSTR_VRR_VV0000M },
- { "vlp", 0xdf, INSTR_VRR_VV0000M },
- { "vmx", 0xff, INSTR_VRR_VVV000M },
- { "vmxl", 0xfd, INSTR_VRR_VVV000M },
- { "vmn", 0xfe, INSTR_VRR_VVV000M },
- { "vmnl", 0xfc, INSTR_VRR_VVV000M },
- { "vmal", 0xaa, INSTR_VRR_VVVM00V },
- { "vmae", 0xae, INSTR_VRR_VVVM00V },
- { "vmale", 0xac, INSTR_VRR_VVVM00V },
- { "vmah", 0xab, INSTR_VRR_VVVM00V },
- { "vmalh", 0xa9, INSTR_VRR_VVVM00V },
- { "vmao", 0xaf, INSTR_VRR_VVVM00V },
- { "vmalo", 0xad, INSTR_VRR_VVVM00V },
- { "vmh", 0xa3, INSTR_VRR_VVV000M },
- { "vmlh", 0xa1, INSTR_VRR_VVV000M },
- { "vml", 0xa2, INSTR_VRR_VVV000M },
- { "vme", 0xa6, INSTR_VRR_VVV000M },
- { "vmle", 0xa4, INSTR_VRR_VVV000M },
- { "vmo", 0xa7, INSTR_VRR_VVV000M },
- { "vmlo", 0xa5, INSTR_VRR_VVV000M },
- { "vno", 0x6b, INSTR_VRR_VVV0000 },
- { "vo", 0x6a, INSTR_VRR_VVV0000 },
- { { 0, LONG_INSN_VPOPCT }, 0x50, INSTR_VRR_VV0000M },
- { { 0, LONG_INSN_VERLLV }, 0x73, INSTR_VRR_VVV000M },
- { "verll", 0x33, INSTR_VRS_VVRDM },
- { "verim", 0x72, INSTR_VRI_VVV0IM },
- { "veslv", 0x70, INSTR_VRR_VVV000M },
- { "vesl", 0x30, INSTR_VRS_VVRDM },
- { { 0, LONG_INSN_VESRAV }, 0x7a, INSTR_VRR_VVV000M },
- { "vesra", 0x3a, INSTR_VRS_VVRDM },
- { { 0, LONG_INSN_VESRLV }, 0x78, INSTR_VRR_VVV000M },
- { "vesrl", 0x38, INSTR_VRS_VVRDM },
- { "vsl", 0x74, INSTR_VRR_VVV0000 },
- { "vslb", 0x75, INSTR_VRR_VVV0000 },
- { "vsldb", 0x77, INSTR_VRI_VVV0I0 },
- { "vsra", 0x7e, INSTR_VRR_VVV0000 },
- { "vsrab", 0x7f, INSTR_VRR_VVV0000 },
- { "vsrl", 0x7c, INSTR_VRR_VVV0000 },
- { "vsrlb", 0x7d, INSTR_VRR_VVV0000 },
- { "vs", 0xf7, INSTR_VRR_VVV000M },
- { "vscb", 0xf5, INSTR_VRR_VVV000M },
- { "vsb", 0xbf, INSTR_VRR_VVVM00V },
- { { 0, LONG_INSN_VSBCBI }, 0xbd, INSTR_VRR_VVVM00V },
- { "vsumg", 0x65, INSTR_VRR_VVV000M },
- { "vsumq", 0x67, INSTR_VRR_VVV000M },
- { "vsum", 0x64, INSTR_VRR_VVV000M },
- { "vtm", 0xd8, INSTR_VRR_VV00000 },
- { "vfae", 0x82, INSTR_VRR_VVV0M0M },
- { "vfee", 0x80, INSTR_VRR_VVV0M0M },
- { "vfene", 0x81, INSTR_VRR_VVV0M0M },
- { "vistr", 0x5c, INSTR_VRR_VV00M0M },
- { "vstrc", 0x8a, INSTR_VRR_VVVMM0V },
- { "vfa", 0xe3, INSTR_VRR_VVV00MM },
- { "wfc", 0xcb, INSTR_VRR_VV000MM },
- { "wfk", 0xca, INSTR_VRR_VV000MM },
- { "vfce", 0xe8, INSTR_VRR_VVV0MMM },
- { "vfch", 0xeb, INSTR_VRR_VVV0MMM },
- { "vfche", 0xea, INSTR_VRR_VVV0MMM },
- { "vcdg", 0xc3, INSTR_VRR_VV00MMM },
- { "vcdlg", 0xc1, INSTR_VRR_VV00MMM },
- { "vcgd", 0xc2, INSTR_VRR_VV00MMM },
- { "vclgd", 0xc0, INSTR_VRR_VV00MMM },
- { "vfd", 0xe5, INSTR_VRR_VVV00MM },
- { "vfi", 0xc7, INSTR_VRR_VV00MMM },
- { "vlde", 0xc4, INSTR_VRR_VV000MM },
- { "vled", 0xc5, INSTR_VRR_VV00MMM },
- { "vfm", 0xe7, INSTR_VRR_VVV00MM },
- { "vfma", 0x8f, INSTR_VRR_VVVM0MV },
- { "vfms", 0x8e, INSTR_VRR_VVVM0MV },
- { "vfpso", 0xcc, INSTR_VRR_VV00MMM },
- { "vfsq", 0xce, INSTR_VRR_VV000MM },
- { "vfs", 0xe2, INSTR_VRR_VVV00MM },
- { "vftci", 0x4a, INSTR_VRI_VVIMM },
-};
-
-static struct s390_insn opcode_eb[] = {
- { "lmg", 0x04, INSTR_RSY_RRRD },
- { "srag", 0x0a, INSTR_RSY_RRRD },
- { "slag", 0x0b, INSTR_RSY_RRRD },
- { "srlg", 0x0c, INSTR_RSY_RRRD },
- { "sllg", 0x0d, INSTR_RSY_RRRD },
- { "tracg", 0x0f, INSTR_RSY_RRRD },
- { "csy", 0x14, INSTR_RSY_RRRD },
- { "rllg", 0x1c, INSTR_RSY_RRRD },
- { "clmh", 0x20, INSTR_RSY_RURD },
- { "clmy", 0x21, INSTR_RSY_RURD },
- { "clt", 0x23, INSTR_RSY_RURD },
- { "stmg", 0x24, INSTR_RSY_RRRD },
- { "stctg", 0x25, INSTR_RSY_CCRD },
- { "stmh", 0x26, INSTR_RSY_RRRD },
- { "clgt", 0x2b, INSTR_RSY_RURD },
- { "stcmh", 0x2c, INSTR_RSY_RURD },
- { "stcmy", 0x2d, INSTR_RSY_RURD },
- { "lctlg", 0x2f, INSTR_RSY_CCRD },
- { "csg", 0x30, INSTR_RSY_RRRD },
- { "cdsy", 0x31, INSTR_RSY_RRRD },
- { "cdsg", 0x3e, INSTR_RSY_RRRD },
- { "bxhg", 0x44, INSTR_RSY_RRRD },
- { "bxleg", 0x45, INSTR_RSY_RRRD },
- { "ecag", 0x4c, INSTR_RSY_RRRD },
- { "tmy", 0x51, INSTR_SIY_URD },
- { "mviy", 0x52, INSTR_SIY_URD },
- { "niy", 0x54, INSTR_SIY_URD },
- { "cliy", 0x55, INSTR_SIY_URD },
- { "oiy", 0x56, INSTR_SIY_URD },
- { "xiy", 0x57, INSTR_SIY_URD },
- { "asi", 0x6a, INSTR_SIY_IRD },
- { "alsi", 0x6e, INSTR_SIY_IRD },
- { "agsi", 0x7a, INSTR_SIY_IRD },
- { "algsi", 0x7e, INSTR_SIY_IRD },
- { "icmh", 0x80, INSTR_RSY_RURD },
- { "icmy", 0x81, INSTR_RSY_RURD },
- { "clclu", 0x8f, INSTR_RSY_RRRD },
- { "stmy", 0x90, INSTR_RSY_RRRD },
- { "lmh", 0x96, INSTR_RSY_RRRD },
- { "lmy", 0x98, INSTR_RSY_RRRD },
- { "lamy", 0x9a, INSTR_RSY_AARD },
- { "stamy", 0x9b, INSTR_RSY_AARD },
- { { 0, LONG_INSN_PCISTB }, 0xd0, INSTR_RSY_RRRD },
- { "sic", 0xd1, INSTR_RSY_RRRD },
- { "srak", 0xdc, INSTR_RSY_RRRD },
- { "slak", 0xdd, INSTR_RSY_RRRD },
- { "srlk", 0xde, INSTR_RSY_RRRD },
- { "sllk", 0xdf, INSTR_RSY_RRRD },
- { "locg", 0xe2, INSTR_RSY_RDRM },
- { "stocg", 0xe3, INSTR_RSY_RDRM },
- { "lang", 0xe4, INSTR_RSY_RRRD },
- { "laog", 0xe6, INSTR_RSY_RRRD },
- { "laxg", 0xe7, INSTR_RSY_RRRD },
- { "laag", 0xe8, INSTR_RSY_RRRD },
- { "laalg", 0xea, INSTR_RSY_RRRD },
- { "loc", 0xf2, INSTR_RSY_RDRM },
- { "stoc", 0xf3, INSTR_RSY_RDRM },
- { "lan", 0xf4, INSTR_RSY_RRRD },
- { "lao", 0xf6, INSTR_RSY_RRRD },
- { "lax", 0xf7, INSTR_RSY_RRRD },
- { "laa", 0xf8, INSTR_RSY_RRRD },
- { "laal", 0xfa, INSTR_RSY_RRRD },
- { "lric", 0x60, INSTR_RSY_RDRM },
- { "stric", 0x61, INSTR_RSY_RDRM },
- { "mric", 0x62, INSTR_RSY_RDRM },
- { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
- { "rll", 0x1d, INSTR_RSY_RRRD },
- { "mvclu", 0x8e, INSTR_RSY_RRRD },
- { "tp", 0xc0, INSTR_RSL_R0RD },
- { "", 0, INSTR_INVALID }
+ [VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
+ [V_8] = { 4, 8, OPERAND_VR },
+ [V_12] = { 4, 12, OPERAND_VR },
+ [V_16] = { 4, 16, OPERAND_VR },
+ [V_32] = { 4, 32, OPERAND_VR },
+ [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
};
-static struct s390_insn opcode_ec[] = {
- { "brxhg", 0x44, INSTR_RIE_RRP },
- { "brxlg", 0x45, INSTR_RIE_RRP },
- { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
- { "rnsbg", 0x54, INSTR_RIE_RRUUU },
- { "risbg", 0x55, INSTR_RIE_RRUUU },
- { "rosbg", 0x56, INSTR_RIE_RRUUU },
- { "rxsbg", 0x57, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBGN }, 0x59, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
- { "cgrj", 0x64, INSTR_RIE_RRPU },
- { "clgrj", 0x65, INSTR_RIE_RRPU },
- { "cgit", 0x70, INSTR_RIE_R0IU },
- { "clgit", 0x71, INSTR_RIE_R0UU },
- { "cit", 0x72, INSTR_RIE_R0IU },
- { "clfit", 0x73, INSTR_RIE_R0UU },
- { "crj", 0x76, INSTR_RIE_RRPU },
- { "clrj", 0x77, INSTR_RIE_RRPU },
- { "cgij", 0x7c, INSTR_RIE_RUPI },
- { "clgij", 0x7d, INSTR_RIE_RUPU },
- { "cij", 0x7e, INSTR_RIE_RUPI },
- { "clij", 0x7f, INSTR_RIE_RUPU },
- { "ahik", 0xd8, INSTR_RIE_RRI0 },
- { "aghik", 0xd9, INSTR_RIE_RRI0 },
- { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
- { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
- { "cgrb", 0xe4, INSTR_RRS_RRRDU },
- { "clgrb", 0xe5, INSTR_RRS_RRRDU },
- { "crb", 0xf6, INSTR_RRS_RRRDU },
- { "clrb", 0xf7, INSTR_RRS_RRRDU },
- { "cgib", 0xfc, INSTR_RIS_RURDI },
- { "clgib", 0xfd, INSTR_RIS_RURDU },
- { "cib", 0xfe, INSTR_RIS_RURDI },
- { "clib", 0xff, INSTR_RIS_RURDU },
- { "", 0, INSTR_INVALID }
+static const unsigned char formats[][6] = {
+ [INSTR_E] = { 0, 0, 0, 0, 0, 0 },
+ [INSTR_IE_UU] = { U4_24, U4_28, 0, 0, 0, 0 },
+ [INSTR_MII_UPP] = { U4_8, J12_12, J24_24 },
+ [INSTR_RIE_R0IU] = { R_8, I16_16, U4_32, 0, 0, 0 },
+ [INSTR_RIE_R0UU] = { R_8, U16_16, U4_32, 0, 0, 0 },
+ [INSTR_RIE_RRI0] = { R_8, R_12, I16_16, 0, 0, 0 },
+ [INSTR_RIE_RRP] = { R_8, R_12, J16_16, 0, 0, 0 },
+ [INSTR_RIE_RRPU] = { R_8, R_12, U4_32, J16_16, 0, 0 },
+ [INSTR_RIE_RRUUU] = { R_8, R_12, U8_16, U8_24, U8_32, 0 },
+ [INSTR_RIE_RUI0] = { R_8, I16_16, U4_12, 0, 0, 0 },
+ [INSTR_RIE_RUPI] = { R_8, I8_32, U4_12, J16_16, 0, 0 },
+ [INSTR_RIE_RUPU] = { R_8, U8_32, U4_12, J16_16, 0, 0 },
+ [INSTR_RIL_RI] = { R_8, I32_16, 0, 0, 0, 0 },
+ [INSTR_RIL_RP] = { R_8, J32_16, 0, 0, 0, 0 },
+ [INSTR_RIL_RU] = { R_8, U32_16, 0, 0, 0, 0 },
+ [INSTR_RIL_UP] = { U4_8, J32_16, 0, 0, 0, 0 },
+ [INSTR_RIS_RURDI] = { R_8, I8_32, U4_12, D_20, B_16, 0 },
+ [INSTR_RIS_RURDU] = { R_8, U8_32, U4_12, D_20, B_16, 0 },
+ [INSTR_RI_RI] = { R_8, I16_16, 0, 0, 0, 0 },
+ [INSTR_RI_RP] = { R_8, J16_16, 0, 0, 0, 0 },
+ [INSTR_RI_RU] = { R_8, U16_16, 0, 0, 0, 0 },
+ [INSTR_RI_UP] = { U4_8, J16_16, 0, 0, 0, 0 },
+ [INSTR_RRE_00] = { 0, 0, 0, 0, 0, 0 },
+ [INSTR_RRE_AA] = { A_24, A_28, 0, 0, 0, 0 },
+ [INSTR_RRE_AR] = { A_24, R_28, 0, 0, 0, 0 },
+ [INSTR_RRE_F0] = { F_24, 0, 0, 0, 0, 0 },
+ [INSTR_RRE_FF] = { F_24, F_28, 0, 0, 0, 0 },
+ [INSTR_RRE_FR] = { F_24, R_28, 0, 0, 0, 0 },
+ [INSTR_RRE_R0] = { R_24, 0, 0, 0, 0, 0 },
+ [INSTR_RRE_RA] = { R_24, A_28, 0, 0, 0, 0 },
+ [INSTR_RRE_RF] = { R_24, F_28, 0, 0, 0, 0 },
+ [INSTR_RRE_RR] = { R_24, R_28, 0, 0, 0, 0 },
+ [INSTR_RRF_0UFF] = { F_24, F_28, U4_20, 0, 0, 0 },
+ [INSTR_RRF_0URF] = { R_24, F_28, U4_20, 0, 0, 0 },
+ [INSTR_RRF_F0FF] = { F_16, F_24, F_28, 0, 0, 0 },
+ [INSTR_RRF_F0FF2] = { F_24, F_16, F_28, 0, 0, 0 },
+ [INSTR_RRF_F0FR] = { F_24, F_16, R_28, 0, 0, 0 },
+ [INSTR_RRF_FFRU] = { F_24, F_16, R_28, U4_20, 0, 0 },
+ [INSTR_RRF_FUFF] = { F_24, F_16, F_28, U4_20, 0, 0 },
+ [INSTR_RRF_FUFF2] = { F_24, F_28, F_16, U4_20, 0, 0 },
+ [INSTR_RRF_R0RR] = { R_24, R_16, R_28, 0, 0, 0 },
+ [INSTR_RRF_R0RR2] = { R_24, R_28, R_16, 0, 0, 0 },
+ [INSTR_RRF_RURR] = { R_24, R_28, R_16, U4_20, 0, 0 },
+ [INSTR_RRF_RURR2] = { R_24, R_16, R_28, U4_20, 0, 0 },
+ [INSTR_RRF_U0FF] = { F_24, U4_16, F_28, 0, 0, 0 },
+ [INSTR_RRF_U0RF] = { R_24, U4_16, F_28, 0, 0, 0 },
+ [INSTR_RRF_U0RR] = { R_24, R_28, U4_16, 0, 0, 0 },
+ [INSTR_RRF_UUFF] = { F_24, U4_16, F_28, U4_20, 0, 0 },
+ [INSTR_RRF_UUFR] = { F_24, U4_16, R_28, U4_20, 0, 0 },
+ [INSTR_RRF_UURF] = { R_24, U4_16, F_28, U4_20, 0, 0 },
+ [INSTR_RRS_RRRDU] = { R_8, R_12, U4_32, D_20, B_16 },
+ [INSTR_RR_FF] = { F_8, F_12, 0, 0, 0, 0 },
+ [INSTR_RR_R0] = { R_8, 0, 0, 0, 0, 0 },
+ [INSTR_RR_RR] = { R_8, R_12, 0, 0, 0, 0 },
+ [INSTR_RR_U0] = { U8_8, 0, 0, 0, 0, 0 },
+ [INSTR_RR_UR] = { U4_8, R_12, 0, 0, 0, 0 },
+ [INSTR_RSI_RRP] = { R_8, R_12, J16_16, 0, 0, 0 },
+ [INSTR_RSL_LRDFU] = { F_32, D_20, L8_8, B_16, U4_36, 0 },
+ [INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 },
+ [INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 },
+ [INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 },
+ [INSTR_RS_AARD] = { A_8, A_12, D_20, B_16, 0, 0 },
+ [INSTR_RS_CCRD] = { C_8, C_12, D_20, B_16, 0, 0 },
+ [INSTR_RS_R0RD] = { R_8, D_20, B_16, 0, 0, 0 },
+ [INSTR_RS_RRRD] = { R_8, R_12, D_20, B_16, 0, 0 },
+ [INSTR_RS_RURD] = { R_8, U4_12, D_20, B_16, 0, 0 },
+ [INSTR_RXE_FRRD] = { F_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_RXE_RRRDU] = { R_8, D_20, X_12, B_16, U4_32, 0 },
+ [INSTR_RXF_FRRDF] = { F_32, F_8, D_20, X_12, B_16, 0 },
+ [INSTR_RXY_FRRD] = { F_8, D20_20, X_12, B_16, 0, 0 },
+ [INSTR_RXY_RRRD] = { R_8, D20_20, X_12, B_16, 0, 0 },
+ [INSTR_RXY_URRD] = { U4_8, D20_20, X_12, B_16, 0, 0 },
+ [INSTR_RX_FRRD] = { F_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_RX_RRRD] = { R_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_RX_URRD] = { U4_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_SIL_RDI] = { D_20, B_16, I16_32, 0, 0, 0 },
+ [INSTR_SIL_RDU] = { D_20, B_16, U16_32, 0, 0, 0 },
+ [INSTR_SIY_IRD] = { D20_20, B_16, I8_8, 0, 0, 0 },
+ [INSTR_SIY_URD] = { D20_20, B_16, U8_8, 0, 0, 0 },
+ [INSTR_SI_RD] = { D_20, B_16, 0, 0, 0, 0 },
+ [INSTR_SI_URD] = { D_20, B_16, U8_8, 0, 0, 0 },
+ [INSTR_SMI_U0RDP] = { U4_8, J16_32, D_20, B_16, 0, 0 },
+ [INSTR_SSE_RDRD] = { D_20, B_16, D_36, B_32, 0, 0 },
+ [INSTR_SSF_RRDRD] = { D_20, B_16, D_36, B_32, R_8, 0 },
+ [INSTR_SSF_RRDRD2] = { R_8, D_20, B_16, D_36, B_32, 0 },
+ [INSTR_SS_L0RDRD] = { D_20, L8_8, B_16, D_36, B_32, 0 },
+ [INSTR_SS_L2RDRD] = { D_20, B_16, D_36, L8_8, B_32, 0 },
+ [INSTR_SS_LIRDRD] = { D_20, L4_8, B_16, D_36, B_32, U4_12 },
+ [INSTR_SS_LLRDRD] = { D_20, L4_8, B_16, D_36, L4_12, B_32 },
+ [INSTR_SS_RRRDRD] = { D_20, R_8, B_16, D_36, B_32, R_12 },
+ [INSTR_SS_RRRDRD2] = { R_8, D_20, B_16, R_12, D_36, B_32 },
+ [INSTR_SS_RRRDRD3] = { R_8, R_12, D_20, B_16, D_36, B_32 },
+ [INSTR_S_00] = { 0, 0, 0, 0, 0, 0 },
+ [INSTR_S_RD] = { D_20, B_16, 0, 0, 0, 0 },
+ [INSTR_VRI_V0IU] = { V_8, I16_16, U4_32, 0, 0, 0 },
+ [INSTR_VRI_V0U] = { V_8, U16_16, 0, 0, 0, 0 },
+ [INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 },
+ [INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 },
+ [INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 },
+ [INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 },
+ [INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 },
+ [INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 },
+ [INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 },
+ [INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 },
+ [INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 },
+ [INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 },
+ [INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 },
+ [INSTR_VRR_RV0U] = { R_8, V_12, U4_24, 0, 0, 0 },
+ [INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 },
+ [INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 },
+ [INSTR_VRR_VV0U] = { V_8, V_12, U4_32, 0, 0, 0 },
+ [INSTR_VRR_VV0U0U] = { V_8, V_12, U4_32, U4_24, 0, 0 },
+ [INSTR_VRR_VV0UU2] = { V_8, V_12, U4_32, U4_28, 0, 0 },
+ [INSTR_VRR_VV0UUU] = { V_8, V_12, U4_32, U4_28, U4_24, 0 },
+ [INSTR_VRR_VVV] = { V_8, V_12, V_16, 0, 0, 0 },
+ [INSTR_VRR_VVV0U] = { V_8, V_12, V_16, U4_32, 0, 0 },
+ [INSTR_VRR_VVV0U0U] = { V_8, V_12, V_16, U4_32, U4_24, 0 },
+ [INSTR_VRR_VVV0UU] = { V_8, V_12, V_16, U4_32, U4_28, 0 },
+ [INSTR_VRR_VVV0UUU] = { V_8, V_12, V_16, U4_32, U4_28, U4_24 },
+ [INSTR_VRR_VVV0V] = { V_8, V_12, V_16, V_32, 0, 0 },
+ [INSTR_VRR_VVVU0UV] = { V_8, V_12, V_16, V_32, U4_28, U4_20 },
+ [INSTR_VRR_VVVU0V] = { V_8, V_12, V_16, V_32, U4_20, 0 },
+ [INSTR_VRR_VVVUU0V] = { V_8, V_12, V_16, V_32, U4_20, U4_24 },
+ [INSTR_VRS_RRDV] = { V_32, R_12, D_20, B_16, 0, 0 },
+ [INSTR_VRS_RVRDU] = { R_8, V_12, D_20, B_16, U4_32, 0 },
+ [INSTR_VRS_VRRD] = { V_8, R_12, D_20, B_16, 0, 0 },
+ [INSTR_VRS_VRRDU] = { V_8, R_12, D_20, B_16, U4_32, 0 },
+ [INSTR_VRS_VVRD] = { V_8, V_12, D_20, B_16, 0, 0 },
+ [INSTR_VRS_VVRDU] = { V_8, V_12, D_20, B_16, U4_32, 0 },
+ [INSTR_VRV_VVXRDU] = { V_8, D_20, VX_12, B_16, U4_32, 0 },
+ [INSTR_VRX_VRRD] = { V_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_VRX_VRRDU] = { V_8, D_20, X_12, B_16, U4_32, 0 },
+ [INSTR_VRX_VV] = { V_8, V_12, 0, 0, 0, 0 },
+ [INSTR_VSI_URDV] = { V_32, D_20, B_16, U8_8, 0, 0 },
};
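
The table above encodes each instruction format as a row of up to six indices into operands[], and each operands[] entry gives one operand's width in bits, its bit position within the instruction, and its type flags. The following sketch is not part of the patch; it mirrors the loop in print_insn() further down to show how such a row drives operand extraction, and the comment about the entry layout is an interpretation rather than quoted code.

/* Sketch only: walk one format row, extracting each operand.
 * Each operands[] entry is roughly { bits, shift, flags }. */
static void walk_operands(struct s390_insn *insn, unsigned char *code)
{
	const unsigned char *ops;
	unsigned int value;
	int i;

	for (ops = formats[insn->format], i = 0; *ops != 0 && i < 6; ops++, i++) {
		value = extract_operand(code, operands + *ops);
		/* render "value" according to the operand's flags, e.g.
		 * OPERAND_GPR as a %r register number, OPERAND_VR as a
		 * %v register number, OPERAND_INDEX as an index register */
	}
}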
-static struct s390_insn opcode_ed[] = {
- { "mayl", 0x38, INSTR_RXF_FRRDF },
- { "myl", 0x39, INSTR_RXF_FRRDF },
- { "may", 0x3a, INSTR_RXF_FRRDF },
- { "my", 0x3b, INSTR_RXF_FRRDF },
- { "mayh", 0x3c, INSTR_RXF_FRRDF },
- { "myh", 0x3d, INSTR_RXF_FRRDF },
- { "sldt", 0x40, INSTR_RXF_FRRDF },
- { "srdt", 0x41, INSTR_RXF_FRRDF },
- { "slxt", 0x48, INSTR_RXF_FRRDF },
- { "srxt", 0x49, INSTR_RXF_FRRDF },
- { "tdcet", 0x50, INSTR_RXE_FRRD },
- { "tdget", 0x51, INSTR_RXE_FRRD },
- { "tdcdt", 0x54, INSTR_RXE_FRRD },
- { "tdgdt", 0x55, INSTR_RXE_FRRD },
- { "tdcxt", 0x58, INSTR_RXE_FRRD },
- { "tdgxt", 0x59, INSTR_RXE_FRRD },
- { "ley", 0x64, INSTR_RXY_FRRD },
- { "ldy", 0x65, INSTR_RXY_FRRD },
- { "stey", 0x66, INSTR_RXY_FRRD },
- { "stdy", 0x67, INSTR_RXY_FRRD },
- { "czdt", 0xa8, INSTR_RSL_LRDFU },
- { "czxt", 0xa9, INSTR_RSL_LRDFU },
- { "cdzt", 0xaa, INSTR_RSL_LRDFU },
- { "cxzt", 0xab, INSTR_RSL_LRDFU },
- { "ldeb", 0x04, INSTR_RXE_FRRD },
- { "lxdb", 0x05, INSTR_RXE_FRRD },
- { "lxeb", 0x06, INSTR_RXE_FRRD },
- { "mxdb", 0x07, INSTR_RXE_FRRD },
- { "keb", 0x08, INSTR_RXE_FRRD },
- { "ceb", 0x09, INSTR_RXE_FRRD },
- { "aeb", 0x0a, INSTR_RXE_FRRD },
- { "seb", 0x0b, INSTR_RXE_FRRD },
- { "mdeb", 0x0c, INSTR_RXE_FRRD },
- { "deb", 0x0d, INSTR_RXE_FRRD },
- { "maeb", 0x0e, INSTR_RXF_FRRDF },
- { "mseb", 0x0f, INSTR_RXF_FRRDF },
- { "tceb", 0x10, INSTR_RXE_FRRD },
- { "tcdb", 0x11, INSTR_RXE_FRRD },
- { "tcxb", 0x12, INSTR_RXE_FRRD },
- { "sqeb", 0x14, INSTR_RXE_FRRD },
- { "sqdb", 0x15, INSTR_RXE_FRRD },
- { "meeb", 0x17, INSTR_RXE_FRRD },
- { "kdb", 0x18, INSTR_RXE_FRRD },
- { "cdb", 0x19, INSTR_RXE_FRRD },
- { "adb", 0x1a, INSTR_RXE_FRRD },
- { "sdb", 0x1b, INSTR_RXE_FRRD },
- { "mdb", 0x1c, INSTR_RXE_FRRD },
- { "ddb", 0x1d, INSTR_RXE_FRRD },
- { "madb", 0x1e, INSTR_RXF_FRRDF },
- { "msdb", 0x1f, INSTR_RXF_FRRDF },
- { "lde", 0x24, INSTR_RXE_FRRD },
- { "lxd", 0x25, INSTR_RXE_FRRD },
- { "lxe", 0x26, INSTR_RXE_FRRD },
- { "mae", 0x2e, INSTR_RXF_FRRDF },
- { "mse", 0x2f, INSTR_RXF_FRRDF },
- { "sqe", 0x34, INSTR_RXE_FRRD },
- { "sqd", 0x35, INSTR_RXE_FRRD },
- { "mee", 0x37, INSTR_RXE_FRRD },
- { "mad", 0x3e, INSTR_RXF_FRRDF },
- { "msd", 0x3f, INSTR_RXF_FRRDF },
- { "", 0, INSTR_INVALID }
-};
+static char long_insn_name[][7] = LONG_INSN_INITIALIZER;
+static struct s390_insn opcode[] = OPCODE_TABLE_INITIALIZER;
+static struct s390_opcode_offset opcode_offset[] = OPCODE_OFFSET_INITIALIZER;
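
The mnemonic strings, the flat instruction table and the per-opcode offsets now come from generated initializer macros instead of the hand-written arrays removed above. The lookup code below only touches a few fields of these entries; the layout it implies looks roughly like the sketch below, where the field names are taken from the code but the exact field widths and packing are an assumption.

/* Sketch of the table entry layout implied by find_insn() below;
 * sizes and packing are assumptions, the field names are not. */
struct s390_opcode_offset {
	unsigned char opcode;	/* first opcode byte this entry matches  */
	unsigned char mask;	/* mask applied to the opcode fragment    */
	unsigned char byte;	/* instruction byte holding the fragment  */
	unsigned short offset;	/* first index into opcode[]              */
	unsigned short count;	/* number of opcode[] entries             */
};

struct s390_insn {
	union {
		const char name[5];		/* short mnemonic            */
		struct {
			unsigned char zero;	/* 0 marks a long mnemonic   */
			unsigned int offset;	/* index into long_insn_name */
		} __packed;
	};
	unsigned char opfrag;
	unsigned char format;
};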
/* Extracts an operand value from an instruction. */
static unsigned int extract_operand(unsigned char *code,
@@ -1777,114 +392,32 @@ static unsigned int extract_operand(unsigned char *code,
struct s390_insn *find_insn(unsigned char *code)
{
- unsigned char opfrag = code[1];
- unsigned char opmask;
- struct s390_insn *table;
+ struct s390_opcode_offset *entry;
+ struct s390_insn *insn;
+ unsigned char opfrag;
+ int i;
- switch (code[0]) {
- case 0x01:
- table = opcode_01;
- break;
- case 0xa5:
- table = opcode_a5;
- break;
- case 0xa7:
- table = opcode_a7;
- break;
- case 0xaa:
- table = opcode_aa;
- break;
- case 0xb2:
- table = opcode_b2;
- break;
- case 0xb3:
- table = opcode_b3;
- break;
- case 0xb9:
- table = opcode_b9;
- break;
- case 0xc0:
- table = opcode_c0;
- break;
- case 0xc2:
- table = opcode_c2;
- break;
- case 0xc4:
- table = opcode_c4;
- break;
- case 0xc6:
- table = opcode_c6;
- break;
- case 0xc8:
- table = opcode_c8;
- break;
- case 0xcc:
- table = opcode_cc;
- break;
- case 0xe3:
- table = opcode_e3;
- opfrag = code[5];
- break;
- case 0xe5:
- table = opcode_e5;
- break;
- case 0xe7:
- table = opcode_e7;
- opfrag = code[5];
- break;
- case 0xeb:
- table = opcode_eb;
- opfrag = code[5];
- break;
- case 0xec:
- table = opcode_ec;
- opfrag = code[5];
- break;
- case 0xed:
- table = opcode_ed;
- opfrag = code[5];
- break;
- default:
- table = opcode;
- opfrag = code[0];
- break;
- }
- while (table->format != INSTR_INVALID) {
- opmask = formats[table->format][0];
- if (table->opfrag == (opfrag & opmask))
- return table;
- table++;
+ /* Search the opcode offset table to find an entry which
+ * matches the beginning of the opcode. If there is no match
+ * the last entry will be used, which is the default entry for
+ * unknown instructions as well as 1-byte opcode instructions.
+ */
+ for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
+ entry = &opcode_offset[i];
+ if (entry->opcode == code[0])
+ break;
}
- return NULL;
-}
-/**
- * insn_to_mnemonic - decode an s390 instruction
- * @instruction: instruction to decode
- * @buf: buffer to fill with mnemonic
- * @len: length of buffer
- *
- * Decode the instruction at @instruction and store the corresponding
- * mnemonic into @buf of length @len.
- * @buf is left unchanged if the instruction could not be decoded.
- * Returns:
- * %0 on success, %-ENOENT if the instruction was not found.
- */
-int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
-{
- struct s390_insn *insn;
+ opfrag = *(code + entry->byte) & entry->mask;
- insn = find_insn(instruction);
- if (!insn)
- return -ENOENT;
- if (insn->name[0] == '\0')
- snprintf(buf, len, "%s",
- long_insn_name[(int) insn->name[1]]);
- else
- snprintf(buf, len, "%.5s", insn->name);
- return 0;
+ insn = &opcode[entry->offset];
+ for (i = 0; i < entry->count; i++) {
+ if (insn->opfrag == opfrag)
+ return insn;
+ insn++;
+ }
+ return NULL;
}
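
A small usage sketch, not part of the patch: with the tables above, resolving an instruction to a printable mnemonic only needs find_insn() plus the short/long name distinction that print_insn() below also relies on. The helper name and buffer handling here are made up.

/* Sketch: resolve one instruction to its mnemonic string. */
static const char *mnemonic(unsigned char *code, char *buf, size_t len)
{
	struct s390_insn *insn = find_insn(code);

	if (!insn)
		return "<unknown>";
	if (insn->zero == 0)	/* long mnemonic, stored as table offset */
		snprintf(buf, len, "%.7s", long_insn_name[insn->offset]);
	else
		snprintf(buf, len, "%.5s", insn->name);
	return buf;
}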
-EXPORT_SYMBOL_GPL(insn_to_mnemonic);
static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
{
@@ -1899,14 +432,14 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr = buffer;
insn = find_insn(code);
if (insn) {
- if (insn->name[0] == '\0')
- ptr += sprintf(ptr, "%s\t",
- long_insn_name[(int) insn->name[1]]);
+ if (insn->zero == 0)
+ ptr += sprintf(ptr, "%.7s\t",
+ long_insn_name[insn->offset]);
else
ptr += sprintf(ptr, "%.5s\t", insn->name);
/* Extract the operands. */
separator = 0;
- for (ops = formats[insn->format] + 1, i = 0;
+ for (ops = formats[insn->format], i = 0;
*ops != 0 && i < 6; ops++, i++) {
operand = operands + *ops;
value = extract_operand(code, operand);
@@ -1953,7 +486,7 @@ void show_code(struct pt_regs *regs)
{
char *mode = user_mode(regs) ? "User" : "Krnl";
unsigned char code[64];
- char buffer[64], *ptr;
+ char buffer[128], *ptr;
mm_segment_t old_fs;
unsigned long addr;
int start, end, opsize, hops, i;
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 2aa545dca4d5..5b23c4f6e50c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Stack dumping functions
*
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b945448b9eae..497a92047591 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -31,14 +31,6 @@
#include <asm/facility.h>
#include "entry.h"
-/*
- * Create a Kernel NSS if the SAVESYS= parameter is defined
- */
-#define DEFSYS_CMD_SIZE 128
-#define SAVESYS_CMD_SIZE 32
-
-char kernel_nss_name[NSS_NAME_SIZE + 1];
-
static void __init setup_boot_command_line(void);
/*
@@ -59,134 +51,6 @@ static void __init reset_tod_clock(void)
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
-#ifdef CONFIG_SHARED_KERNEL
-int __init savesys_ipl_nss(char *cmd, const int cmdlen);
-
-asm(
- " .section .init.text,\"ax\",@progbits\n"
- " .align 4\n"
- " .type savesys_ipl_nss, @function\n"
- "savesys_ipl_nss:\n"
- " stmg 6,15,48(15)\n"
- " lgr 14,3\n"
- " sam31\n"
- " diag 2,14,0x8\n"
- " sam64\n"
- " lgr 2,14\n"
- " lmg 6,15,48(15)\n"
- " br 14\n"
- " .size savesys_ipl_nss, .-savesys_ipl_nss\n"
- " .previous\n");
-
-static __initdata char upper_command_line[COMMAND_LINE_SIZE];
-
-static noinline __init void create_kernel_nss(void)
-{
- unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
-#ifdef CONFIG_BLK_DEV_INITRD
- unsigned int sinitrd_pfn, einitrd_pfn;
-#endif
- int response;
- int hlen;
- size_t len;
- char *savesys_ptr;
- char defsys_cmd[DEFSYS_CMD_SIZE];
- char savesys_cmd[SAVESYS_CMD_SIZE];
-
- /* Do nothing if we are not running under VM */
- if (!MACHINE_IS_VM)
- return;
-
- /* Convert COMMAND_LINE to upper case */
- for (i = 0; i < strlen(boot_command_line); i++)
- upper_command_line[i] = toupper(boot_command_line[i]);
-
- savesys_ptr = strstr(upper_command_line, "SAVESYS=");
-
- if (!savesys_ptr)
- return;
-
- savesys_ptr += 8; /* Point to the beginning of the NSS name */
- for (i = 0; i < NSS_NAME_SIZE; i++) {
- if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
- break;
- kernel_nss_name[i] = savesys_ptr[i];
- }
-
- stext_pfn = PFN_DOWN(__pa(&_stext));
- eshared_pfn = PFN_DOWN(__pa(&_eshared));
- end_pfn = PFN_UP(__pa(&_end));
- min_size = end_pfn << 2;
-
- hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
- "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
- kernel_nss_name, stext_pfn - 1, stext_pfn,
- eshared_pfn - 1, eshared_pfn, end_pfn);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (INITRD_START && INITRD_SIZE) {
- sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
- einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
- min_size = einitrd_pfn << 2;
- hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
- " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
- }
-#endif
-
- snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
- " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
- defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
- snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
- kernel_nss_name, kernel_nss_name);
- savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';
-
- __cpcmd(defsys_cmd, NULL, 0, &response);
-
- if (response != 0) {
- pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
- response);
- kernel_nss_name[0] = '\0';
- return;
- }
-
- len = strlen(savesys_cmd);
- ASCEBC(savesys_cmd, len);
- response = savesys_ipl_nss(savesys_cmd, len);
-
- /* On success: response is equal to the command size,
- * max SAVESYS_CMD_SIZE
- * On error: response contains the numeric portion of cp error message.
- * for SAVESYS it will be >= 263
- * for missing privilege class, it will be 1
- */
- if (response > SAVESYS_CMD_SIZE || response == 1) {
- pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
- response);
- kernel_nss_name[0] = '\0';
- return;
- }
-
- /* re-initialize cputime accounting. */
- get_tod_clock_ext(tod_clock_base);
- S390_lowcore.last_update_clock = *(__u64 *) &tod_clock_base[1];
- S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
- S390_lowcore.user_timer = 0;
- S390_lowcore.system_timer = 0;
- asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));
-
- /* re-setup boot command line with new ipl vm parms */
- ipl_update_parameters();
- setup_boot_command_line();
-
- ipl_flags = IPL_NSS_VALID;
-}
-
-#else /* CONFIG_SHARED_KERNEL */
-
-static inline void create_kernel_nss(void) { }
-
-#endif /* CONFIG_SHARED_KERNEL */
-
/*
* Clear bss memory
*/
@@ -375,8 +239,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(40))
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
- if (test_facility(50) && test_facility(73))
+ if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+ __ctl_set_bit(0, 55);
+ }
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
@@ -549,10 +415,6 @@ static void __init setup_boot_command_line(void)
append_to_cmdline(append_ipl_scpdata);
}
-/*
- * Save ipl parameters, clear bss memory, initialize storage keys
- * and create a kernel NSS at startup if the SAVESYS= parm is defined
- */
void __init startup_init(void)
{
reset_tod_clock();
@@ -569,7 +431,6 @@ void __init startup_init(void)
setup_arch_string();
ipl_update_parameters();
setup_boot_command_line();
- create_kernel_nss();
detect_diag9c();
detect_diag44();
detect_machine_facilities();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 7c6904d616d8..9e5f6cd8e4c2 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
+#include <asm/ctl_reg.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -179,18 +180,17 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
*/
ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- lgr %r1,%r2
- aghi %r1,__TASK_thread # thread_struct of prev task
- lg %r5,__TASK_stack(%r3) # start of kernel stack of next
- stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
- lgr %r1,%r3
- aghi %r1,__TASK_thread # thread_struct of next task
+ lghi %r4,__TASK_stack
+ lghi %r1,__TASK_thread
+ lg %r5,0(%r4,%r3) # start of kernel stack of next
+ stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
- lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
- mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+ lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
+ aghi %r3,__TASK_pid
+ mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
bzr %r14
@@ -378,13 +378,21 @@ ENTRY(system_call)
jg s390_handle_mcck # TIF bit will be cleared by handler
#
-# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
+# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
+ lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
+ jz .Lsysc_return
+#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
+ tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
+ jnz .Lsysc_set_fs_fixup
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
- jz .Lsysc_return
+ j .Lsysc_return
+.Lsysc_set_fs_fixup:
+#endif
larl %r14,.Lsysc_return
jg set_fs_fixup
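
Read as C, the reworked .Lsysc_asce path above amounts to roughly the following. This is an illustrative paraphrase only: load_ctl_reg() and machine_has_mvcos() are invented stand-ins for the lctlg instruction and the STFLE facility-bit test, and the lowcore field names are inferred from the __LC_* offsets used in the assembly.

/* Illustrative paraphrase of .Lsysc_asce, not a drop-in replacement. */
static void sysc_asce_sketch(struct lowcore *lc)
{
	clear_cpu_flag(CIF_ASCE_SECONDARY);
	load_ctl_reg(7, lc->vdso_asce);			/* secondary asce */
	if (!test_cpu_flag(CIF_ASCE_PRIMARY))
		return;					/* -> .Lsysc_return */
	if (IS_ENABLED(CONFIG_HAVE_MARCH_Z10_FEATURES) || machine_has_mvcos()) {
		set_fs_fixup();				/* handled out of line */
	} else {
		clear_cpu_flag(CIF_ASCE_PRIMARY);
		load_ctl_reg(1, lc->user_asce);		/* primary asce */
	}
}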
@@ -517,6 +525,7 @@ ENTRY(pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
+ lghi %r11,0
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
@@ -531,6 +540,7 @@ ENTRY(pgm_check_handler)
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
+ lghi %r11,_PIF_GUEST_FAULT
#endif
0: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 1f # -> enabled, can't be a double fault
@@ -548,13 +558,14 @@ ENTRY(pgm_check_handler)
jz 3f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: stg %r10,__THREAD_last_break(%r14)
-4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+4: lgr %r13,%r11
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ stg %r13,__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 5f
@@ -737,10 +748,18 @@ ENTRY(io_int_handler)
# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
+ lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
+ jz .Lio_return
+#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
+ tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
+ jnz .Lio_set_fs_fixup
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
- jz .Lio_return
+ j .Lio_return
+.Lio_set_fs_fixup:
+#endif
larl %r14,.Lio_return
jg set_fs_fixup
@@ -952,15 +971,56 @@ load_fpu_regs:
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
- la %r1,4095 # revalidate r1
- spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+ la %r1,4095 # validate r1
+ spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
+ sckc __LC_CLOCK_COMPARATOR # validate comparator
+ lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
- lghi %r14,__LC_CPU_TIMER_SAVE_AREA
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
+ jno .Lmcck_panic # control registers invalid -> panic
+ la %r14,4095
+ lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
+ ptlb
+ lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area
+ nill %r11,0xfc00 # MCESA_ORIGIN_MASK
+ TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
+ jno 0f
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID
+ jno 0f
+ .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
+0: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID
+ jo 0f
+ sr %r14,%r14
+0: sfpc %r14
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+ jo 0f
+ lghi %r14,__LC_FPREGS_SAVE_AREA
+ ld %f0,0(%r14)
+ ld %f1,8(%r14)
+ ld %f2,16(%r14)
+ ld %f3,24(%r14)
+ ld %f4,32(%r14)
+ ld %f5,40(%r14)
+ ld %f6,48(%r14)
+ ld %f7,56(%r14)
+ ld %f8,64(%r14)
+ ld %f9,72(%r14)
+ ld %f10,80(%r14)
+ ld %f11,88(%r14)
+ ld %f12,96(%r14)
+ ld %f13,104(%r14)
+ ld %f14,112(%r14)
+ ld %f15,120(%r14)
+ j 1f
+0: VLM %v0,%v15,0,%r11
+ VLM %v16,%v31,256,%r11
+1: lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
jo 3f
@@ -976,9 +1036,13 @@ ENTRY(mcck_int_handler)
la %r14,__LC_LAST_UPDATE_TIMER
2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
- jno .Lmcck_panic # no -> skip cleanup critical
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
+3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
+ jno .Lmcck_panic
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz 4f
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
+ jno .Lmcck_panic
+4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 905bde782490..e87758f8fbdc 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -78,6 +78,7 @@ long sys_s390_runtime_instr(int command, int signum);
long sys_s390_guarded_storage(int command, struct gs_cb __user *);
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
+long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user *return_code, unsigned long flags);
DECLARE_PER_CPU(u64, mt_cycles[8]);
diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
index bff39b66c9ff..d14dd1c2e524 100644
--- a/arch/s390/kernel/guarded_storage.c
+++ b/arch/s390/kernel/guarded_storage.c
@@ -12,11 +12,10 @@
#include <asm/guarded_storage.h>
#include "entry.h"
-void exit_thread_gs(void)
+void guarded_storage_release(struct task_struct *tsk)
{
- kfree(current->thread.gs_cb);
- kfree(current->thread.gs_bc_cb);
- current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
+ kfree(tsk->thread.gs_cb);
+ kfree(tsk->thread.gs_bc_cb);
}
static int gs_enable(void)
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 172002da7075..38a973ccf501 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -28,7 +28,7 @@ ENTRY(startup_continue)
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
- lghi %r0,__LC_PASTE
+ larl %r0,boot_vdso_data
stg %r0,__LC_VDSO_PER_CPU
#
# Setup stack
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 8e622bb52f7a..8ecb8726ac47 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ipl/reipl/dump support for Linux on s390.
*
@@ -279,8 +280,6 @@ static __init enum ipl_type get_ipl_type(void)
{
struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
- if (ipl_flags & IPL_NSS_VALID)
- return IPL_TYPE_NSS;
if (!(ipl_flags & IPL_DEVNO_VALID))
return IPL_TYPE_UNKNOWN;
if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -533,22 +532,6 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
.attrs = ipl_ccw_attrs_lpar
};
-/* NSS ipl device attributes */
-
-DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
-
-static struct attribute *ipl_nss_attrs[] = {
- &sys_ipl_type_attr.attr,
- &sys_ipl_nss_name_attr.attr,
- &sys_ipl_ccw_loadparm_attr.attr,
- &sys_ipl_vm_parm_attr.attr,
- NULL,
-};
-
-static struct attribute_group ipl_nss_attr_group = {
- .attrs = ipl_nss_attrs,
-};
-
/* UNKNOWN ipl device attributes */
static struct attribute *ipl_unknown_attrs[] = {
@@ -598,9 +581,6 @@ static int __init ipl_init(void)
case IPL_TYPE_FCP_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
- case IPL_TYPE_NSS:
- rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
- break;
default:
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_unknown_attr_group);
@@ -1172,18 +1152,6 @@ static int __init reipl_nss_init(void)
return rc;
reipl_block_ccw_init(reipl_block_nss);
- if (ipl_info.type == IPL_TYPE_NSS) {
- memset(reipl_block_nss->ipl_info.ccw.nss_name,
- ' ', NSS_NAME_SIZE);
- memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
- kernel_nss_name, strlen(kernel_nss_name));
- ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
- reipl_block_nss->ipl_info.ccw.vm_flags |=
- DIAG308_VM_FLAGS_NSS_VALID;
-
- reipl_block_ccw_fill_parms(reipl_block_nss);
- }
-
reipl_capabilities |= IPL_TYPE_NSS;
return 0;
}
@@ -1971,9 +1939,6 @@ void __init setup_ipl(void)
ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
break;
case IPL_TYPE_NSS:
- strncpy(ipl_info.data.nss.name, kernel_nss_name,
- sizeof(ipl_info.data.nss.name));
- break;
case IPL_TYPE_UNKNOWN:
/* We have no info to copy */
break;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 6842e4501e2e..af3722c28fd9 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Kernel Probes (KProbes)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright IBM Corp. 2002, 2006
*
* s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
@@ -161,8 +148,6 @@ struct swap_insn_args {
static int swap_instruction(void *data)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct swap_insn_args *args = data;
struct ftrace_insn new_insn, *insn;
struct kprobe *p = args->p;
@@ -185,9 +170,7 @@ static int swap_instruction(void *data)
ftrace_generate_nop_insn(&new_insn);
}
skip_ftrace:
- kcb->kprobe_status = KPROBE_SWAP_INST;
s390_kernel_write(p->addr, &new_insn, len);
- kcb->kprobe_status = status;
return 0;
}
NOKPROBE_SYMBOL(swap_instruction);
@@ -574,9 +557,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
const struct exception_table_entry *entry;
switch(kcb->kprobe_status) {
- case KPROBE_SWAP_INST:
- /* We are here because the instruction replacement failed */
- return 0;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index ae7dff110054..452502f9a0d9 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux Guest Relocation (LGR) detection
*
@@ -153,14 +154,13 @@ static void lgr_timer_set(void);
/*
* LGR timer callback
*/
-static void lgr_timer_fn(unsigned long ignored)
+static void lgr_timer_fn(struct timer_list *unused)
{
lgr_info_log();
lgr_timer_set();
}
-static struct timer_list lgr_timer =
- TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0);
+static struct timer_list lgr_timer;
/*
* Setup next LGR timer
@@ -181,6 +181,7 @@ static int __init lgr_init(void)
debug_register_view(lgr_dbf, &debug_hex_ascii_view);
lgr_info_get(&lgr_info_last);
debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last));
+ timer_setup(&lgr_timer, lgr_timer_fn, TIMER_DEFERRABLE);
lgr_timer_set();
return 0;
}
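[Editor's sketch] The lgr.c change converts the statically initialized deferrable timer to the timer_setup() API, in which the callback receives a struct timer_list pointer instead of an unsigned long cookie. A minimal sketch of the same conversion pattern, with illustrative names and a made-up one-second period:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;	/* illustrative, not from the patch */

	static void demo_timer_fn(struct timer_list *unused)
	{
		/* ... do the periodic work ... */
		mod_timer(&demo_timer, jiffies + HZ);	/* re-arm */
	}

	static int __init demo_init(void)
	{
		/* TIMER_DEFERRABLE keeps the deferrable semantics of the old
		 * TIMER_DEFERRED_INITIALIZER-based definition. */
		timer_setup(&demo_timer, demo_timer_fn, TIMER_DEFERRABLE);
		mod_timer(&demo_timer, jiffies + HZ);
		return 0;
	}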
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b0ba2c26b45e..a80050bbe2e4 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -106,7 +106,7 @@ static void __do_machine_kdump(void *image)
static noinline void __machine_kdump(void *image)
{
struct mcesa *mcesa;
- unsigned long cr2_old, cr2_new;
+ union ctlreg2 cr2_old, cr2_new;
int this_cpu, cpu;
lgr_info_log();
@@ -123,11 +123,12 @@ static noinline void __machine_kdump(void *image)
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- __ctl_store(cr2_old, 2, 2);
- cr2_new = cr2_old | (1UL << 4);
- __ctl_load(cr2_new, 2, 2);
+ __ctl_store(cr2_old.val, 2, 2);
+ cr2_new = cr2_old;
+ cr2_new.gse = 1;
+ __ctl_load(cr2_new.val, 2, 2);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- __ctl_load(cr2_old, 2, 2);
+ __ctl_load(cr2_old.val, 2, 2);
}
/*
* To create a good backchain for this CPU in the dump store_status
@@ -145,7 +146,7 @@ static noinline void __machine_kdump(void *image)
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
*/
-static int kdump_csum_valid(struct kimage *image)
+static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
int (*start_kdump)(int) = (void *)image->start;
@@ -154,9 +155,9 @@ static int kdump_csum_valid(struct kimage *image)
__arch_local_irq_stnsm(0xfb); /* disable DAT */
rc = start_kdump(0);
__arch_local_irq_stosm(0x04); /* enable DAT */
- return rc ? 0 : -EINVAL;
+ return rc == 0;
#else
- return -EINVAL;
+ return false;
#endif
}
@@ -219,10 +220,6 @@ int machine_kexec_prepare(struct kimage *image)
{
void *reboot_code_buffer;
- /* Can't replace kernel image since it is read-only. */
- if (ipl_flags & IPL_NSS_VALID)
- return -EOPNOTSUPP;
-
if (image->type == KEXEC_TYPE_CRASH)
return machine_kexec_prepare_kdump();
@@ -269,6 +266,7 @@ static void __do_machine_kexec(void *data)
s390_reset_system();
data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
+ __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
/* Call the moving routine */
(*data_mover)(&image->head, image->start);
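[Editor's sketch] The __machine_kdump() hunk replaces open-coded bit twiddling on control register 2 with the ctlreg2 union and its gse bit field. A hedged sketch of the pattern follows; the bit layout shown only mirrors the old `1UL << 4` constant and is not copied from asm/ctl_reg.h:

	/* Overlay named bit fields on the raw control-register value so
	 * single bits can be set without magic shift constants. */
	union demo_ctlreg2 {
		unsigned long val;
		struct {
			unsigned long	  : 59;
			unsigned long gse : 1;	/* guarded-storage-enable bit */
			unsigned long	  : 4;
		};
	};

	static void demo_with_gs_enabled(void (*fn)(void))
	{
		union demo_ctlreg2 old, new;

		__ctl_store(old.val, 2, 2);	/* read CR2 */
		new = old;
		new.gse = 1;			/* set exactly one bit */
		__ctl_load(new.val, 2, 2);
		fn();				/* run with guarded storage enabled */
		__ctl_load(old.val, 2, 2);	/* restore the original value */
	}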
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 1a27f307a920..b7abfad4fd7d 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Kernel module help for s390.
*
@@ -8,20 +9,6 @@
*
* based on i386 version
* Copyright (C) 2001 Rusty Russell.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/elf.h>
@@ -31,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <asm/alternative.h>
#if 0
#define DEBUGP printk
@@ -429,6 +417,19 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ const Elf_Shdr *s;
+ char *secstrings;
+
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (!strcmp(".altinstructions", secstrings + s->sh_name)) {
+ /* patch .altinstructions */
+ void *aseg = (void *)s->sh_addr;
+
+ apply_alternatives(aseg, aseg + s->sh_size);
+ }
+ }
+
jump_label_apply_nops(me);
return 0;
}
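[Editor's sketch] module_finalize() above locates the module's .altinstructions section by name and patches it with apply_alternatives(). The section walk is the standard pattern for finding a named ELF section at finalize time; a small generic sketch (the helper name is illustrative):

	#include <linux/elf.h>
	#include <linux/string.h>

	static const Elf_Shdr *demo_find_section(const Elf_Ehdr *hdr,
						 const Elf_Shdr *sechdrs,
						 const char *name)
	{
		const char *secstrings;
		const Elf_Shdr *s;

		/* Section names live in the section-header string table. */
		secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
		for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++)
			if (!strcmp(name, secstrings + s->sh_name))
				return s;
		return NULL;
	}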
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 31d03a84126c..c7a627620e5e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Machine check handler
*
@@ -12,6 +13,9 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
+#include <linux/log2.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
@@ -37,13 +41,94 @@ struct mcck_struct {
};
static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
+static struct kmem_cache *mcesa_cache;
+static unsigned long mcesa_origin_lc;
-static void s390_handle_damage(void)
+static inline int nmi_needs_mcesa(void)
{
- smp_send_stop();
+ return MACHINE_HAS_VX || MACHINE_HAS_GS;
+}
+
+static inline unsigned long nmi_get_mcesa_size(void)
+{
+ if (MACHINE_HAS_GS)
+ return MCESA_MAX_SIZE;
+ return MCESA_MIN_SIZE;
+}
+
+/*
+ * The initial machine check extended save area for the boot CPU.
+ * It will be replaced by nmi_init() with an allocated structure.
+ * The structure is required for machine checks happening early in
+ * the boot process.
+ */
+static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
+
+void __init nmi_alloc_boot_cpu(struct lowcore *lc)
+{
+ if (!nmi_needs_mcesa())
+ return;
+ lc->mcesad = (unsigned long) &boot_mcesa;
+ if (MACHINE_HAS_GS)
+ lc->mcesad |= ilog2(MCESA_MAX_SIZE);
+}
+
+static int __init nmi_init(void)
+{
+ unsigned long origin, cr0, size;
+
+ if (!nmi_needs_mcesa())
+ return 0;
+ size = nmi_get_mcesa_size();
+ if (size > MCESA_MIN_SIZE)
+ mcesa_origin_lc = ilog2(size);
+ /* create slab cache for the machine-check-extended-save-areas */
+ mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
+ if (!mcesa_cache)
+ panic("Couldn't create nmi save area cache");
+ origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
+ if (!origin)
+ panic("Couldn't allocate nmi save area");
+ /* The pointer is stored with mcesa_bits ORed in */
+ kmemleak_not_leak((void *) origin);
+ __ctl_store(cr0, 0, 0);
+ __ctl_clear_bit(0, 28); /* disable lowcore protection */
+ /* Replace boot_mcesa on the boot CPU */
+ S390_lowcore.mcesad = origin | mcesa_origin_lc;
+ __ctl_load(cr0, 0, 0);
+ return 0;
+}
+early_initcall(nmi_init);
+
+int nmi_alloc_per_cpu(struct lowcore *lc)
+{
+ unsigned long origin;
+
+ if (!nmi_needs_mcesa())
+ return 0;
+ origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
+ if (!origin)
+ return -ENOMEM;
+ /* The pointer is stored with mcesa_bits ORed in */
+ kmemleak_not_leak((void *) origin);
+ lc->mcesad = origin | mcesa_origin_lc;
+ return 0;
+}
+
+void nmi_free_per_cpu(struct lowcore *lc)
+{
+ if (!nmi_needs_mcesa())
+ return;
+ kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
+}
+
+static notrace void s390_handle_damage(void)
+{
+ smp_emergency_stop();
disabled_wait((unsigned long) __builtin_return_address(0));
while (1);
}
+NOKPROBE_SYMBOL(s390_handle_damage);
/*
* Main machine check handler function. Will be called with interrupts enabled
@@ -100,18 +185,15 @@ void s390_handle_mcck(void)
EXPORT_SYMBOL_GPL(s390_handle_mcck);
/*
- * returns 0 if all registers could be validated
+ * returns 0 if all required registers are available
* returns 1 otherwise
*/
-static int notrace s390_validate_registers(union mci mci, int umode)
+static int notrace s390_check_registers(union mci mci, int umode)
{
+ union ctlreg2 cr2;
int kill_task;
- u64 zero;
- void *fpt_save_area;
- struct mcesa *mcesa;
kill_task = 0;
- zero = 0;
if (!mci.gr) {
/*
@@ -122,18 +204,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
s390_handle_damage();
kill_task = 1;
}
- /* Validate control registers */
+ /* Check control registers */
if (!mci.cr) {
/*
* Control registers have unknown contents.
* Can't recover and therefore stopping machine.
*/
s390_handle_damage();
- } else {
- asm volatile(
- " lctlg 0,15,0(%0)\n"
- " ptlb\n"
- : : "a" (&S390_lowcore.cregs_save_area) : "memory");
}
if (!mci.fp) {
/*
@@ -141,86 +218,41 @@ static int notrace s390_validate_registers(union mci mci, int umode)
* kernel currently uses floating point registers the
* system is stopped. If the process has its floating
* pointer registers loaded it is terminated.
- * Otherwise just revalidate the registers.
*/
if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
s390_handle_damage();
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
}
- fpt_save_area = &S390_lowcore.floating_pt_save_area;
if (!mci.fc) {
/*
* Floating point control register can't be restored.
* If the kernel currently uses the floating pointer
* registers and needs the FPC register the system is
* stopped. If the process has its floating pointer
- * registers loaded it is terminated. Otherwiese the
- * FPC is just revalidated.
+ * registers loaded it is terminated.
*/
if (S390_lowcore.fpu_flags & KERNEL_FPC)
s390_handle_damage();
- asm volatile("lfpc %0" : : "Q" (zero));
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
- } else {
- asm volatile("lfpc %0"
- : : "Q" (S390_lowcore.fpt_creg_save_area));
}
- mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
- if (!MACHINE_HAS_VX) {
- /* Validate floating point registers */
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 1,8(%0)\n"
- " ld 2,16(%0)\n"
- " ld 3,24(%0)\n"
- " ld 4,32(%0)\n"
- " ld 5,40(%0)\n"
- " ld 6,48(%0)\n"
- " ld 7,56(%0)\n"
- " ld 8,64(%0)\n"
- " ld 9,72(%0)\n"
- " ld 10,80(%0)\n"
- " ld 11,88(%0)\n"
- " ld 12,96(%0)\n"
- " ld 13,104(%0)\n"
- " ld 14,112(%0)\n"
- " ld 15,120(%0)\n"
- : : "a" (fpt_save_area) : "memory");
- } else {
- /* Validate vector registers */
- union ctlreg0 cr0;
-
+ if (MACHINE_HAS_VX) {
if (!mci.vr) {
/*
* Vector registers can't be restored. If the kernel
* currently uses vector registers the system is
* stopped. If the process has its vector registers
- * loaded it is terminated. Otherwise just revalidate
- * the registers.
+ * loaded it is terminated.
*/
if (S390_lowcore.fpu_flags & KERNEL_VXR)
s390_handle_damage();
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
}
- cr0.val = S390_lowcore.cregs_save_area[0];
- cr0.afp = cr0.vx = 1;
- __ctl_load(cr0.val, 0, 0);
- asm volatile(
- " la 1,%0\n"
- " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
- " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
- : : "Q" (*(struct vx_array *) mcesa->vector_save_area)
- : "1");
- __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
- /* Validate access registers */
- asm volatile(
- " lam 0,15,0(%0)"
- : : "a" (&S390_lowcore.access_regs_save_area));
+ /* Check if access registers are valid */
if (!mci.ar) {
/*
* Access registers have unknown contents.
@@ -228,53 +260,41 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
kill_task = 1;
}
- /* Validate guarded storage registers */
- if (MACHINE_HAS_GS && (S390_lowcore.cregs_save_area[2] & (1UL << 4))) {
- if (!mci.gs)
+ /* Check guarded storage registers */
+ cr2.val = S390_lowcore.cregs_save_area[2];
+ if (cr2.gse) {
+ if (!mci.gs) {
/*
* Guarded storage register can't be restored and
* the current process uses guarded storage.
* It has to be terminated.
*/
kill_task = 1;
- else
- load_gs_cb((struct gs_cb *)
- mcesa->guarded_storage_save_area);
+ }
}
- /*
- * We don't even try to validate the TOD register, since we simply
- * can't write something sensible into that register.
- */
- /*
- * See if we can validate the TOD programmable register with its
- * old contents (should be zero) otherwise set it to zero.
- */
- if (!mci.pr)
- asm volatile(
- " sr 0,0\n"
- " sckpf"
- : : : "0", "cc");
- else
- asm volatile(
- " l 0,%0\n"
- " sckpf"
- : : "Q" (S390_lowcore.tod_progreg_save_area)
- : "0", "cc");
- /* Validate clock comparator register */
- set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
- if (!mci.wp)
+ if (!mci.wp) {
/*
* Can't tell if we come from user or kernel mode
* -> stopping machine.
*/
s390_handle_damage();
+ }
+ /* Check for invalid kernel instruction address */
+ if (!mci.ia && !umode) {
+ /*
+ * The instruction address got lost while running
+ * in the kernel -> stopping machine.
+ */
+ s390_handle_damage();
+ }
if (!mci.ms || !mci.pm || !mci.ia)
kill_task = 1;
return kill_task;
}
+NOKPROBE_SYMBOL(s390_check_registers);
/*
* Backup the guest's machine check info to its description block
@@ -300,6 +320,7 @@ static void notrace s390_backup_mcck_info(struct pt_regs *regs)
mcck_backup->failing_storage_address
= S390_lowcore.failing_storage_address;
}
+NOKPROBE_SYMBOL(s390_backup_mcck_info);
#define MAX_IPD_COUNT 29
#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
@@ -372,7 +393,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
s390_handle_damage();
}
}
- if (s390_validate_registers(mci, user_mode(regs))) {
+ if (s390_check_registers(mci, user_mode(regs))) {
/*
* Couldn't restore all register contents for the
* user space process -> mark task for termination.
@@ -443,6 +464,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
clear_cpu_flag(CIF_MCCK_GUEST);
nmi_exit();
}
+NOKPROBE_SYMBOL(s390_do_machine_check);
static int __init machine_check_init(void)
{
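[Editor's sketch] The nmi.c rework stores the machine-check extended save area (MCESA) origin in the lowcore with ilog2() of its size encoded in the low bits; because the area is size-aligned, those bits are otherwise zero. A hedged sketch of the encode/decode arithmetic implied by the hunk (helper names are illustrative):

	#include <linux/log2.h>

	static unsigned long demo_encode_mcesad(unsigned long origin,
						unsigned long size)
	{
		unsigned long lc = 0;

		/* Only the larger (guarded-storage capable) area encodes its size. */
		if (size > MCESA_MIN_SIZE)
			lc = ilog2(size);	/* e.g. ilog2(MCESA_MAX_SIZE) */
		return origin | lc;
	}

	static struct mcesa *demo_decode_mcesad(unsigned long mcesad)
	{
		/* Mask off the size bits to recover the pointer. */
		return (struct mcesa *)(mcesad & MCESA_ORIGIN_MASK);
	}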
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 746d03423333..cc085e2d2ce9 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
* Copyright IBM Corp. 2012, 2017
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#define KMSG_COMPONENT "cpum_cf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 08bfa17ba0a0..94f90cefbffc 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -10,34 +10,42 @@
/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */
-CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000);
-CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001);
-CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002);
-CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
-CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004);
-CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005);
-CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040);
-CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041);
-CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042);
-CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043);
-CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044);
-CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045);
-CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046);
-CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047);
-CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048);
-CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049);
-CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a);
-CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b);
-CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c);
-CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d);
-CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e);
-CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_fvn1, CPU_CYCLES, 0x0000);
+CPUMF_EVENT_ATTR(cf_fvn1, INSTRUCTIONS, 0x0001);
+CPUMF_EVENT_ATTR(cf_fvn1, L1I_DIR_WRITES, 0x0002);
+CPUMF_EVENT_ATTR(cf_fvn1, L1I_PENALTY_CYCLES, 0x0003);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES, 0x0020);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
+CPUMF_EVENT_ATTR(cf_fvn1, L1D_DIR_WRITES, 0x0004);
+CPUMF_EVENT_ATTR(cf_fvn1, L1D_PENALTY_CYCLES, 0x0005);
+CPUMF_EVENT_ATTR(cf_fvn3, CPU_CYCLES, 0x0000);
+CPUMF_EVENT_ATTR(cf_fvn3, INSTRUCTIONS, 0x0001);
+CPUMF_EVENT_ATTR(cf_fvn3, L1I_DIR_WRITES, 0x0002);
+CPUMF_EVENT_ATTR(cf_fvn3, L1I_PENALTY_CYCLES, 0x0003);
+CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES, 0x0020);
+CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
+CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
+CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_FUNCTIONS, 0x0040);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_CYCLES, 0x0041);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS, 0x0042);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_CYCLES, 0x0043);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_FUNCTIONS, 0x0044);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_CYCLES, 0x0045);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS, 0x0046);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_CYCLES, 0x0047);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_FUNCTIONS, 0x0048);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_CYCLES, 0x0049);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS, 0x004a);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_CYCLES, 0x004b);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_FUNCTIONS, 0x004c);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_CYCLES, 0x004d);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS, 0x004e);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_CYCLES, 0x004f);
CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
@@ -171,36 +179,105 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z14, L1D_L2D_SOURCED_WRITES, 0x0085);
+CPUMF_EVENT_ATTR(cf_z14, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z14, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z14, L1I_L2I_SOURCED_WRITES, 0x0088);
+CPUMF_EVENT_ATTR(cf_z14, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z14, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z14, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z14, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z14, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z14, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES, 0x0091);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES, 0x0093);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x0094);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x0095);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES, 0x0096);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x0097);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x0098);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES, 0x0099);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x009a);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x009b);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x009c);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES, 0x009d);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO, 0x009e);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
+CPUMF_EVENT_ATTR(cf_z14, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
+CPUMF_EVENT_ATTR(cf_z14, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z14, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e9);
+CPUMF_EVENT_ATTR(cf_z14, TX_NC_TABORT, 0x00f3);
+CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
-static struct attribute *cpumcf_pmu_event_attr[] __initdata = {
- CPUMF_EVENT_PTR(cf, CPU_CYCLES),
- CPUMF_EVENT_PTR(cf, INSTRUCTIONS),
- CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, PRNG_CYCLES),
- CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, SHA_CYCLES),
- CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, DEA_CYCLES),
- CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf, AES_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, AES_CYCLES),
- CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES),
+static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn1, L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, L1D_PENALTY_CYCLES),
+ NULL,
+};
+
+static struct attribute *cpumcf_fvn3_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_fvn3, CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn3, INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn3, L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn3, L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn3, L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn3, L1D_PENALTY_CYCLES),
+ NULL,
+};
+
+static struct attribute *cpumcf_svn_generic_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_CYCLES),
NULL,
};
@@ -353,6 +430,63 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
NULL,
};
+static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_GPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_L2D_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, ITLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, ITLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_L2I_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, TLB2_ENGINES_BUSY),
+ CPUMF_EVENT_PTR(cf_z14, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_z14, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_z14, L1C_TLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, BCD_DFP_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z14, VX_BCD_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z14, DECIMAL_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_z14, LAST_HOST_TRANSLATIONS),
+ CPUMF_EVENT_PTR(cf_z14, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+ CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+ NULL,
+};
+
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
@@ -379,7 +513,8 @@ static const struct attribute_group *cpumcf_pmu_attr_groups[] = {
static __init struct attribute **merge_attr(struct attribute **a,
- struct attribute **b)
+ struct attribute **b,
+ struct attribute **c)
{
struct attribute **new;
int j, i;
@@ -388,6 +523,8 @@ static __init struct attribute **merge_attr(struct attribute **a,
;
for (i = 0; b[i]; i++)
j++;
+ for (i = 0; c[i]; i++)
+ j++;
j++;
new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
@@ -398,6 +535,8 @@ static __init struct attribute **merge_attr(struct attribute **a,
new[j++] = a[i];
for (i = 0; b[i]; i++)
new[j++] = b[i];
+ for (i = 0; c[i]; i++)
+ new[j++] = c[i];
new[j] = NULL;
return new;
@@ -405,10 +544,26 @@ static __init struct attribute **merge_attr(struct attribute **a,
__init const struct attribute_group **cpumf_cf_event_group(void)
{
- struct attribute **combined, **model;
+ struct attribute **combined, **model, **cfvn, **csvn;
struct attribute *none[] = { NULL };
+ struct cpumf_ctr_info ci;
struct cpuid cpu_id;
+ /* Determine generic counters set(s) */
+ qctri(&ci);
+ switch (ci.cfvn) {
+ case 1:
+ cfvn = cpumcf_fvn1_pmu_event_attr;
+ break;
+ case 3:
+ cfvn = cpumcf_fvn3_pmu_event_attr;
+ break;
+ default:
+ cfvn = none;
+ }
+ csvn = cpumcf_svn_generic_pmu_event_attr;
+
+ /* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
case 0x2097:
@@ -427,12 +582,15 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 0x2965:
model = cpumcf_z13_pmu_event_attr;
break;
+ case 0x3906:
+ model = cpumcf_z14_pmu_event_attr;
+ break;
default:
model = none;
break;
}
- combined = merge_attr(cpumcf_pmu_event_attr, model);
+ combined = merge_attr(cfvn, csvn, model);
if (combined)
cpumcf_pmu_events_group.attrs = combined;
return cpumcf_pmu_attr_groups;
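[Editor's sketch] cpumf_cf_event_group() now picks the counter-first-version (cfvn) and counter-second-version (csvn) attribute sets at boot and merges them with the model-specific set. merge_attr() simply concatenates three NULL-terminated arrays; a generic sketch of the same counting-then-copying pattern:

	#include <linux/slab.h>
	#include <linux/sysfs.h>

	static struct attribute **demo_merge3(struct attribute **a,
					      struct attribute **b,
					      struct attribute **c)
	{
		struct attribute **new;
		int i, j = 0;

		for (i = 0; a[i]; i++)
			j++;
		for (i = 0; b[i]; i++)
			j++;
		for (i = 0; c[i]; i++)
			j++;
		new = kmalloc_array(j + 1, sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;
		j = 0;
		for (i = 0; a[i]; i++)
			new[j++] = a[i];
		for (i = 0; b[i]; i++)
			new[j++] = b[i];
		for (i = 0; c[i]; i++)
			new[j++] = c[i];
		new[j] = NULL;	/* keep the array NULL-terminated */
		return new;
	}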
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 7e1e40323b78..1c9ddd7aa5ec 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for the System z CPU-measurement Sampling Facility
*
* Copyright IBM Corp. 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#define KMSG_COMPONENT "cpum_sf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
@@ -15,6 +12,7 @@
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
+#include <linux/pid.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -77,6 +75,15 @@ struct sf_buffer {
unsigned long *tail; /* last sample-data-block-table */
};
+struct aux_buffer {
+ struct sf_buffer sfb;
+ unsigned long head; /* index of SDB of buffer head */
+ unsigned long alert_mark; /* index of SDB of alert request position */
+ unsigned long empty_mark; /* mark of SDB not marked full */
+ unsigned long *sdb_index; /* SDB address for fast lookup */
+ unsigned long *sdbt_index; /* SDBT address for fast lookup */
+};
+
struct cpu_hw_sf {
/* CPU-measurement sampling information block */
struct hws_qsi_info_block qsi;
@@ -85,6 +92,7 @@ struct cpu_hw_sf {
struct sf_buffer sfb; /* Sampling buffer */
unsigned int flags; /* Status flags */
struct perf_event *event; /* Scheduled perf event */
+ struct perf_output_handle handle; /* AUX buffer output handle */
};
static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);
@@ -341,22 +349,6 @@ static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
sfb_account_allocs(num, hwc);
}
-static size_t event_sample_size(struct hw_perf_event *hwc)
-{
- struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
- size_t sample_size;
-
- /* The sample size depends on the sampling function: The basic-sampling
- * function must be always enabled, diagnostic-sampling function is
- * optional.
- */
- sample_size = sfr->bsdes;
- if (SAMPL_DIAG_MODE(hwc))
- sample_size += sfr->dsdes;
-
- return sample_size;
-}
-
static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
{
if (cpuhw->sfb.sdbt)
@@ -366,35 +358,7 @@ static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
unsigned long n_sdb, freq, factor;
- size_t sfr_size, sample_size;
- struct sf_raw_sample *sfr;
-
- /* Allocate raw sample buffer
- *
- * The raw sample buffer is used to temporarily store sampling data
- * entries for perf raw sample processing. The buffer size mainly
- * depends on the size of diagnostic-sampling data entries which is
- * machine-specific. The exact size calculation includes:
- * 1. The first 4 bytes of diagnostic-sampling data entries are
- * already reflected in the sf_raw_sample structure. Subtract
- * these bytes.
- * 2. The perf raw sample data must be 8-byte aligned (u64) and
- * perf's internal data size must be considered too. So add
- * an additional u32 for correct alignment and subtract before
- * allocating the buffer.
- * 3. Store the raw sample buffer pointer in the perf event
- * hardware structure.
- */
- sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) +
- sizeof(u32), sizeof(u64));
- sfr_size -= sizeof(u32);
- sfr = kzalloc(sfr_size, GFP_KERNEL);
- if (!sfr)
- return -ENOMEM;
- sfr->size = sfr_size;
- sfr->bsdes = cpuhw->qsi.bsdes;
- sfr->dsdes = cpuhw->qsi.dsdes;
- RAWSAMPLE_REG(hwc) = (unsigned long) sfr;
+ size_t sample_size;
/* Calculate sampling buffers using 4K pages
*
@@ -420,7 +384,7 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
* ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
* to 511 SDBs).
*/
- sample_size = event_sample_size(hwc);
+ sample_size = sizeof(struct hws_basic_entry);
freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
factor = 1;
n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
@@ -619,10 +583,6 @@ static int reserve_pmc_hardware(void)
static void hw_perf_event_destroy(struct perf_event *event)
{
- /* Free raw sample buffer */
- if (RAWSAMPLE_REG(&event->hw))
- kfree((void *) RAWSAMPLE_REG(&event->hw));
-
/* Release PMC if this is the last perf event */
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
@@ -642,15 +602,8 @@ static void hw_init_period(struct hw_perf_event *hwc, u64 period)
static void hw_reset_registers(struct hw_perf_event *hwc,
unsigned long *sdbt_origin)
{
- struct sf_raw_sample *sfr;
-
/* (Re)set to first sample-data-block-table */
TEAR_REG(hwc) = (unsigned long) sdbt_origin;
-
- /* (Re)set raw sampling buffer register */
- sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
- memset(&sfr->basic, 0, sizeof(sfr->basic));
- memset(&sfr->diag, 0, sfr->dsdes);
}
static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
@@ -660,6 +613,67 @@ static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
si->min_sampl_rate, si->max_sampl_rate);
}
+static u32 cpumsf_pid_type(struct perf_event *event,
+ u32 pid, enum pid_type type)
+{
+ struct task_struct *tsk;
+
+ /* Idle process */
+ if (!pid)
+ goto out;
+
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ pid = -1;
+ if (tsk) {
+ /*
+ * Only top level events contain the pid namespace in which
+ * they are created.
+ */
+ if (event->parent)
+ event = event->parent;
+ pid = __task_pid_nr_ns(tsk, type, event->ns);
+ /*
+ * See also 1d953111b648
+ * "perf/core: Don't report zero PIDs for exiting tasks".
+ */
+ if (!pid && !pid_alive(tsk))
+ pid = -1;
+ }
+out:
+ return pid;
+}
+
+static void cpumsf_output_event_pid(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ u32 pid;
+ struct perf_event_header header;
+ struct perf_output_handle handle;
+
+ /*
+ * Obtain the PID from the basic-sampling data entry and
+ * correct the data->tid_entry.pid value.
+ */
+ pid = data->tid_entry.pid;
+
+ /* Protect callchain buffers, tasks */
+ rcu_read_lock();
+
+ perf_prepare_sample(&header, data, event, regs);
+ if (perf_output_begin(&handle, event, header.size))
+ goto out;
+
+ /* Update the process ID (see also kernel/events/core.c) */
+ data->tid_entry.pid = cpumsf_pid_type(event, pid, __PIDTYPE_TGID);
+ data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);
+
+ perf_output_sample(&handle, &header, data, event);
+ perf_output_end(&handle);
+out:
+ rcu_read_unlock();
+}
+
static int __hw_perf_event_init(struct perf_event *event)
{
struct cpu_hw_sf *cpuhw;
@@ -770,6 +784,10 @@ static int __hw_perf_event_init(struct perf_event *event)
hwc->extra_reg.reg = REG_OVERFLOW;
OVERFLOW_REG(hwc) = 0;
+ /* Use the AUX buffer. No need to allocate it ourselves */
+ if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
+ return 0;
+
/* Allocate the per-CPU sampling buffer using the CPU information
* from the event. If the event is not pinned to a particular
* CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
@@ -789,6 +807,14 @@ static int __hw_perf_event_init(struct perf_event *event)
break;
}
}
+
+ /* If PID/TID sampling is active, replace the default overflow
+ * handler to extract and resolve the PIDs from the basic-sampling
+ * data entries.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_TID)
+ if (is_default_overflow_handler(event))
+ event->overflow_handler = cpumsf_output_event_pid;
out:
return err;
}
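[Editor's sketch] The tail of __hw_perf_event_init() above installs cpumsf_output_event_pid() as the event's overflow handler so that PIDs taken from the basic-sampling entries can be resolved into the event's pid namespace. The guard is worth noting: the handler is only replaced while the event still uses the default one, so user-installed handlers are left alone. A condensed sketch of just that hook-up:

	/* Sketch: replace the default overflow handler only for events that
	 * sample PID/TID and have not installed their own handler. */
	static void demo_hook_pid_handler(struct perf_event *event)
	{
		if ((event->attr.sample_type & PERF_SAMPLE_TID) &&
		    is_default_overflow_handler(event))
			event->overflow_handler = cpumsf_output_event_pid;
	}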
@@ -823,12 +849,8 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
}
/* Check online status of the CPU to which the event is pinned */
- if (event->cpu >= 0) {
- if ((unsigned int)event->cpu >= nr_cpumask_bits)
- return -ENODEV;
- if (!cpu_online(event->cpu))
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
- }
/* Force reset of idle/hv excludes regardless of what the
* user requested.
@@ -870,10 +892,15 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
*/
if (cpuhw->event) {
hwc = &cpuhw->event->hw;
- /* Account number of overflow-designated buffer extents */
- sfb_account_overflows(cpuhw, hwc);
- if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
- extend_sampling_buffer(&cpuhw->sfb, hwc);
+ if (!(SAMPL_DIAG_MODE(hwc))) {
+ /*
+ * Account number of overflow-designated
+ * buffer extents
+ */
+ sfb_account_overflows(cpuhw, hwc);
+ if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
+ extend_sampling_buffer(&cpuhw->sfb, hwc);
+ }
}
/* (Re)enable the PMU and sampling facility */
@@ -888,6 +915,9 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
return;
}
+ /* Load current program parameter */
+ lpp(&S390_lowcore.lpp);
+
debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
"tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs,
cpuhw->lsctl.ed, cpuhw->lsctl.cd,
@@ -971,22 +1001,16 @@ static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
*
* Return non-zero if an event overflow occurred.
*/
-static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
+static int perf_push_sample(struct perf_event *event,
+ struct hws_basic_entry *basic)
{
int overflow;
struct pt_regs regs;
struct perf_sf_sde_regs *sde_regs;
struct perf_sample_data data;
- struct perf_raw_record raw = {
- .frag = {
- .size = sfr->size,
- .data = sfr,
- },
- };
/* Setup perf sample */
perf_sample_data_init(&data, 0, event->hw.last_period);
- data.raw = &raw;
/* Setup pt_regs to look like an CPU-measurement external interrupt
* using the Program Request Alert code. The regs.int_parm_long
@@ -998,11 +1022,11 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
- psw_bits(regs.psw).ia = sfr->basic.ia;
- psw_bits(regs.psw).dat = sfr->basic.T;
- psw_bits(regs.psw).wait = sfr->basic.W;
- psw_bits(regs.psw).pstate = sfr->basic.P;
- psw_bits(regs.psw).as = sfr->basic.AS;
+ psw_bits(regs.psw).ia = basic->ia;
+ psw_bits(regs.psw).dat = basic->T;
+ psw_bits(regs.psw).wait = basic->W;
+ psw_bits(regs.psw).pstate = basic->P;
+ psw_bits(regs.psw).as = basic->AS;
/*
* Use the hardware provided configuration level to decide if the
@@ -1015,7 +1039,7 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
* If the value differs from 0xffff (the host value), we assume to
* be a KVM guest.
*/
- switch (sfr->basic.CL) {
+ switch (basic->CL) {
case 1: /* logical partition */
sde_regs->in_guest = 0;
break;
@@ -1023,11 +1047,17 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
sde_regs->in_guest = 1;
break;
default: /* old machine, use heuristics */
- if (sfr->basic.gpp || sfr->basic.prim_asn != 0xffff)
+ if (basic->gpp || basic->prim_asn != 0xffff)
sde_regs->in_guest = 1;
break;
}
+ /*
+ * Store the PID value from the sample-data-entry to be
+ * processed and resolved by cpumsf_output_event_pid().
+ */
+ data.tid_entry.pid = basic->hpp & LPP_PID_MASK;
+
overflow = 0;
if (perf_exclude_event(event, &regs, sde_regs))
goto out;
@@ -1045,75 +1075,12 @@ static void perf_event_count_update(struct perf_event *event, u64 count)
local64_add(count, &event->count);
}
-static int sample_format_is_valid(struct hws_combined_entry *sample,
- unsigned int flags)
-{
- if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
- /* Only basic-sampling data entries with data-entry-format
- * version of 0x0001 can be processed.
- */
- if (sample->basic.def != 0x0001)
- return 0;
- if (flags & PERF_CPUM_SF_DIAG_MODE)
- /* The data-entry-format number of diagnostic-sampling data
- * entries can vary. Because diagnostic data is just passed
- * through, do only a sanity check on the DEF.
- */
- if (sample->diag.def < 0x8001)
- return 0;
- return 1;
-}
-
-static int sample_is_consistent(struct hws_combined_entry *sample,
- unsigned long flags)
-{
- /* This check applies only to basic-sampling data entries of potentially
- * combined-sampling data entries. Invalid entries cannot be processed
- * by the PMU and, thus, do not deliver an associated
- * diagnostic-sampling data entry.
- */
- if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE)))
- return 0;
- /*
- * Samples are skipped, if they are invalid or for which the
- * instruction address is not predictable, i.e., the wait-state bit is
- * set.
- */
- if (sample->basic.I || sample->basic.W)
- return 0;
- return 1;
-}
-
-static void reset_sample_slot(struct hws_combined_entry *sample,
- unsigned long flags)
-{
- if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
- sample->basic.def = 0;
- if (flags & PERF_CPUM_SF_DIAG_MODE)
- sample->diag.def = 0;
-}
-
-static void sfr_store_sample(struct sf_raw_sample *sfr,
- struct hws_combined_entry *sample)
-{
- if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE))
- sfr->basic = sample->basic;
- if (sfr->format & PERF_CPUM_SF_DIAG_MODE)
- memcpy(&sfr->diag, &sample->diag, sfr->dsdes);
-}
-
-static void debug_sample_entry(struct hws_combined_entry *sample,
- struct hws_trailer_entry *te,
- unsigned long flags)
+static void debug_sample_entry(struct hws_basic_entry *sample,
+ struct hws_trailer_entry *te)
{
debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
- "sampling data entry: te->f=%i basic.def=%04x (%p)"
- " diag.def=%04x (%p)\n", te->f,
- sample->basic.def, &sample->basic,
- (flags & PERF_CPUM_SF_DIAG_MODE)
- ? sample->diag.def : 0xFFFF,
- (flags & PERF_CPUM_SF_DIAG_MODE)
- ? &sample->diag : NULL);
+ "sampling data entry: te->f=%i basic.def=%04x (%p)\n",
+ te->f, sample->def, sample);
}
/* hw_collect_samples() - Walk through a sample-data-block and collect samples
@@ -1139,44 +1106,37 @@ static void debug_sample_entry(struct hws_combined_entry *sample,
static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
unsigned long long *overflow)
{
- unsigned long flags = SAMPL_FLAGS(&event->hw);
- struct hws_combined_entry *sample;
struct hws_trailer_entry *te;
- struct sf_raw_sample *sfr;
- size_t sample_size;
-
- /* Prepare and initialize raw sample data */
- sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw);
- sfr->format = flags & PERF_CPUM_SF_MODE_MASK;
+ struct hws_basic_entry *sample;
- sample_size = event_sample_size(&event->hw);
te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
- sample = (struct hws_combined_entry *) *sdbt;
+ sample = (struct hws_basic_entry *) *sdbt;
while ((unsigned long *) sample < (unsigned long *) te) {
/* Check for an empty sample */
- if (!sample->basic.def)
+ if (!sample->def)
break;
/* Update perf event period */
perf_event_count_update(event, SAMPL_RATE(&event->hw));
- /* Check sampling data entry */
- if (sample_format_is_valid(sample, flags)) {
+ /* Check whether sample is valid */
+ if (sample->def == 0x0001) {
/* If an event overflow occurred, the PMU is stopped to
* throttle event delivery. Remaining sample data is
* discarded.
*/
if (!*overflow) {
- if (sample_is_consistent(sample, flags)) {
+ /* Check whether sample is consistent */
+ if (sample->I == 0 && sample->W == 0) {
/* Deliver sample data to perf */
- sfr_store_sample(sfr, sample);
- *overflow = perf_push_sample(event, sfr);
+ *overflow = perf_push_sample(event,
+ sample);
}
} else
/* Count discarded samples */
*overflow += 1;
} else {
- debug_sample_entry(sample, te, flags);
+ debug_sample_entry(sample, te);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@@ -1192,8 +1152,8 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
}
/* Reset sample slot and advance to next sample */
- reset_sample_slot(sample, flags);
- sample += sample_size;
+ sample->def = 0;
+ sample++;
}
}
@@ -1219,6 +1179,13 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
int done;
+ /*
+ * AUX buffer is used when in diagnostic sampling mode.
+ * No perf events/samples are created.
+ */
+ if (SAMPL_DIAG_MODE(&event->hw))
+ return;
+
if (flush_all && SDB_FULL_BLOCKS(hwc))
flush_all = 0;
@@ -1295,6 +1262,439 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sampl_overflow, event_overflow);
}
+#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
+#define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0)
+#define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark)
+#define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark)
+
+/*
+ * Get trailer entry by index of SDB.
+ */
+static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux,
+ unsigned long index)
+{
+ unsigned long sdb;
+
+ index = AUX_SDB_INDEX(aux, index);
+ sdb = aux->sdb_index[index];
+ return (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+}
+
+/*
+ * Finish sampling on the CPU. Called by cpumsf_pmu_del() with the PMU
+ * disabled. Collect the full SDBs in the AUX buffer which have not yet
+ * reached the alert indicator position, and ignore the SDBs that are
+ * not full.
+ *
+ * 1. Scan SDBs to see how much data is there and consume them.
+ * 2. Remove alert indicator in the buffer.
+ */
+static void aux_output_end(struct perf_output_handle *handle)
+{
+ unsigned long i, range_scan, idx;
+ struct aux_buffer *aux;
+ struct hws_trailer_entry *te;
+
+ aux = perf_get_aux(handle);
+ if (!aux)
+ return;
+
+ range_scan = AUX_SDB_NUM_ALERT(aux);
+ for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+ if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
+ break;
+ }
+ /* i is num of SDBs which are full */
+ perf_aux_output_end(handle, i << PAGE_SHIFT);
+
+ /* Remove alert indicators in the buffer */
+ te = aux_sdb_trailer(aux, aux->alert_mark);
+ te->flags &= ~SDB_TE_ALERT_REQ_MASK;
+
+ debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
+}
+
+/*
+ * Start sampling on the CPU. Called by cpumsf_pmu_add() when an event
+ * is first added to the CPU or rescheduled to it again. It is called
+ * with the PMU disabled.
+ *
+ * 1. Reset the trailers of the SDBs to get ready for new data.
+ * 2. Tell the hardware where to put the data by resetting the SDB
+ *    buffer head (tear/dear).
+ */
+static int aux_output_begin(struct perf_output_handle *handle,
+ struct aux_buffer *aux,
+ struct cpu_hw_sf *cpuhw)
+{
+ unsigned long range;
+ unsigned long i, range_scan, idx;
+ unsigned long head, base, offset;
+ struct hws_trailer_entry *te;
+
+ if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
+ return -EINVAL;
+
+ aux->head = handle->head >> PAGE_SHIFT;
+ range = (handle->size + 1) >> PAGE_SHIFT;
+ if (range <= 1)
+ return -ENOMEM;
+
+ /*
+ * SDBs between aux->head and aux->empty_mark are already prepared for
+ * new data. range_scan is the number of SDBs outside that range.
+ */
+ if (range > AUX_SDB_NUM_EMPTY(aux)) {
+ range_scan = range - AUX_SDB_NUM_EMPTY(aux);
+ idx = aux->empty_mark + 1;
+ for (i = 0; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+ te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+ te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
+ te->overflow = 0;
+ }
+ /* Save the position of empty SDBs */
+ aux->empty_mark = aux->head + range - 1;
+ }
+
+ /* Set alert indicator */
+ aux->alert_mark = aux->head + range/2 - 1;
+ te = aux_sdb_trailer(aux, aux->alert_mark);
+ te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
+
+ /* Reset hardware buffer head */
+ head = AUX_SDB_INDEX(aux, aux->head);
+ base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
+ offset = head % CPUM_SF_SDB_PER_TABLE;
+ cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
+ cpuhw->lsctl.dear = aux->sdb_index[head];
+
+ debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
+ "head->alert_mark->empty_mark (num_alert, range)"
+ "[%lx -> %lx -> %lx] (%lx, %lx) "
+ "tear index %lx, tear %lx dear %lx\n",
+ aux->head, aux->alert_mark, aux->empty_mark,
+ AUX_SDB_NUM_ALERT(aux), range,
+ head / CPUM_SF_SDB_PER_TABLE,
+ cpuhw->lsctl.tear,
+ cpuhw->lsctl.dear);
+
+ return 0;
+}
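[Editor's sketch] aux_output_begin() ends by pointing the sampling hardware at the ring position of aux->head: the table-entry-address register (tear) gets the slot inside the sample-data-block table, and the data-entry-address register (dear) gets the SDB itself. The index arithmetic, isolated as a sketch that assumes the patch's sdbt_index/sdb_index lookup arrays:

	static void demo_point_hw_at_head(struct aux_buffer *aux,
					  struct cpu_hw_sf *cpuhw,
					  unsigned long head)	/* ring index of head SDB */
	{
		unsigned long base, offset;

		base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];	/* which SDBT page */
		offset = head % CPUM_SF_SDB_PER_TABLE;			/* slot within it */
		cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
		cpuhw->lsctl.dear = aux->sdb_index[head];
	}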
+
+/*
+ * Set alert indicator on SDB at index @alert_index while sampler is running.
+ *
+ * Return true if successfully.
+ * Return false if full indicator is already set by hardware sampler.
+ */
+static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ unsigned long long *overflow)
+{
+ unsigned long long orig_overflow, orig_flags, new_flags;
+ struct hws_trailer_entry *te;
+
+ te = aux_sdb_trailer(aux, alert_index);
+ do {
+ orig_flags = te->flags;
+ orig_overflow = te->overflow;
+ *overflow = orig_overflow;
+ if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+ /*
+ * SDB is already set by hardware.
+ * Abort and try to set somewhere
+ * behind.
+ */
+ return false;
+ }
+ new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
+ } while (!cmpxchg_double(&te->flags, &te->overflow,
+ orig_flags, orig_overflow,
+ new_flags, 0ULL));
+ return true;
+}
+
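[Editor's sketch] aux_set_alert() above (and aux_reset_buffer() below) update the trailer's flags and overflow words with cmpxchg_double(), i.e. as one 128-bit compare-and-swap, retrying whenever the hardware sampler changed either field in the meantime. A minimal sketch of the retry pattern on a demo structure (not the real trailer layout):

	struct demo_pair {
		unsigned long long flags;
		unsigned long long overflow;	/* must be adjacent to flags */
	} __aligned(16);	/* cmpxchg_double needs 16-byte alignment */

	static void demo_set_flag_and_clear_overflow(struct demo_pair *p,
						     unsigned long long bit)
	{
		unsigned long long old_flags, old_overflow;

		do {
			old_flags = p->flags;
			old_overflow = p->overflow;
		} while (!cmpxchg_double(&p->flags, &p->overflow,
					 old_flags, old_overflow,
					 old_flags | bit, 0ULL));
	}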
+/*
+ * aux_reset_buffer() - Scan and setup SDBs for new samples
+ * @aux: The AUX buffer to set
+ * @range: The range of SDBs to scan started from aux->head
+ * @overflow: Set to overflow count
+ *
+ * Set the alert indicator on the SDB at index aux->alert_mark. If this SDB is
+ * marked as empty, check whether it has already been marked full by the
+ * hardware sampler. If so, new data arrived before the alert indicator could
+ * be set; the caller should then try to set the alert indicator at some
+ * position behind.
+ *
+ * Scan the SDBs in the AUX buffer from behind aux->empty_mark. They were used
+ * previously and have already been consumed by user space. Reset these SDBs
+ * (clear the full and alert indicators) for new data.
+ * If aux->alert_mark falls in this area, just set it. The overflow count is
+ * recorded while scanning.
+ *
+ * SDBs between aux->head and aux->empty_mark were already reset last time and
+ * are ready for new samples, so scanning this area can be skipped.
+ *
+ * Return true if the alert indicator is set successfully and false if not.
+ */
+static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
+ unsigned long long *overflow)
+{
+ unsigned long long orig_overflow, orig_flags, new_flags;
+ unsigned long i, range_scan, idx;
+ struct hws_trailer_entry *te;
+
+ if (range <= AUX_SDB_NUM_EMPTY(aux))
+ /*
+ * No need to scan. All SDBs in range are marked as empty.
+ * Just set alert indicator. Should check race with hardware
+ * sampler.
+ */
+ return aux_set_alert(aux, aux->alert_mark, overflow);
+
+ if (aux->alert_mark <= aux->empty_mark)
+ /*
+ * Set alert indicator on empty SDB. Should check race
+ * with hardware sampler.
+ */
+ if (!aux_set_alert(aux, aux->alert_mark, overflow))
+ return false;
+
+ /*
+ * Scan the SDBs to clear the full and alert indicators used previously.
+ * Start scanning from one SDB behind empty_mark. If the new alert
+ * indicator falls into this range, set it.
+ */
+ range_scan = range - AUX_SDB_NUM_EMPTY(aux);
+ idx = aux->empty_mark + 1;
+ for (i = 0; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+ do {
+ orig_flags = te->flags;
+ orig_overflow = te->overflow;
+ new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
+ if (idx == aux->alert_mark)
+ new_flags |= SDB_TE_ALERT_REQ_MASK;
+ else
+ new_flags &= ~SDB_TE_ALERT_REQ_MASK;
+ } while (!cmpxchg_double(&te->flags, &te->overflow,
+ orig_flags, orig_overflow,
+ new_flags, 0ULL));
+ *overflow += orig_overflow;
+ }
+
+ /* Update empty_mark to new position */
+ aux->empty_mark = aux->head + range - 1;
+
+ return true;
+}
+
+/*
+ * Measurement alert handler for diagnostic mode sampling.
+ */
+static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
+{
+ struct aux_buffer *aux;
+ int done = 0;
+ unsigned long range = 0, size;
+ unsigned long long overflow = 0;
+ struct perf_output_handle *handle = &cpuhw->handle;
+ unsigned long num_sdb;
+
+ aux = perf_get_aux(handle);
+ if (WARN_ON_ONCE(!aux))
+ return;
+
+ /* Inform user space that new data arrived */
+ size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
+ perf_aux_output_end(handle, size);
+ num_sdb = aux->sfb.num_sdb;
+
+ while (!done) {
+ /* Get an output handle */
+ aux = perf_aux_output_begin(handle, cpuhw->event);
+ if (handle->size == 0) {
+ pr_err("The AUX buffer with %lu pages for the "
+ "diagnostic-sampling mode is full\n",
+ num_sdb);
+ debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n");
+ break;
+ }
+ if (WARN_ON_ONCE(!aux))
+ return;
+
+ /* Update head and alert_mark to new position */
+ aux->head = handle->head >> PAGE_SHIFT;
+ range = (handle->size + 1) >> PAGE_SHIFT;
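+ /*
+ * Put the alert indicator roughly in the middle of the newly
+ * available range so that the measurement alert fires before the
+ * hardware has filled all SDBs of this range.
+ */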
+ if (range == 1)
+ aux->alert_mark = aux->head;
+ else
+ aux->alert_mark = aux->head + range/2 - 1;
+
+ if (aux_reset_buffer(aux, range, &overflow)) {
+ if (!overflow) {
+ done = 1;
+ break;
+ }
+ size = range << PAGE_SHIFT;
+ perf_aux_output_end(&cpuhw->handle, size);
+ pr_err("Sample data caused the AUX buffer with %lu "
+ "pages to overflow\n", num_sdb);
+ debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
+ "overflow %llx\n",
+ aux->head, range, overflow);
+ } else {
+ size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
+ perf_aux_output_end(&cpuhw->handle, size);
+ debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
+ "already full, try another\n",
+ aux->head, aux->alert_mark);
+ }
+ }
+
+ if (done)
+ debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
+ "[%lx -> %lx -> %lx] (%lx, %lx)\n",
+ aux->head, aux->alert_mark, aux->empty_mark,
+ AUX_SDB_NUM_ALERT(aux), range);
+}
+
+/*
+ * Callback when freeing AUX buffers.
+ */
+static void aux_buffer_free(void *data)
+{
+ struct aux_buffer *aux = data;
+ unsigned long i, num_sdbt;
+
+ if (!aux)
+ return;
+
+ /* Free the SDBTs. The SDB pages are freed by the caller */
+ num_sdbt = aux->sfb.num_sdbt;
+ for (i = 0; i < num_sdbt; i++)
+ free_page(aux->sdbt_index[i]);
+
+ kfree(aux->sdbt_index);
+ kfree(aux->sdb_index);
+ kfree(aux);
+
+ debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free "
+ "%lu SDBTs\n", num_sdbt);
+}
+
+/*
+ * aux_buffer_setup() - Set up an AUX buffer for diagnostic-mode sampling
+ * @cpu: CPU on which to allocate, -1 means the current CPU
+ * @pages: Array of pointers to buffer pages passed from the perf core
+ * @nr_pages: Total number of pages
+ * @snapshot: Flag for snapshot mode
+ *
+ * This is the callback invoked when an event is set up to use an AUX buffer.
+ * The perf tool triggers it with an additional mmap() call on the event.
+ * Unlike the buffer for basic samples, the AUX buffer belongs to the event.
+ * For a per-thread event it is scheduled with the task among the online CPUs.
+ *
+ * Return the private AUX buffer structure on success or NULL on failure.
+ */
+static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
+ bool snapshot)
+{
+ struct sf_buffer *sfb;
+ struct aux_buffer *aux;
+ unsigned long *new, *tail;
+ int i, n_sdbt;
+
+ if (!nr_pages || !pages)
+ return NULL;
+
+ if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
+ pr_err("AUX buffer size (%i pages) is larger than the "
+ "maximum sampling buffer limit\n",
+ nr_pages);
+ return NULL;
+ } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
+ pr_err("AUX buffer size (%i pages) is less than the "
+ "minimum sampling buffer limit\n",
+ nr_pages);
+ return NULL;
+ }
+
+ /* Allocate aux_buffer struct for the event */
+ aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+ if (!aux)
+ goto no_aux;
+ sfb = &aux->sfb;
+
+ /* Allocate sdbt_index for fast reference */
+ n_sdbt = (nr_pages + CPUM_SF_SDB_PER_TABLE - 1) / CPUM_SF_SDB_PER_TABLE;
+ aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
+ if (!aux->sdbt_index)
+ goto no_sdbt_index;
+
+ /* Allocate sdb_index for fast reference */
+ aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
+ if (!aux->sdb_index)
+ goto no_sdb_index;
+
+ /* Allocate the first SDBT */
+ sfb->num_sdbt = 0;
+ sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!sfb->sdbt)
+ goto no_sdbt;
+ aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
+ tail = sfb->tail = sfb->sdbt;
+
+ /*
+ * Link the provided pages of the AUX buffer into SDBTs.
+ * Allocate additional SDBTs as needed.
+ */
+ for (i = 0; i < nr_pages; i++, tail++) {
+ if (require_table_link(tail)) {
+ new = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!new)
+ goto no_sdbt;
+ aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
+ /* Link the new SDBT into the chain; the low-order bit marks a table-link entry */
+ *tail = (unsigned long)(void *) new + 1;
+ tail = new;
+ }
+ /* Tail is the current entry in an SDBT */
+ *tail = (unsigned long)pages[i];
+ aux->sdb_index[i] = (unsigned long)pages[i];
+ }
+ sfb->num_sdb = nr_pages;
+
+ /* Link the last entry of the last SDBT back to the first SDBT, closing the ring */
+ *tail = (unsigned long) sfb->sdbt + 1;
+ sfb->tail = tail;
+
+ /*
+ * Initially all SDBs are zeroed, so mark them as empty.
+ * There is then no need to clear the full indicator
+ * when this event is first added.
+ */
+ aux->empty_mark = sfb->num_sdb - 1;
+
+ debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs"
+ " and %lu SDBs\n",
+ sfb->num_sdbt, sfb->num_sdb);
+
+ return aux;
+
+no_sdbt:
+ /* SDBs (AUX buffer pages) are freed by caller */
+ for (i = 0; i < sfb->num_sdbt; i++)
+ free_page(aux->sdbt_index[i]);
+ kfree(aux->sdb_index);
+no_sdb_index:
+ kfree(aux->sdbt_index);
+no_sdbt_index:
+ kfree(aux);
+no_aux:
+ return NULL;
+}
+
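Since the kerneldoc above notes that the perf tool triggers this callback with an additional mmap() on the event, the following user-space sketch shows that sequence. It is illustrative only and not part of the patch; it relies on the generic perf mmap ABI fields aux_offset/aux_size, and map_aux_area() plus its page counts are hypothetical names and values (the AUX size is typically a power-of-two number of pages).

/* Hypothetical sketch: map the ring buffer, then request and map the AUX area. */
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>

static void *map_aux_area(int event_fd, size_t nr_aux_pages)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	struct perf_event_mmap_page *mp;
	void *aux;

	/* Control page plus one data page (data pages: power of two). */
	mp = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		  MAP_SHARED, event_fd, 0);
	if (mp == MAP_FAILED)
		return NULL;

	/* Tell the kernel where the AUX area should live and how big it is. */
	mp->aux_offset = 2 * page;
	mp->aux_size = nr_aux_pages * page;

	/* This second mmap() ends up in aux_buffer_setup() above. */
	aux = mmap(NULL, mp->aux_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, event_fd, (off_t)mp->aux_offset);
	return aux == MAP_FAILED ? NULL : aux;
}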
static void cpumsf_pmu_read(struct perf_event *event)
{
/* Nothing to do ... updates are interrupt-driven */
@@ -1346,12 +1746,13 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
+ struct aux_buffer *aux;
int err;
if (cpuhw->flags & PMU_F_IN_USE)
return -EAGAIN;
- if (!cpuhw->sfb.sdbt)
+ if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
return -EINVAL;
err = 0;
@@ -1366,10 +1767,12 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
*/
cpuhw->lsctl.s = 0;
cpuhw->lsctl.h = 1;
- cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
- cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
- hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
+ if (!SAMPL_DIAG_MODE(&event->hw)) {
+ cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
+ cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
+ hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
+ }
/* Ensure sampling functions are in the disabled state. If disabled,
* switch on sampling enable control. */
@@ -1377,9 +1780,18 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
err = -EAGAIN;
goto out;
}
- cpuhw->lsctl.es = 1;
- if (SAMPL_DIAG_MODE(&event->hw))
+ if (SAMPL_DIAG_MODE(&event->hw)) {
+ aux = perf_aux_output_begin(&cpuhw->handle, event);
+ if (!aux) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = aux_output_begin(&cpuhw->handle, aux, cpuhw);
+ if (err)
+ goto out;
cpuhw->lsctl.ed = 1;
+ }
+ cpuhw->lsctl.es = 1;
/* Set in_use flag and store event */
cpuhw->event = event;
@@ -1405,6 +1817,8 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
cpuhw->flags &= ~PMU_F_IN_USE;
cpuhw->event = NULL;
+ if (SAMPL_DIAG_MODE(&event->hw))
+ aux_output_end(&cpuhw->handle);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
}
@@ -1452,6 +1866,9 @@ static struct pmu cpumf_sampling = {
.read = cpumsf_pmu_read,
.attr_groups = cpumsf_pmu_attr_groups,
+
+ .setup_aux = aux_buffer_setup,
+ .free_aux = aux_buffer_free,
};
static void cpumf_measurement_alert(struct ext_code ext_code,
@@ -1475,7 +1892,10 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
/* Program alert request */
if (alert & CPU_MF_INT_SF_PRA) {
if (cpuhw->flags & PMU_F_IN_USE)
- hw_perf_event_update(cpuhw->event, 0);
+ if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
+ hw_collect_aux(cpuhw);
+ else
+ hw_perf_event_update(cpuhw->event, 0);
else
WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
}
@@ -1594,6 +2014,9 @@ static int __init init_cpum_sampling_pmu(void)
return -ENODEV;
}
+ if (!si.as && !si.ad)
+ return -ENODEV;
+
if (si.bsdes != sizeof(struct hws_basic_entry)) {
pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
return -EINVAL;
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 93a386f4a3b5..0d770e513abf 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for s390x
*
* Copyright IBM Corp. 2012, 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#define KMSG_COMPONENT "perf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
new file mode 100644
index 000000000000..54e2d634b849
--- /dev/null
+++ b/arch/s390/kernel/perf_regs.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/perf_event.h>
+#include <linux/perf_regs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <asm/ptrace.h>
+#include <asm/fpu/api.h>
+#include <asm/fpu/types.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ freg_t fp;
+
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX))
+ return 0;
+
+ if (idx >= PERF_REG_S390_R0 && idx <= PERF_REG_S390_R15)
+ return regs->gprs[idx];
+
+ if (idx >= PERF_REG_S390_FP0 && idx <= PERF_REG_S390_FP15) {
+ if (!user_mode(regs))
+ return 0;
+
+ idx -= PERF_REG_S390_FP0;
+ fp = MACHINE_HAS_VX ? *(freg_t *)(current->thread.fpu.vxrs + idx)
+ : current->thread.fpu.fprs[idx];
+ return fp.ui;
+ }
+
+ if (idx == PERF_REG_S390_MASK)
+ return regs->psw.mask;
+ if (idx == PERF_REG_S390_PC)
+ return regs->psw.addr;
+
+ return regs->gprs[idx];
+}
+
+#define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
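For illustration only: assuming the uapi enum orders the registers as perf_reg_value() above implies (R0..R15 at bits 0-15), a sampling event that wants just the general purpose registers would pass a mask like the one below, which perf_reg_validate() accepts. The header path and constant names are assumptions taken from the code above, not shown in this diff.

#include <linux/perf_event.h>
#include <asm/perf_regs.h>	/* assumed uapi header defining PERF_REG_S390_* */

/* Select R0..R15 only; every bit lies below PERF_REG_S390_MAX. */
static const __u64 s390_gpr_regs_mask = (1ULL << (PERF_REG_S390_R15 + 1)) - 1;

/*
 * Used as: attr.sample_type |= PERF_SAMPLE_REGS_USER;
 *          attr.sample_regs_user = s390_gpr_regs_mask;
 */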
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_31BIT))
+ return PERF_SAMPLE_REGS_ABI_32;
+
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ /*
+ * Use the regs from the first interruption and let
+ * perf_sample_regs_intr() handle interrupts (regs == get_irq_regs()).
+ *
+ * Also save FPU registers for user-space tasks only.
+ */
+ regs_user->regs = task_pt_regs(current);
+ if (user_mode(regs_user->regs))
+ save_fpu_regs();
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a4a84fb08046..70576a2f69cf 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -44,27 +44,14 @@ asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
extern void kernel_thread_starter(void);
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(struct task_struct *tsk)
-{
- if (tsk == current) {
- exit_thread_runtime_instr();
- exit_thread_gs();
- }
-}
-
void flush_thread(void)
{
}
-void release_thread(struct task_struct *dead_task)
-{
-}
-
void arch_release_task_struct(struct task_struct *tsk)
{
+ runtime_instr_release(tsk);
+ guarded_storage_release(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -100,6 +87,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+ p->thread.per_flags = 0;
/* Initialize per thread user and system timer values */
p->thread.user_timer = 0;
p->thread.guest_timer = 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 1427d60ce628..cd3df5514552 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -31,6 +31,9 @@
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
+#include <asm/runtime_instr.h>
+#include <asm/facility.h>
+
#include "entry.h"
#ifdef CONFIG_COMPAT
@@ -45,42 +48,42 @@ void update_cr_regs(struct task_struct *task)
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
- unsigned long cr0_old, cr0_new;
- unsigned long cr2_old, cr2_new;
+ union ctlreg0 cr0_old, cr0_new;
+ union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
- __ctl_store(cr0_old, 0, 0);
- __ctl_store(cr2_old, 2, 2);
+ __ctl_store(cr0_old.val, 0, 0);
+ __ctl_store(cr2_old.val, 2, 2);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
/* Set or clear transaction execution TXC bit 8. */
- cr0_new |= (1UL << 55);
+ cr0_new.tcx = 1;
if (task->thread.per_flags & PER_FLAG_NO_TE)
- cr0_new &= ~(1UL << 55);
+ cr0_new.tcx = 0;
/* Set or clear transaction execution TDC bits 62 and 63. */
- cr2_new &= ~3UL;
+ cr2_new.tdc = 0;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
- cr2_new |= 1UL;
+ cr2_new.tdc = 1;
else
- cr2_new |= 2UL;
+ cr2_new.tdc = 2;
}
}
/* Take care of enable/disable of guarded storage. */
if (MACHINE_HAS_GS) {
- cr2_new &= ~(1UL << 4);
+ cr2_new.gse = 0;
if (task->thread.gs_cb)
- cr2_new |= (1UL << 4);
+ cr2_new.gse = 1;
}
/* Load control register 0/2 iff changed */
- cr0_changed = cr0_new != cr0_old;
- cr2_changed = cr2_new != cr2_old;
+ cr0_changed = cr0_new.val != cr0_old.val;
+ cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- __ctl_load(cr0_new, 0, 0);
+ __ctl_load(cr0_new.val, 0, 0);
if (cr2_changed)
- __ctl_load(cr2_new, 2, 2);
+ __ctl_load(cr2_new.val, 2, 2);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@@ -1172,26 +1175,37 @@ static int s390_gs_cb_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct gs_cb *data = target->thread.gs_cb;
+ struct gs_cb gs_cb = { }, *data = NULL;
int rc;
if (!MACHINE_HAS_GS)
return -ENODEV;
- if (!data) {
+ if (!target->thread.gs_cb) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->gsd = 25;
- target->thread.gs_cb = data;
- if (target == current)
- __ctl_set_bit(2, 4);
- } else if (target == current) {
- save_gs_cb(data);
}
+ if (!target->thread.gs_cb)
+ gs_cb.gsd = 25;
+ else if (target == current)
+ save_gs_cb(&gs_cb);
+ else
+ gs_cb = *target->thread.gs_cb;
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- data, 0, sizeof(struct gs_cb));
- if (target == current)
- restore_gs_cb(data);
+ &gs_cb, 0, sizeof(gs_cb));
+ if (rc) {
+ kfree(data);
+ return -EFAULT;
+ }
+ preempt_disable();
+ if (!target->thread.gs_cb)
+ target->thread.gs_cb = data;
+ *target->thread.gs_cb = gs_cb;
+ if (target == current) {
+ __ctl_set_bit(2, 4);
+ restore_gs_cb(target->thread.gs_cb);
+ }
+ preempt_enable();
return rc;
}
@@ -1229,6 +1243,96 @@ static int s390_gs_bc_set(struct task_struct *target,
data, 0, sizeof(struct gs_cb));
}
+static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
+{
+ return (cb->rca & 0x1f) == 0 &&
+ (cb->roa & 0xfff) == 0 &&
+ (cb->rla & 0xfff) == 0xfff &&
+ cb->s == 1 &&
+ cb->k == 1 &&
+ cb->h == 0 &&
+ cb->reserved1 == 0 &&
+ cb->ps == 1 &&
+ cb->qs == 0 &&
+ cb->pc == 1 &&
+ cb->qc == 0 &&
+ cb->reserved2 == 0 &&
+ cb->key == PAGE_DEFAULT_KEY &&
+ cb->reserved3 == 0 &&
+ cb->reserved4 == 0 &&
+ cb->reserved5 == 0 &&
+ cb->reserved6 == 0 &&
+ cb->reserved7 == 0 &&
+ cb->reserved8 == 0 &&
+ cb->rla >= cb->roa &&
+ cb->rca >= cb->roa &&
+ cb->rca <= cb->rla+1 &&
+ cb->m < 3;
+}
+
+static int s390_runtime_instr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct runtime_instr_cb *data = target->thread.ri_cb;
+
+ if (!test_facility(64))
+ return -ENODEV;
+ if (!data)
+ return -ENODATA;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ data, 0, sizeof(struct runtime_instr_cb));
+}
+
+static int s390_runtime_instr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct runtime_instr_cb ri_cb = { }, *data = NULL;
+ int rc;
+
+ if (!test_facility(64))
+ return -ENODEV;
+
+ if (!target->thread.ri_cb) {
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ }
+
+ if (target->thread.ri_cb) {
+ if (target == current)
+ store_runtime_instr_cb(&ri_cb);
+ else
+ ri_cb = *target->thread.ri_cb;
+ }
+
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &ri_cb, 0, sizeof(struct runtime_instr_cb));
+ if (rc) {
+ kfree(data);
+ return -EFAULT;
+ }
+
+ if (!is_ri_cb_valid(&ri_cb)) {
+ kfree(data);
+ return -EINVAL;
+ }
+
+ preempt_disable();
+ if (!target->thread.ri_cb)
+ target->thread.ri_cb = data;
+ *target->thread.ri_cb = ri_cb;
+ if (target == current)
+ load_runtime_instr_cb(target->thread.ri_cb);
+ preempt_enable();
+
+ return 0;
+}
+
static const struct user_regset s390_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
@@ -1302,6 +1406,14 @@ static const struct user_regset s390_regsets[] = {
.get = s390_gs_bc_get,
.set = s390_gs_bc_set,
},
+ {
+ .core_note_type = NT_S390_RI_CB,
+ .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_runtime_instr_get,
+ .set = s390_runtime_instr_set,
+ },
};
static const struct user_regset_view user_s390_view = {
@@ -1538,6 +1650,22 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_gs_cb_get,
.set = s390_gs_cb_set,
},
+ {
+ .core_note_type = NT_S390_GS_BC,
+ .n = sizeof(struct gs_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_gs_bc_get,
+ .set = s390_gs_bc_set,
+ },
+ {
+ .core_note_type = NT_S390_RI_CB,
+ .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_runtime_instr_get,
+ .set = s390_runtime_instr_set,
+ },
};
static const struct user_regset_view user_s390_compat_view = {
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index ca37e5d5b40c..9c2c96da23d0 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -29,7 +29,6 @@
ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
- stnsm sys_msk-.base(%r13),0xfb # disable DAT
stctg %c0,%c15,ctlregs-.base(%r13)
stmg %r0,%r15,gprregs-.base(%r13)
lghi %r0,3
@@ -103,8 +102,6 @@ ENTRY(relocate_kernel)
.align 8
load_psw:
.long 0x00080000,0x80000000
- sys_msk:
- .quad 0
ctlregs:
.rept 16
.quad 0
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 32aefb215e59..09f5bf0d5c0c 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -21,11 +21,24 @@
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
+void runtime_instr_release(struct task_struct *tsk)
+{
+ kfree(tsk->thread.ri_cb);
+}
+
static void disable_runtime_instr(void)
{
- struct pt_regs *regs = task_pt_regs(current);
+ struct task_struct *task = current;
+ struct pt_regs *regs;
+ if (!task->thread.ri_cb)
+ return;
+ regs = task_pt_regs(task);
+ preempt_disable();
load_runtime_instr_cb(&runtime_instr_empty_cb);
+ kfree(task->thread.ri_cb);
+ task->thread.ri_cb = NULL;
+ preempt_enable();
/*
* Make sure the RI bit is deleted from the PSW. If the user did not
@@ -37,24 +50,13 @@ static void disable_runtime_instr(void)
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
- cb->buf_limit = 0xfff;
- cb->pstate = 1;
- cb->pstate_set_buf = 1;
- cb->pstate_sample = 1;
- cb->pstate_collect = 1;
+ cb->rla = 0xfff;
+ cb->s = 1;
+ cb->k = 1;
+ cb->ps = 1;
+ cb->pc = 1;
cb->key = PAGE_DEFAULT_KEY;
- cb->valid = 1;
-}
-
-void exit_thread_runtime_instr(void)
-{
- struct task_struct *task = current;
-
- if (!task->thread.ri_cb)
- return;
- disable_runtime_instr();
- kfree(task->thread.ri_cb);
- task->thread.ri_cb = NULL;
+ cb->v = 1;
}
SYSCALL_DEFINE1(s390_runtime_instr, int, command)
@@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) {
- preempt_disable();
- exit_thread_runtime_instr();
- preempt_enable();
+ disable_runtime_instr();
return 0;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 164a1e16b53e..793da97f9a6e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999, 2012
@@ -55,17 +56,18 @@
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
+#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
-#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
+#include <asm/alternative.h>
#include "entry.h"
/*
@@ -339,16 +341,8 @@ static void __init setup_lowcore(void)
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- unsigned long bits, size;
-
- bits = MACHINE_HAS_GS ? 11 : 10;
- size = 1UL << bits;
- lc->mcesad = (__u64) memblock_virt_alloc(size, size);
- if (MACHINE_HAS_GS)
- lc->mcesad |= bits;
- }
- lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
+ nmi_alloc_boot_cpu(lc);
+ vdso_alloc_boot_cpu(lc);
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -380,6 +374,8 @@ static void __init setup_lowcore(void)
#ifdef CONFIG_SMP
lc->spinlock_lockval = arch_spin_lockval(0);
+ lc->spinlock_index = 0;
+ arch_spin_lock_setup(0);
#endif
set_prefix((u32)(unsigned long) lc);
@@ -764,7 +760,7 @@ static int __init setup_hwcaps(void)
/*
* Transactional execution support HWCAP_S390_TE is bit 10.
*/
- if (test_facility(50) && test_facility(73))
+ if (MACHINE_HAS_TE)
elf_hwcap |= HWCAP_S390_TE;
/*
@@ -955,6 +951,8 @@ void __init setup_arch(char **cmdline_p)
conmode_default();
set_preferred_console();
+ apply_alternative_instructions();
+
/* Setup zfcpdump support */
setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 092c4154abd7..b8c1a85bcf2d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
+#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
@@ -54,6 +55,7 @@
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
+#include <asm/topology.h>
#include "entry.h"
enum {
@@ -81,8 +83,6 @@ struct pcpu {
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];
-static struct kmem_cache *pcpu_mcesa_cache;
-
unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);
@@ -193,10 +193,8 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, panic_stack;
- unsigned long mcesa_origin, mcesa_bits;
struct lowcore *lc;
- mcesa_origin = mcesa_bits = 0;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -204,39 +202,30 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
panic_stack = __get_free_page(GFP_KERNEL);
if (!pcpu->lowcore || !panic_stack || !async_stack)
goto out;
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- mcesa_origin = (unsigned long)
- kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
- if (!mcesa_origin)
- goto out;
- /* The pointer is stored with mcesa_bits ORed in */
- kmemleak_not_leak((void *) mcesa_origin);
- mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
- }
} else {
async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
- mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
- mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
}
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
- lc->mcesad = mcesa_origin | mcesa_bits;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
- if (vdso_alloc_per_cpu(lc))
+ lc->spinlock_index = 0;
+ if (nmi_alloc_per_cpu(lc))
goto out;
+ if (vdso_alloc_per_cpu(lc))
+ goto out_mcesa;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
+
+out_mcesa:
+ nmi_free_per_cpu(lc);
out:
if (pcpu != &pcpu_devices[0]) {
- if (mcesa_origin)
- kmem_cache_free(pcpu_mcesa_cache,
- (void *) mcesa_origin);
free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -248,17 +237,12 @@ out:
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
- unsigned long mcesa_origin;
-
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
vdso_free_per_cpu(pcpu->lowcore);
+ nmi_free_per_cpu(pcpu->lowcore);
if (pcpu == &pcpu_devices[0])
return;
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
- kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
- }
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -274,6 +258,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->spinlock_index = 0;
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
@@ -282,6 +267,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
+ arch_spin_lock_setup(cpu);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -423,13 +409,17 @@ void smp_yield_cpu(int cpu)
* Send cpus emergency shutdown signal. This gives the cpus the
* opportunity to complete outstanding interrupts.
*/
-static void smp_emergency_stop(cpumask_t *cpumask)
+void notrace smp_emergency_stop(void)
{
+ cpumask_t cpumask;
u64 end;
int cpu;
+ cpumask_copy(&cpumask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpumask);
+
end = get_tod_clock() + (1000000UL << 12);
- for_each_cpu(cpu, cpumask) {
+ for_each_cpu(cpu, &cpumask) {
struct pcpu *pcpu = pcpu_devices + cpu;
set_bit(ec_stop_cpu, &pcpu->ec_mask);
while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
@@ -438,21 +428,21 @@ static void smp_emergency_stop(cpumask_t *cpumask)
cpu_relax();
}
while (get_tod_clock() < end) {
- for_each_cpu(cpu, cpumask)
+ for_each_cpu(cpu, &cpumask)
if (pcpu_stopped(pcpu_devices + cpu))
- cpumask_clear_cpu(cpu, cpumask);
- if (cpumask_empty(cpumask))
+ cpumask_clear_cpu(cpu, &cpumask);
+ if (cpumask_empty(&cpumask))
break;
cpu_relax();
}
}
+NOKPROBE_SYMBOL(smp_emergency_stop);
/*
* Stop all cpus but the current one.
*/
void smp_send_stop(void)
{
- cpumask_t cpumask;
int cpu;
/* Disable all interrupts/machine checks */
@@ -460,17 +450,16 @@ void smp_send_stop(void)
trace_hardirqs_off();
debug_set_critical();
- cpumask_copy(&cpumask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &cpumask);
if (oops_in_progress)
- smp_emergency_stop(&cpumask);
+ smp_emergency_stop();
/* stop all processors */
- for_each_cpu(cpu, &cpumask) {
- struct pcpu *pcpu = pcpu_devices + cpu;
- pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
- while (!pcpu_stopped(pcpu))
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
+ while (!pcpu_stopped(pcpu_devices + cpu))
cpu_relax();
}
}
@@ -804,6 +793,8 @@ void __init smp_detect_cpus(void)
*/
static void smp_start_secondary(void *cpuvoid)
{
+ int cpu = smp_processor_id();
+
S390_lowcore.last_update_clock = get_tod_clock();
S390_lowcore.restart_stack = (unsigned long) restart_stack;
S390_lowcore.restart_fn = (unsigned long) do_restart;
@@ -817,8 +808,12 @@ static void smp_start_secondary(void *cpuvoid)
init_cpu_timer();
vtime_init();
pfault_init();
- notify_cpu_starting(smp_processor_id());
- set_cpu_online(smp_processor_id(), true);
+ notify_cpu_starting(cpu);
+ if (topology_cpu_dedicated(cpu))
+ set_cpu_flag(CIF_DEDICATED_CPU);
+ else
+ clear_cpu_flag(CIF_DEDICATED_CPU);
+ set_cpu_online(cpu, true);
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -927,22 +922,12 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned long size;
-
/* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
- /* create slab cache for the machine-check-extended-save-areas */
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
- pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
- size, size, 0, NULL);
- if (!pcpu_mcesa_cache)
- panic("Couldn't create nmi save area cache");
- }
}
void __init smp_prepare_boot_cpu(void)
@@ -965,6 +950,7 @@ void __init smp_setup_processor_id(void)
pcpu_devices[0].address = stap();
S390_lowcore.cpu_nr = 0;
S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
+ S390_lowcore.spinlock_index = 0;
}
/*
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index e66687dc6144..460dcfba7d4e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Stack trace management functions
*
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kernel/sthyi.c
index 395926b8c1ed..80b862e9c53c 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -1,29 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* store hypervisor information instruction emulation functions.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Copyright IBM Corp. 2016
* Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
*/
-#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
-#include <linux/ratelimit.h>
-
-#include <asm/kvm_host.h>
+#include <linux/syscalls.h>
+#include <linux/mutex.h>
#include <asm/asm-offsets.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
-
-#include "kvm-s390.h"
-#include "gaccess.h"
-#include "trace.h"
+#include <asm/facility.h>
+#include <asm/sthyi.h>
+#include "entry.h"
#define DED_WEIGHT 0xffff
/*
@@ -144,6 +138,21 @@ struct lpar_cpu_inf {
struct cpu_inf ifl;
};
+/*
+ * STHYI requires extensive locking in the higher hypervisors
+ * and is computationally and memory expensive. Therefore we
+ * cache the retrieved data, which stays valid for one second.
+ */
+#define CACHE_VALID_JIFFIES HZ
+
+struct sthyi_info {
+ void *info;
+ unsigned long end;
+};
+
+static DEFINE_MUTEX(sthyi_mutex);
+static struct sthyi_info sthyi_cache;
+
static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
{
return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
@@ -382,88 +391,124 @@ out:
vfree(diag204_buf);
}
-static int sthyi(u64 vaddr)
+static int sthyi(u64 vaddr, u64 *rc)
{
register u64 code asm("0") = 0;
register u64 addr asm("2") = vaddr;
+ register u64 rcode asm("3");
int cc;
asm volatile(
".insn rre,0xB2560000,%[code],%[addr]\n"
"ipm %[cc]\n"
"srl %[cc],28\n"
- : [cc] "=d" (cc)
+ : [cc] "=d" (cc), "=d" (rcode)
: [code] "d" (code), [addr] "a" (addr)
- : "3", "memory", "cc");
+ : "memory", "cc");
+ *rc = rcode;
return cc;
}
-int handle_sthyi(struct kvm_vcpu *vcpu)
+static int fill_dst(void *dst, u64 *rc)
{
- int reg1, reg2, r = 0;
- u64 code, addr, cc = 0;
- struct sthyi_sctns *sctns = NULL;
-
- if (!test_kvm_facility(vcpu->kvm, 74))
- return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+ struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;
/*
- * STHYI requires extensive locking in the higher hypervisors
- * and is very computational/memory expensive. Therefore we
- * ratelimit the executions per VM.
+ * If the facility is on, we don't want to emulate the instruction.
+ * We ask the hypervisor to provide the data.
*/
- if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
- kvm_s390_retry_instr(vcpu);
+ if (test_facility(74))
+ return sthyi((u64)dst, rc);
+
+ fill_hdr(sctns);
+ fill_stsi(sctns);
+ fill_diag(sctns);
+ *rc = 0;
+ return 0;
+}
+
+static int sthyi_init_cache(void)
+{
+ if (sthyi_cache.info)
return 0;
- }
+ sthyi_cache.info = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!sthyi_cache.info)
+ return -ENOMEM;
+ sthyi_cache.end = jiffies - 1; /* expired */
+ return 0;
+}
- kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- code = vcpu->run->s.regs.gprs[reg1];
- addr = vcpu->run->s.regs.gprs[reg2];
+static int sthyi_update_cache(u64 *rc)
+{
+ int r;
- vcpu->stat.instruction_sthyi++;
- VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
- trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+ memset(sthyi_cache.info, 0, PAGE_SIZE);
+ r = fill_dst(sthyi_cache.info, rc);
+ if (r)
+ return r;
+ sthyi_cache.end = jiffies + CACHE_VALID_JIFFIES;
+ return r;
+}
- if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
- return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+/*
+ * sthyi_fill - Fill page with data returned by the STHYI instruction
+ *
+ * @dst: Pointer to zeroed page
+ * @rc: Pointer for storing the return code of the instruction
+ *
+ * Fills the destination with system information returned by the STHYI
+ * instruction. The data is generated by executing STHYI if the facility
+ * is available and by emulation otherwise. The return value is the
+ * condition code that would be returned; the rc parameter is the return
+ * code which is passed in register R2 + 1.
+ */
+int sthyi_fill(void *dst, u64 *rc)
+{
+ int r;
- if (code & 0xffff) {
- cc = 3;
+ mutex_lock(&sthyi_mutex);
+ r = sthyi_init_cache();
+ if (r)
goto out;
- }
- if (addr & ~PAGE_MASK)
- return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ if (time_is_before_jiffies(sthyi_cache.end)) {
+ /* cache expired */
+ r = sthyi_update_cache(rc);
+ if (r)
+ goto out;
+ }
+ *rc = 0;
+ memcpy(dst, sthyi_cache.info, PAGE_SIZE);
+out:
+ mutex_unlock(&sthyi_mutex);
+ return r;
+}
+EXPORT_SYMBOL_GPL(sthyi_fill);
- sctns = (void *)get_zeroed_page(GFP_KERNEL);
- if (!sctns)
+SYSCALL_DEFINE4(s390_sthyi, unsigned long, function_code, void __user *, buffer,
+ u64 __user *, return_code, unsigned long, flags)
+{
+ u64 sthyi_rc;
+ void *info;
+ int r;
+
+ if (flags)
+ return -EINVAL;
+ if (function_code != STHYI_FC_CP_IFL_CAP)
+ return -EOPNOTSUPP;
+ info = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!info)
return -ENOMEM;
-
- /*
- * If we are a guest, we don't want to emulate an emulated
- * instruction. We ask the hypervisor to provide the data.
- */
- if (test_facility(74)) {
- cc = sthyi((u64)sctns);
+ r = sthyi_fill(info, &sthyi_rc);
+ if (r < 0)
+ goto out;
+ if (return_code && put_user(sthyi_rc, return_code)) {
+ r = -EFAULT;
goto out;
}
-
- fill_hdr(sctns);
- fill_stsi(sctns);
- fill_diag(sctns);
-
+ if (copy_to_user(buffer, info, PAGE_SIZE))
+ r = -EFAULT;
out:
- if (!cc) {
- r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
- if (r) {
- free_page((unsigned long)sctns);
- return kvm_s390_inject_prog_cond(vcpu, r);
- }
- }
-
- free_page((unsigned long)sctns);
- vcpu->run->s.regs.gprs[reg2 + 1] = cc ? 4 : 0;
- kvm_s390_set_psw_cc(vcpu, cc);
+ free_page((unsigned long)info);
return r;
}
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index a8af9c825628..ce329c876d8c 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -153,7 +153,7 @@ int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
- unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long end_rodata_pfn = PFN_DOWN(__pa(&__end_rodata)) - 1;
unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
/* Always save lowcore pages (LC protection might be enabled). */
@@ -161,9 +161,9 @@ int pfn_is_nosave(unsigned long pfn)
return 0;
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
- /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
- if (pfn >= stext_pfn && pfn <= eshared_pfn)
- return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ /* Skip memory holes and read-only pages (DCSS, ...). */
+ if (pfn >= stext_pfn && pfn <= end_rodata_pfn)
+ return 0;
if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index d39f121e67a9..f7fc63385553 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
SYSCALL(sys_socket,sys_socket)
SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
-SYSCALL(sys_bind,sys_bind)
-SYSCALL(sys_connect,sys_connect)
+SYSCALL(sys_bind,compat_sys_bind)
+SYSCALL(sys_connect,compat_sys_connect)
SYSCALL(sys_listen,sys_listen)
-SYSCALL(sys_accept4,sys_accept4)
+SYSCALL(sys_accept4,compat_sys_accept4)
SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
SYSCALL(sys_setsockopt,compat_sys_setsockopt)
SYSCALL(sys_getsockname,compat_sys_getsockname)
@@ -389,3 +389,4 @@ SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
SYSCALL(sys_s390_guarded_storage,compat_sys_s390_guarded_storage) /* 378 */
SYSCALL(sys_statx,compat_sys_statx)
+SYSCALL(sys_s390_sthyi,compat_sys_s390_sthyi)
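A note on the new entry above: from its position in the table (sys_s390_guarded_storage is 378 and sys_statx 379), sys_s390_sthyi gets syscall number 380. The sketch below is illustrative only and not part of the patch; the syscall number is inferred from the table and the value of STHYI_FC_CP_IFL_CAP is an assumption about the uapi header. It simply asks the kernel for the cached STHYI response page.

/* Hypothetical user-space caller of the new s390_sthyi system call. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NR_s390_sthyi		380	/* inferred from the table position above */
#define STHYI_FC_CP_IFL_CAP	0	/* assumed value of the uapi constant */

int main(void)
{
	uint64_t rc = 0;
	void *buf = calloc(1, 4096);	/* the kernel copies one page back */
	long cc;

	if (!buf)
		return 1;
	cc = syscall(NR_s390_sthyi, STHYI_FC_CP_IFL_CAP, buf, &rc, 0UL);
	if (cc < 0)
		perror("s390_sthyi");
	else
		printf("condition code %ld, rc %llu\n", cc, (unsigned long long)rc);
	free(buf);
	return cc ? 1 : 0;
}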
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 5cbd52169348..cf561160ea88 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Time of day based timer functions.
*
@@ -523,7 +524,7 @@ static void __init stp_reset(void)
}
}
-static void stp_timeout(unsigned long dummy)
+static void stp_timeout(struct timer_list *unused)
{
queue_work(time_sync_wq, &stp_work);
}
@@ -532,7 +533,7 @@ static int __init stp_init(void)
{
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return 0;
- setup_timer(&stp_timer, stp_timeout, 0UL);
+ timer_setup(&stp_timer, stp_timeout, 0);
time_init_wq();
if (!stp_online)
return 0;
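The setup_timer()-to-timer_setup() conversions here and in topology.c below follow the kernel-wide timer API change: callbacks now receive the timer_list pointer instead of an unsigned long cookie. A minimal sketch of the general pattern follows (hypothetical names; the callbacks in this patch simply ignore their argument):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list timer;
	int pending;
};

/* New-style callback: the timer itself is passed in. */
static void foo_timeout(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);	/* container_of() helper */

	f->pending = 1;
}

static void foo_start(struct foo *f)
{
	timer_setup(&f->timer, foo_timeout, 0);
	mod_timer(&f->timer, jiffies + HZ);
}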
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index ed0bdd220e1a..4d5b65e527b5 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -133,6 +134,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
topo->socket_id = socket->id;
topo->core_id = rcore;
topo->thread_id = lcpu + i;
+ topo->dedicated = tl_core->d;
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
@@ -273,6 +275,14 @@ void store_topology(struct sysinfo_15_1_x *info)
stsi(info, 15, 1, topology_mnest_limit());
}
+static void __arch_update_dedicated_flag(void *arg)
+{
+ if (topology_cpu_dedicated(smp_processor_id()))
+ set_cpu_flag(CIF_DEDICATED_CPU);
+ else
+ clear_cpu_flag(CIF_DEDICATED_CPU);
+}
+
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
@@ -298,6 +308,7 @@ int arch_update_cpu_topology(void)
int cpu, rc;
rc = __arch_update_cpu_topology();
+ on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -320,15 +331,14 @@ static void topology_flush_work(void)
flush_work(&topology_work);
}
-static void topology_timer_fn(unsigned long ignored)
+static void topology_timer_fn(struct timer_list *unused)
{
if (ptf(PTF_CHECK))
topology_schedule_update();
set_topology_timer();
}
-static struct timer_list topology_timer =
- TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+static struct timer_list topology_timer;
static atomic_t topology_poll = ATOMIC_INIT(0);
@@ -435,9 +445,39 @@ static struct attribute_group topology_cpu_attr_group = {
.attrs = topology_cpu_attrs,
};
+static ssize_t cpu_dedicated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int cpu = dev->id;
+ ssize_t count;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
+ mutex_unlock(&smp_cpu_state_mutex);
+ return count;
+}
+static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);
+
+static struct attribute *topology_extra_cpu_attrs[] = {
+ &dev_attr_dedicated.attr,
+ NULL,
+};
+
+static struct attribute_group topology_extra_cpu_attr_group = {
+ .attrs = topology_extra_cpu_attrs,
+};
+
int topology_cpu_init(struct cpu *cpu)
{
- return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+ int rc;
+
+ rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+ if (rc || !MACHINE_HAS_TOPOLOGY)
+ return rc;
+ rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
+ if (rc)
+ sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+ return rc;
}
static const struct cpumask *cpu_thread_mask(int cpu)
@@ -509,6 +549,7 @@ void __init topology_init_early(void)
alloc_masks(info, &drawer_info, 3);
out:
__arch_update_cpu_topology();
+ __arch_update_dedicated_flag(NULL);
}
static inline int topology_get_mode(int enabled)
@@ -597,6 +638,7 @@ static struct ctl_table topology_dir_table[] = {
static int __init topology_init(void)
{
+ timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
if (MACHINE_HAS_TOPOLOGY)
set_topology_timer();
else
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index eacda05b45d7..f3a1c7c6824e 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* vdso setup for s390
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <linux/init.h>
@@ -140,20 +137,27 @@ static void __init vdso_init_data(struct vdso_data *vd)
*/
#define SEGMENT_ORDER 2
+/*
+ * The initial vdso_data structure for the boot CPU. Eventually
+ * it is replaced with a properly allocated structure in vdso_init.
+ * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
+ * pointer is required to be able to return from an interrupt or
+ * program check. See the exit paths in entry.S.
+ */
+struct vdso_data boot_vdso_data __initdata;
+
+void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
+{
+ lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
+}
+
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
struct vdso_per_cpu_data *vd;
- u32 *psal, *aste;
- int i;
-
- lowcore->vdso_per_cpu_data = __LC_PASTE;
-
- if (!vdso_enabled)
- return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
- page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ page_table = get_zeroed_page(GFP_KERNEL);
page_frame = get_zeroed_page(GFP_KERNEL);
if (!segment_table || !page_table || !page_frame)
goto out;
@@ -165,27 +169,15 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
vd->cpu_nr = lowcore->cpu_nr;
vd->node_id = cpu_to_node(vd->cpu_nr);
- /* Set up access register mode page table */
- clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
- PAGE_SIZE << SEGMENT_ORDER);
- clear_table((unsigned long *) page_table, _PAGE_INVALID,
- 256*sizeof(unsigned long));
+ /* Set up page table for the vdso address space */
+ memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
+ memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
- psal = (u32 *) (page_table + 256*sizeof(unsigned long));
- aste = psal + 32;
-
- for (i = 4; i < 32; i += 4)
- psal[i] = 0x80000000;
-
- lowcore->paste[4] = (u32)(addr_t) psal;
- psal[0] = 0x02000000;
- psal[2] = (u32)(addr_t) aste;
- *(unsigned long *) (aste + 2) = segment_table +
+ lowcore->vdso_asce = segment_table +
_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
- aste[4] = (u32)(addr_t) psal;
lowcore->vdso_per_cpu_data = page_frame;
return 0;
@@ -200,14 +192,8 @@ out:
void vdso_free_per_cpu(struct lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
- u32 *psal, *aste;
-
- if (!vdso_enabled)
- return;
- psal = (u32 *)(addr_t) lowcore->paste[4];
- aste = (u32 *)(addr_t) psal[2];
- segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
+ segment_table = lowcore->vdso_asce & PAGE_MASK;
page_table = *(unsigned long *) segment_table;
page_frame = *(unsigned long *) page_table;
@@ -216,16 +202,6 @@ void vdso_free_per_cpu(struct lowcore *lowcore)
free_pages(segment_table, SEGMENT_ORDER);
}
-static void vdso_init_cr5(void)
-{
- unsigned long cr5;
-
- if (!vdso_enabled)
- return;
- cr5 = offsetof(struct lowcore, paste);
- __ctl_load(cr5, 5, 5);
-}
-
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
@@ -302,8 +278,6 @@ static int __init vdso_init(void)
{
int i;
- if (!vdso_enabled)
- return 0;
vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
/* Calculate the size of the 32 bit vDSO */
@@ -342,7 +316,6 @@ static int __init vdso_init(void)
vdso64_pagelist[vdso64_pages] = NULL;
if (vdso_alloc_per_cpu(&S390_lowcore))
BUG();
- vdso_init_cr5();
get_page(virt_to_page(vdso_data));
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index eca3f001f081..f61df5253c23 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_getres() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index a5769b83d90e..2d6ec3abe095 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_gettime() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
diff --git a/arch/s390/kernel/vdso32/getcpu.S b/arch/s390/kernel/vdso32/getcpu.S
index 6e30769dd017..5477a2c112fb 100644
--- a/arch/s390/kernel/vdso32/getcpu.S
+++ b/arch/s390/kernel/vdso32/getcpu.S
@@ -15,23 +15,11 @@
.type __kernel_getcpu,@function
__kernel_getcpu:
.cfi_startproc
- ear %r1,%a4
- lhi %r4,1
- sll %r4,24
- sar %a4,%r4
la %r4,0
- epsw %r0,0
- sacf 512
+ sacf 256
l %r5,__VDSO_CPU_NR(%r4)
l %r4,__VDSO_NODE_ID(%r4)
- tml %r0,0x4000
- jo 1f
- tml %r0,0x8000
- jno 0f
- sacf 256
- j 1f
-0: sacf 0
-1: sar %a4,%r1
+ sacf 0
ltr %r2,%r2
jz 2f
st %r5,0(%r2)
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 63b86dceb0bf..aa8bf13a2edb 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of gettimeofday() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index c8513deb8c66..faf5213b15df 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_getres() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 9c3b12626dba..6046b3bfca46 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_gettime() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
@@ -114,23 +111,12 @@ __kernel_clock_gettime:
br %r14
/* CPUCLOCK_VIRT for this thread */
-9: icm %r0,15,__VDSO_ECTG_OK(%r5)
+9: lghi %r4,0
+ icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
- ear %r2,%a4
- llilh %r4,0x0100
- sar %a4,%r4
- lghi %r4,0
- epsw %r5,0
- sacf 512 /* Magic ectg instruction */
+ sacf 256 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
- tml %r5,0x4000
- jo 11f
- tml %r5,0x8000
- jno 10f
- sacf 256
- j 11f
-10: sacf 0
-11: sar %a4,%r2
+ sacf 0
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
diff --git a/arch/s390/kernel/vdso64/getcpu.S b/arch/s390/kernel/vdso64/getcpu.S
index 43983764b959..e9c34364d97b 100644
--- a/arch/s390/kernel/vdso64/getcpu.S
+++ b/arch/s390/kernel/vdso64/getcpu.S
@@ -15,22 +15,11 @@
.type __kernel_getcpu,@function
__kernel_getcpu:
.cfi_startproc
- ear %r1,%a4
- llilh %r4,0x0100
- sar %a4,%r4
la %r4,0
- epsw %r0,0
- sacf 512
+ sacf 256
l %r5,__VDSO_CPU_NR(%r4)
l %r4,__VDSO_NODE_ID(%r4)
- tml %r0,0x4000
- jo 1f
- tml %r0,0x8000
- jno 0f
- sacf 256
- j 1f
-0: sacf 0
-1: sar %a4,%r1
+ sacf 0
ltgr %r2,%r2
jz 2f
st %r5,0(%r2)
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index b02e62f3bc12..cc9dbc27da6f 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of gettimeofday() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
diff --git a/arch/s390/kernel/vdso64/note.S b/arch/s390/kernel/vdso64/note.S
index 79a071e4357e..db19d0680a0a 100644
--- a/arch/s390/kernel/vdso64/note.S
+++ b/arch/s390/kernel/vdso64/note.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
* Here we can supply some information useful to userland.
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 96a713a470e7..a049ff005f03 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -60,12 +60,7 @@ SECTIONS
RO_DATA_SECTION(PAGE_SIZE)
-#ifdef CONFIG_SHARED_KERNEL
- . = ALIGN(0x100000); /* VM shared segments are 1MB aligned */
-#endif
-
. = ALIGN(PAGE_SIZE);
- _eshared = .; /* End of shareable data */
_sdata = .; /* Start of data section */
. = ALIGN(PAGE_SIZE);
@@ -105,6 +100,29 @@ SECTIONS
EXIT_DATA
}
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ * Note that this is part of the __init region.
+ */
+ . = ALIGN(8);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ /*
+ * And here are the replacement instructions. The linker sticks
+ * them in as binary blobs. The .altinstructions section has enough
+ * data to get their address and length so the kernel can be patched
+ * safely. Note that this is part of the __init region.
+ */
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index dd7178fbb4f3..f24395a01918 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Virtual cpu timer based timer functions.
*
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 09a9e6dfc09f..05ee90a5ea08 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -1,10 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for kernel virtual machines on s390
#
# Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
KVM := ../../../virt/kvm
common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
@@ -12,6 +9,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-objs += diag.o gaccess.o guestdbg.o sthyi.o vsie.o
+kvm-objs += diag.o gaccess.o guestdbg.o vsie.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index d93a2c0474bf..89aa114a2cba 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* handling diagnose instructions
*
* Copyright IBM Corp. 2008, 2011
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index bec42b852246..f4c51756c462 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* access guest memory
*
* Copyright IBM Corp. 2008, 2014
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index bcbd86621d01..b5f3e82006d0 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kvm guest debug support
*
* Copyright IBM Corp. 2014
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
*/
#include <linux/kvm_host.h>
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index a4752bf6b526..9c7d70715862 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* in-kernel handling for sie intercepts
*
* Copyright IBM Corp. 2008, 2014
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
@@ -18,6 +15,7 @@
#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
+#include <asm/sysinfo.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -360,6 +358,61 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+/*
+ * Handle the sthyi instruction that provides the guest with system
+ * information, like current CPU resources available at each level of
+ * the machine.
+ */
+int handle_sthyi(struct kvm_vcpu *vcpu)
+{
+ int reg1, reg2, r = 0;
+ u64 code, addr, cc = 0, rc = 0;
+ struct sthyi_sctns *sctns = NULL;
+
+ if (!test_kvm_facility(vcpu->kvm, 74))
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+ kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+ code = vcpu->run->s.regs.gprs[reg1];
+ addr = vcpu->run->s.regs.gprs[reg2];
+
+ vcpu->stat.instruction_sthyi++;
+ VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+ trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
+ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (code & 0xffff) {
+ cc = 3;
+ rc = 4;
+ goto out;
+ }
+
+ if (addr & ~PAGE_MASK)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ sctns = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!sctns)
+ return -ENOMEM;
+
+ cc = sthyi_fill(sctns, &rc);
+
+out:
+ if (!cc) {
+ r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
+ if (r) {
+ free_page((unsigned long)sctns);
+ return kvm_s390_inject_prog_cond(vcpu, r);
+ }
+ }
+
+ free_page((unsigned long)sctns);
+ vcpu->run->s.regs.gprs[reg2 + 1] = rc;
+ kvm_s390_set_psw_cc(vcpu, cc);
+ return r;
+}
+
static int handle_operexc(struct kvm_vcpu *vcpu)
{
psw_t oldpsw, newpsw;
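Seen from a guest, the calling convention enforced by handle_sthyi() above (two distinct even registers, a 4K-aligned response buffer, the return code stored in reg2 + 1) looks roughly like the sketch below. The RRE opcode 0xb256 and the register and constraint choices are assumptions for illustration only, not taken from this patch.

/* Guest-side sketch: issue STHYI with function code 0 and a page-
 * aligned buffer; opcode and constraints are illustrative. */
static inline int guest_sthyi(void *buffer, unsigned long *return_code)
{
	register unsigned long code asm("0") = 0;			/* reg1, even */
	register unsigned long addr asm("2") = (unsigned long) buffer;	/* reg2, even */
	register unsigned long rc asm("3");				/* rc lands in reg2 + 1 */
	int cc;

	asm volatile(
		"	.insn	rre,0xb2560000,%[code],%[addr]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), "=d" (rc)
		: [code] "d" (code), [addr] "d" (addr)
		: "memory", "cc");
	*return_code = rc;
	return cc;	/* cc 0: buffer filled, cc 3 / rc 4: unsupported function code */
}

A caller that violates the register or alignment rules gets a specification exception instead of a condition code, exactly as injected above.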
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a832ad031cee..024ad8bcc516 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* handling kvm guest interrupts
*
* Copyright IBM Corp. 2008, 2015
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
@@ -213,6 +210,16 @@ static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
vcpu->arch.local_int.pending_irqs;
}
+static inline int isc_to_irq_type(unsigned long isc)
+{
+ return IRQ_PEND_IO_ISC_0 + isc;
+}
+
+static inline int irq_type_to_isc(unsigned long irq_type)
+{
+ return irq_type - IRQ_PEND_IO_ISC_0;
+}
+
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
unsigned long active_mask)
{
@@ -220,7 +227,7 @@ static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
for (i = 0; i <= MAX_ISC; i++)
if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
- active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
+ active_mask &= ~(1UL << (isc_to_irq_type(i)));
return active_mask;
}
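The two helpers above only shift an I/O interruption subclass (ISC) into the IRQ_PEND_IO_ISC_* range and back, replacing the open-coded arithmetic in the hunks that follow. A quick sanity sketch of the intended round trip (MAX_ISC comes from asm/isc.h):

/* Sketch: the helpers must be inverses over the I/O ISC range. */
static void check_isc_irq_type_mapping(void)
{
	unsigned long isc;

	for (isc = 0; isc <= MAX_ISC; isc++)
		WARN_ON(irq_type_to_isc(isc_to_irq_type(isc)) != isc);
}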
@@ -901,7 +908,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
fi = &vcpu->kvm->arch.float_int;
spin_lock(&fi->lock);
- isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
+ isc_list = &fi->lists[irq_type_to_isc(irq_type)];
inti = list_first_entry_or_null(isc_list,
struct kvm_s390_interrupt_info,
list);
@@ -1074,6 +1081,12 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
* in kvm_vcpu_block without having the waitqueue set (polling)
*/
vcpu->valid_wakeup = true;
+ /*
+ * This is mostly to document that the read in swait_active() could
+ * be reordered before other stores, leading to subtle races.
+ * None of the current users does a store or an atomic-like update
+ * that would already provide the required ordering.
+ */
+ smp_mb__after_atomic();
if (swait_active(&vcpu->wq)) {
/*
* The vcpu gave up the cpu voluntarily, mark it as a good
@@ -1395,7 +1408,7 @@ static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
list_del_init(&iter->list);
fi->counters[FIRQ_CNTR_IO] -= 1;
if (list_empty(isc_list))
- clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
+ clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
spin_unlock(&fi->lock);
return iter;
}
@@ -1522,7 +1535,7 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
isc = int_word_to_isc(inti->io.io_int_word);
list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
list_add_tail(&inti->list, list);
- set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
+ set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
spin_unlock(&fi->lock);
return 0;
}
@@ -2175,6 +2188,8 @@ static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
return -EINVAL;
if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
return -EFAULT;
+ if (!schid)
+ return -EINVAL;
kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
/*
* If userspace is conforming to the architecture, we can have at most
@@ -2483,11 +2498,11 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
mci.val = mcck_info->mcic;
if (mci.sr)
- cr14 |= MCCK_CR14_RECOVERY_SUB_MASK;
+ cr14 |= CR14_RECOVERY_SUBMASK;
if (mci.dg)
- cr14 |= MCCK_CR14_DEGRAD_SUB_MASK;
+ cr14 |= CR14_DEGRADATION_SUBMASK;
if (mci.w)
- cr14 |= MCCK_CR14_WARN_SUB_MASK;
+ cr14 |= CR14_WARNING_SUBMASK;
mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
mchk->cr14 = cr14;
diff --git a/arch/s390/kvm/irq.h b/arch/s390/kvm/irq.h
index d98e4159643d..484608c71dd0 100644
--- a/arch/s390/kvm/irq.h
+++ b/arch/s390/kvm/irq.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* s390 irqchip routines
*
* Copyright IBM Corp. 2014
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*/
#ifndef __KVM_IRQ_H
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 40d0a1a97889..1371dff2b90d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * hosting zSeries kernel virtual machines
+ * hosting IBM Z kernel virtual machines (s390x)
*
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
+ * Copyright IBM Corp. 2008, 2017
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -395,6 +392,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_USER_INSTR0:
case KVM_CAP_S390_CMMA_MIGRATION:
case KVM_CAP_S390_AIS:
+ case KVM_CAP_S390_AIS_MIGRATION:
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -423,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_GS:
r = test_facility(133);
break;
+ case KVM_CAP_S390_BPB:
+ r = test_facility(82);
+ break;
default:
r = 0;
}
@@ -768,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
/*
* Must be called with kvm->srcu held to avoid races on memslots, and with
- * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
*/
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
@@ -794,11 +795,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
if (kvm->arch.use_cmma) {
/*
- * Get the last slot. They should be sorted by base_gfn, so the
- * last slot is also the one at the end of the address space.
- * We have verified above that at least one slot is present.
+ * Get the first slot. They are reverse sorted by base_gfn, so
+ * the first slot is also the one at the end of the address
+ * space. We have verified above that at least one slot is
+ * present.
*/
- ms = slots->memslots + slots->used_slots - 1;
+ ms = slots->memslots;
/* round up so we only use full longs */
ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
/* allocate enough bytes to store all the bits */
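A worked example of the sizing above; the final allocation size (ram_pages / 8 bytes) is an assumption, since the allocation itself falls outside the visible hunk.

/*
 * Example with illustrative numbers: if the slot covering the top of
 * guest memory has base_gfn = 0x3fff0 and npages = 0x10, the guest
 * frames reach up to 0x40000, so
 *	ram_pages = roundup(0x40000, BITS_PER_LONG) = 0x40000
 * and the migration bitmap presumably needs ram_pages / 8 = 0x8000
 * bytes: one bit per 4K guest frame, rounded up to whole longs.
 */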
@@ -823,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
}
/*
- * Must be called with kvm->lock to avoid races with ourselves and
+ * Must be called with kvm->slots_lock to avoid races with ourselves and
* kvm_s390_vm_start_migration.
*/
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -838,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
if (kvm->arch.use_cmma) {
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+ /* We have to wait for the essa emulation to finish */
+ synchronize_srcu(&kvm->srcu);
vfree(mgs->pgste_bitmap);
}
kfree(mgs);
@@ -847,14 +851,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
static int kvm_s390_vm_set_migration(struct kvm *kvm,
struct kvm_device_attr *attr)
{
- int idx, res = -ENXIO;
+ int res = -ENXIO;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->slots_lock);
switch (attr->attr) {
case KVM_S390_VM_MIGRATION_START:
- idx = srcu_read_lock(&kvm->srcu);
res = kvm_s390_vm_start_migration(kvm);
- srcu_read_unlock(&kvm->srcu, idx);
break;
case KVM_S390_VM_MIGRATION_STOP:
res = kvm_s390_vm_stop_migration(kvm);
@@ -862,7 +864,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
default:
break;
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->slots_lock);
return res;
}
@@ -1752,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
+ mutex_lock(&kvm->slots_lock);
r = kvm_s390_get_cmma_bits(kvm, &args);
+ mutex_unlock(&kvm->slots_lock);
if (!r) {
r = copy_to_user(argp, &args, sizeof(args));
if (r)
@@ -1766,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
+ mutex_lock(&kvm->slots_lock);
r = kvm_s390_set_cmma_bits(kvm, &args);
+ mutex_unlock(&kvm->slots_lock);
break;
}
default:
@@ -1884,8 +1890,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
rc = -ENOMEM;
- ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
-
kvm->arch.use_esca = 0; /* start with basic SCA */
if (!sclp.has_64bscao)
alloc_flags |= GFP_DMA;
@@ -2201,6 +2205,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+ if (test_kvm_facility(vcpu->kvm, 82))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
if (test_kvm_facility(vcpu->kvm, 133))
vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2342,6 +2348,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
current->thread.fpu.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3283,7 +3290,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
*/
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
test_kvm_facility(vcpu->kvm, 64) &&
- riccb->valid &&
+ riccb->v &&
!(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
vcpu->arch.sie_block->ecb3 |= ECB3_RI;
@@ -3301,6 +3308,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu->arch.gs_enabled = 1;
}
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+ test_kvm_facility(vcpu->kvm, 82)) {
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+ }
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
@@ -3347,6 +3359,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
/* Save guest register state */
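From userspace the new bit is driven through the regular sync-regs mechanism shown in the two hunks above. A sketch, assuming the s.regs.bpbc field and KVM_SYNC_BPBC flag added by this series, with "run" being the struct kvm_run obtained by mmap()ing the vcpu fd:

/* Sketch: ask KVM to set the branch prediction blocking control. */
static void set_guest_bpbc(struct kvm_run *run, int enable)
{
	if (!(run->kvm_valid_regs & KVM_SYNC_BPBC))
		return;				/* facility 82 not offered */
	run->s.regs.bpbc = enable ? 1 : 0;
	run->kvm_dirty_regs |= KVM_SYNC_BPBC;	/* picked up on the next KVM_RUN */
}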
@@ -3373,7 +3386,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int rc;
- sigset_t sigsaved;
if (kvm_run->immediate_exit)
return -EINTR;
@@ -3383,8 +3395,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 0;
}
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm_s390_vcpu_start(vcpu);
@@ -3418,8 +3429,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
disable_cpu_timer_accounting(vcpu);
store_regs(vcpu, kvm_run);
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);
vcpu->stat.exit_userspace++;
return rc;
@@ -3812,6 +3822,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EINVAL;
break;
}
+ /* do not use irq_state.flags, it will break old QEMUs */
r = kvm_s390_set_irq_state(vcpu,
(void __user *) irq_state.buf,
irq_state.len);
@@ -3827,6 +3838,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EINVAL;
break;
}
+ /* do not use irq_state.flags, it will break old QEMUs */
r = kvm_s390_get_irq_state(vcpu,
(__u8 __user *) irq_state.buf,
irq_state.len);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 9f8fdd7b2311..5e46ba429bcb 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* definition for kvm on s390
*
* Copyright IBM Corp. 2008, 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
* Christian Ehrhardt <ehrhardt@de.ibm.com>
@@ -242,6 +239,8 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}
+int handle_sthyi(struct kvm_vcpu *vcpu);
+
/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
@@ -268,9 +267,6 @@ void kvm_s390_vsie_destroy(struct kvm *kvm);
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
-/* implemented in sthyi.c */
-int handle_sthyi(struct kvm_vcpu *vcpu);
-
/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
const struct kvm_s390_vm_tod_clock *gtod);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c954ac49eee4..0714bfa56da0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* handling privileged instructions
*
* Copyright IBM Corp. 2008, 2013
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
@@ -235,8 +232,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
return -EAGAIN;
}
- if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
- return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
return 0;
}
@@ -247,6 +242,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
int reg1, reg2;
int rc;
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
rc = try_handle_skey(vcpu);
if (rc)
return rc != -EAGAIN ? rc : 0;
@@ -276,6 +274,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
int reg1, reg2;
int rc;
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
rc = try_handle_skey(vcpu);
if (rc)
return rc != -EAGAIN ? rc : 0;
@@ -311,6 +312,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
int reg1, reg2;
int rc;
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
rc = try_handle_skey(vcpu);
if (rc)
return rc != -EAGAIN ? rc : 0;
@@ -1002,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
cbrlo[entries] = gfn << PAGE_SHIFT;
}
- if (orc) {
+ if (orc && gfn < ms->bitmap_size) {
/* increment only if we are really flipping the bit to 1 */
if (!test_and_set_bit(gfn, ms->pgste_bitmap))
atomic64_inc(&ms->dirty_pages);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 9d592ef4104b..c1f5cde2c878 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* handling interprocessor communication
*
* Copyright IBM Corp. 2008, 2013
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
* Christian Ehrhardt <ehrhardt@de.ibm.com>
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index b18b5652e5c5..751348348477 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kvm nested virtualization support for s390x
*
* Copyright IBM Corp. 2016
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
*/
#include <linux/vmalloc.h>
@@ -226,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
memcpy(scb_o->gcr, scb_s->gcr, 128);
scb_o->pp = scb_s->pp;
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82)) {
+ scb_o->fpf &= ~FPF_BPBC;
+ scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+ }
+
/* interrupt intercept */
switch (scb_s->icptcode) {
case ICPT_PROGI:
@@ -268,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->ecb3 = 0;
scb_s->ecd = 0;
scb_s->fac = 0;
+ scb_s->fpf = 0;
rc = prepare_cpuflags(vcpu, vsie_page);
if (rc)
@@ -327,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
prefix_unmapped(vsie_page);
scb_s->ecb |= scb_o->ecb & ECB_TE;
}
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82))
+ scb_s->fpf |= scb_o->fpf & FPF_BPBC;
/* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) {
scb_s->eca |= scb_o->eca & ECA_VX;
@@ -443,22 +450,14 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
*
* Returns: - 0 on success
* - -EINVAL if the gpa is not valid guest storage
- * - -ENOMEM if out of memory
*/
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
struct page *page;
- hva_t hva;
- int rc;
- hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
- if (kvm_is_error_hva(hva))
+ page = gfn_to_page(kvm, gpa_to_gfn(gpa));
+ if (is_error_page(page))
return -EINVAL;
- rc = get_user_pages_fast(hva, 1, 1, &page);
- if (rc < 0)
- return rc;
- else if (rc != 1)
- return -ENOMEM;
*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
return 0;
}
@@ -466,11 +465,7 @@ static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
- struct page *page;
-
- page = virt_to_page(hpa);
- set_page_dirty_lock(page);
- put_page(page);
+ kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
/* mark the page always as dirty for migration */
mark_page_dirty(kvm, gpa_to_gfn(gpa));
}
@@ -557,7 +552,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
rc = set_validity_icpt(scb_s, 0x003bU);
if (!rc) {
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
- if (rc == -EINVAL)
+ if (rc)
rc = set_validity_icpt(scb_s, 0x0034U);
}
if (rc)
@@ -574,10 +569,10 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
/* 256 bytes cannot cross page boundaries */
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
- if (rc == -EINVAL)
+ if (rc) {
rc = set_validity_icpt(scb_s, 0x0080U);
- if (rc)
goto unpin;
+ }
scb_s->itdba = hpa;
}
@@ -592,10 +587,10 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
* if this block gets bigger, we have to shadow it.
*/
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
- if (rc == -EINVAL)
+ if (rc) {
rc = set_validity_icpt(scb_s, 0x1310U);
- if (rc)
goto unpin;
+ }
scb_s->gvrd = hpa;
}
@@ -607,11 +602,11 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
/* 64 bytes cannot cross page boundaries */
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
- if (rc == -EINVAL)
+ if (rc) {
rc = set_validity_icpt(scb_s, 0x0043U);
- /* Validity 0x0044 will be checked by SIE */
- if (rc)
goto unpin;
+ }
+ /* Validity 0x0044 will be checked by SIE */
scb_s->riccbd = hpa;
}
if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
@@ -635,10 +630,10 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
* cross page boundaries
*/
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
- if (rc == -EINVAL)
+ if (rc) {
rc = set_validity_icpt(scb_s, 0x10b0U);
- if (rc)
goto unpin;
+ }
scb_s->sdnxo = hpa | sdnxc;
}
return 0;
@@ -663,7 +658,6 @@ static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
*
* Returns: - 0 if the scb was pinned.
* - > 0 if control has to be given to guest 2
- * - -ENOMEM if out of memory
*/
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
gpa_t gpa)
@@ -672,14 +666,13 @@ static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
int rc;
rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
- if (rc == -EINVAL) {
+ if (rc) {
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- if (!rc)
- rc = 1;
+ WARN_ON_ONCE(rc);
+ return 1;
}
- if (!rc)
- vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
- return rc;
+ vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
+ return 0;
}
/*
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index d66751397e72..495c9c4bacc7 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -79,21 +79,25 @@ ENTRY(memset)
ex %r4,0(%r3)
br %r14
.Lmemset_fill:
- stc %r3,0(%r2)
cghi %r4,1
lgr %r1,%r2
- ber %r14
+ je .Lmemset_fill_exit
aghi %r4,-2
- srlg %r3,%r4,8
- ltgr %r3,%r3
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
jz .Lmemset_fill_remainder
.Lmemset_fill_loop:
- mvc 1(256,%r1),0(%r1)
+ stc %r3,0(%r1)
+ mvc 1(255,%r1),0(%r1)
la %r1,256(%r1)
- brctg %r3,.Lmemset_fill_loop
+ brctg %r5,.Lmemset_fill_loop
.Lmemset_fill_remainder:
- larl %r3,.Lmemset_mvc
- ex %r4,0(%r3)
+ stc %r3,0(%r1)
+ larl %r5,.Lmemset_mvc
+ ex %r4,0(%r5)
+ br %r14
+.Lmemset_fill_exit:
+ stc %r3,0(%r1)
br %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
@@ -127,3 +131,47 @@ ENTRY(memcpy)
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memcpy)
+
+/*
+ * __memset16/32/64
+ *
+ * void *__memset16(uint16_t *s, uint16_t v, size_t count)
+ * void *__memset32(uint32_t *s, uint32_t v, size_t count)
+ * void *__memset64(uint64_t *s, uint64_t v, size_t count)
+ */
+.macro __MEMSET bits,bytes,insn
+ENTRY(__memset\bits)
+ ltgr %r4,%r4
+ bzr %r14
+ cghi %r4,\bytes
+ je .L__memset_exit\bits
+ aghi %r4,-(\bytes+1)
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
+ lgr %r1,%r2
+ jz .L__memset_remainder\bits
+.L__memset_loop\bits:
+ \insn %r3,0(%r1)
+ mvc \bytes(256-\bytes,%r1),0(%r1)
+ la %r1,256(%r1)
+ brctg %r5,.L__memset_loop\bits
+.L__memset_remainder\bits:
+ \insn %r3,0(%r1)
+ larl %r5,.L__memset_mvc\bits
+ ex %r4,0(%r5)
+ br %r14
+.L__memset_exit\bits:
+ \insn %r3,0(%r2)
+ br %r14
+.L__memset_mvc\bits:
+ mvc \bytes(1,%r1),0(%r1)
+.endm
+
+__MEMSET 16,2,sth
+EXPORT_SYMBOL(__memset16)
+
+__MEMSET 32,4,st
+EXPORT_SYMBOL(__memset32)
+
+__MEMSET 64,8,stg
+EXPORT_SYMBOL(__memset64)
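As a cross-check for the assembly, here is a portable C sketch of what __memset32 is expected to do. The count is taken to be in bytes, inferred from the "cghi %r4,\bytes" comparison above; the element-count wrappers presumably do the multiplication by the element size.

#include <stdint.h>
#include <string.h>

/* Reference sketch only: fill "count" bytes (a multiple of 4) at "s"
 * with the 32-bit pattern "v"; the overlapping-MVC loop above produces
 * the same result 256 bytes at a time. */
static void *__memset32_ref(uint32_t *s, uint32_t v, size_t count)
{
	char *p = (char *) s;
	size_t i;

	for (i = 0; i + sizeof(v) <= count; i += sizeof(v))
		memcpy(p + i, &v, sizeof(v));
	return s;
}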
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 1dc85f552f48..30a7c8c29964 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -9,8 +9,11 @@
#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
+#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <asm/alternative.h>
#include <asm/io.h>
int spin_retry = -1;
@@ -33,17 +36,49 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+struct spin_wait {
+ struct spin_wait *next, *prev;
+ int node_id;
+} __aligned(32);
+
+static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);
+
+#define _Q_LOCK_CPU_OFFSET 0
+#define _Q_LOCK_STEAL_OFFSET 16
+#define _Q_TAIL_IDX_OFFSET 18
+#define _Q_TAIL_CPU_OFFSET 20
+
+#define _Q_LOCK_CPU_MASK 0x0000ffff
+#define _Q_LOCK_STEAL_ADD 0x00010000
+#define _Q_LOCK_STEAL_MASK 0x00030000
+#define _Q_TAIL_IDX_MASK 0x000c0000
+#define _Q_TAIL_CPU_MASK 0xfff00000
+
+#define _Q_LOCK_MASK (_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+void arch_spin_lock_setup(int cpu)
+{
+ struct spin_wait *node;
+ int ix;
+
+ node = per_cpu_ptr(&spin_wait[0], cpu);
+ for (ix = 0; ix < 4; ix++, node++) {
+ memset(node, 0, sizeof(*node));
+ node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
+ (ix << _Q_TAIL_IDX_OFFSET);
+ }
+}
+
static inline int arch_load_niai4(int *lock)
{
int owner;
asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- " .long 0xb2fa0040\n" /* NIAI 4 */
-#endif
+ ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
- return owner;
+ return owner;
}
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
@@ -51,9 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
int expected = old;
asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- " .long 0xb2fa0080\n" /* NIAI 8 */
-#endif
+ ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock)
@@ -61,75 +94,161 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
return expected == old;
}
-void arch_spin_lock_wait(arch_spinlock_t *lp)
+static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
- int cpu = SPINLOCK_LOCKVAL;
- int owner, count;
+ int ix, cpu;
+
+ ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
+ cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
+ return per_cpu_ptr(&spin_wait[ix], cpu - 1);
+}
+
+static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
+{
+ if (lock & _Q_LOCK_CPU_MASK)
+ return lock & _Q_LOCK_CPU_MASK;
+ if (node == NULL || node->prev == NULL)
+ return 0; /* 0 -> no target cpu */
+ while (node->prev)
+ node = node->prev;
+ return node->node_id >> _Q_TAIL_CPU_OFFSET;
+}
+
+static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
+{
+ struct spin_wait *node, *next;
+ int lockval, ix, node_id, tail_id, old, new, owner, count;
+
+ ix = S390_lowcore.spinlock_index++;
+ barrier();
+ lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
+ node = this_cpu_ptr(&spin_wait[ix]);
+ node->prev = node->next = NULL;
+ node_id = node->node_id;
+
+ /* Enqueue the node for this CPU in the spinlock wait queue */
+ while (1) {
+ old = READ_ONCE(lp->lock);
+ if ((old & _Q_LOCK_CPU_MASK) == 0 &&
+ (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
+ /*
+ * The lock is free but there may be waiters.
+ * With no waiters simply take the lock; if there
+ * are waiters, try to steal the lock. The lock may
+ * be stolen three times before the next queued
+ * waiter gets the lock.
+ */
+ new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ /* Got the lock */
+ goto out;
+ /* lock passing in progress */
+ continue;
+ }
+ /* Make the node of this CPU the new tail. */
+ new = node_id | (old & _Q_LOCK_MASK);
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ break;
+ }
+ /* Set the 'next' pointer of the tail node in the queue */
+ tail_id = old & _Q_TAIL_MASK;
+ if (tail_id != 0) {
+ node->prev = arch_spin_decode_tail(tail_id);
+ WRITE_ONCE(node->prev->next, node);
+ }
/* Pass the virtual CPU to the lock holder if it is not running */
- owner = arch_load_niai4(&lp->lock);
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ owner = arch_spin_yield_target(old, node);
+ if (owner && arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
+
+ /* Spin on the CPU local node->prev pointer */
+ if (tail_id != 0) {
+ count = spin_retry;
+ while (READ_ONCE(node->prev) != NULL) {
+ if (count-- >= 0)
+ continue;
+ count = spin_retry;
+ /* Query running state of lock holder again. */
+ owner = arch_spin_yield_target(old, node);
+ if (owner && arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
+ }
+ }
+ /* Spin on the lock value in the spinlock_t */
count = spin_retry;
while (1) {
- owner = arch_load_niai4(&lp->lock);
- /* Try to get the lock if it is free. */
+ old = READ_ONCE(lp->lock);
+ owner = old & _Q_LOCK_CPU_MASK;
if (!owner) {
- if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
- return;
+ tail_id = old & _Q_TAIL_MASK;
+ new = ((tail_id != node_id) ? tail_id : 0) | lockval;
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ /* Got the lock */
+ break;
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
- /*
- * For multiple layers of hypervisors, e.g. z/VM + LPAR
- * yield the CPU unconditionally. For LPAR rely on the
- * sense running status.
- */
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
}
+
+ /* Pass the lock spinning job on to the next CPU in the queue */
+ if (node_id && tail_id != node_id) {
+ /* Wait until the next CPU has set up the 'next' pointer */
+ while ((next = READ_ONCE(node->next)) == NULL)
+ ;
+ next->prev = NULL;
+ }
+
+ out:
+ S390_lowcore.spinlock_index--;
}
-EXPORT_SYMBOL(arch_spin_lock_wait);
-void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
- int cpu = SPINLOCK_LOCKVAL;
- int owner, count;
+ int lockval, old, new, owner, count;
- local_irq_restore(flags);
+ lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
/* Pass the virtual CPU to the lock holder if it is not running */
- owner = arch_load_niai4(&lp->lock);
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
+ if (owner && arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
count = spin_retry;
while (1) {
- owner = arch_load_niai4(&lp->lock);
+ old = arch_load_niai4(&lp->lock);
+ owner = old & _Q_LOCK_CPU_MASK;
/* Try to get the lock if it is free. */
if (!owner) {
- local_irq_disable();
- if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
+ new = (old & _Q_TAIL_MASK) | lockval;
+ if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
+ /* Got the lock */
return;
- local_irq_restore(flags);
+ }
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
- /*
- * For multiple layers of hypervisors, e.g. z/VM + LPAR
- * yield the CPU unconditionally. For LPAR rely on the
- * sense running status.
- */
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
}
}
-EXPORT_SYMBOL(arch_spin_lock_wait_flags);
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+ /*
+ * Use queued spinlocks on dedicated CPUs; use classic spinlocks
+ * with niai if the CPU is shared and steal time can be >= 10%.
+ */
+ if (test_cpu_flag(CIF_DEDICATED_CPU))
+ arch_spin_lock_queued(lp);
+ else
+ arch_spin_lock_classic(lp);
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
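For reference, the lock word layout implied by the _Q_* masks further up, together with a worked value and the encoding used by arch_spin_lock_setup(); this is a reading aid, not new functionality.

/*
 * Lock word layout used by arch_spin_lock_queued():
 *
 *	bits  0..15  _Q_LOCK_CPU   owner CPU + 1, 0 if the lock is free
 *	bits 16..17  _Q_LOCK_STEAL steal counter, stealing stops at 3
 *	bits 18..19  _Q_TAIL_IDX   wait-node index on the tail CPU (0..3)
 *	bits 20..31  _Q_TAIL_CPU   tail CPU + 1, 0 if no CPU is queued
 *
 * Worked example: 0x00550003 means CPU 2 holds the lock (2 + 1 = 3),
 * the lock has been stolen once since the last queued handover, and
 * the queue tail is wait node 1 of CPU 4 (4 + 1 = 5).
 */
static inline int encode_tail_sketch(int cpu, int ix)
{
	return ((cpu + 1) << _Q_TAIL_CPU_OFFSET) | (ix << _Q_TAIL_IDX_OFFSET);
}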
@@ -148,126 +267,59 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
-void _raw_read_lock_wait(arch_rwlock_t *rw)
+void arch_read_lock_wait(arch_rwlock_t *rw)
{
- int count = spin_retry;
- int owner, old;
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
-#endif
- owner = 0;
- while (1) {
- if (count-- <= 0) {
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
- if (old < 0)
- continue;
- if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
- return;
+ if (unlikely(in_interrupt())) {
+ while (READ_ONCE(rw->cnts) & 0x10000)
+ barrier();
+ return;
}
+
+ /* Remove this reader again to allow recursive read locking */
+ __atomic_add_const(-1, &rw->cnts);
+ /* Put the reader into the wait queue */
+ arch_spin_lock(&rw->wait);
+ /* Now add this reader to the count value again */
+ __atomic_add_const(1, &rw->cnts);
+ /* Loop until the writer is done */
+ while (READ_ONCE(rw->cnts) & 0x10000)
+ barrier();
+ arch_spin_unlock(&rw->wait);
}
-EXPORT_SYMBOL(_raw_read_lock_wait);
+EXPORT_SYMBOL(arch_read_lock_wait);
-int _raw_read_trylock_retry(arch_rwlock_t *rw)
+void arch_write_lock_wait(arch_rwlock_t *rw)
{
- int count = spin_retry;
int old;
- while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
- if (old < 0)
- continue;
- if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(_raw_read_trylock_retry);
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ /* Add this CPU to the write waiters */
+ __atomic_add(0x20000, &rw->cnts);
-void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
-{
- int count = spin_retry;
- int owner, old;
+ /* Put the writer into the wait queue */
+ arch_spin_lock(&rw->wait);
- owner = 0;
while (1) {
- if (count-- <= 0) {
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
- smp_mb();
- if (old >= 0) {
- prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
- old = prev;
- }
- if ((old & 0x7fffffff) == 0 && prev >= 0)
+ old = READ_ONCE(rw->cnts);
+ if ((old & 0x1ffff) == 0 &&
+ __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
+ /* Got the lock */
break;
+ barrier();
}
-}
-EXPORT_SYMBOL(_raw_write_lock_wait);
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-void _raw_write_lock_wait(arch_rwlock_t *rw)
-{
- int count = spin_retry;
- int owner, old, prev;
- prev = 0x80000000;
- owner = 0;
- while (1) {
- if (count-- <= 0) {
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
- if (old >= 0 &&
- __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
- prev = old;
- else
- smp_mb();
- if ((old & 0x7fffffff) == 0 && prev >= 0)
- break;
- }
+ arch_spin_unlock(&rw->wait);
}
-EXPORT_SYMBOL(_raw_write_lock_wait);
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+EXPORT_SYMBOL(arch_write_lock_wait);
-int _raw_write_trylock_retry(arch_rwlock_t *rw)
+void arch_spin_relax(arch_spinlock_t *lp)
{
- int count = spin_retry;
- int old;
+ int cpu;
- while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
- if (old)
- continue;
- if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(_raw_write_trylock_retry);
-
-void arch_lock_relax(int cpu)
-{
+ cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
return;
- smp_yield_cpu(~cpu);
+ smp_yield_cpu(cpu - 1);
}
-EXPORT_SYMBOL(arch_lock_relax);
+EXPORT_SYMBOL(arch_spin_relax);
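The rwlock side uses a single ->cnts word plus the ->wait spinlock introduced by the accompanying header changes. The layout below is derived from the constants used above; a small predicate sketch shows the writer's acquire condition.

/*
 * rwlock ->cnts layout as used by arch_read_lock_wait() and
 * arch_write_lock_wait():
 *
 *	bits  0..15  number of active readers
 *	bit  16      (0x10000) a writer holds the lock
 *	bits 17..31  (0x20000 per waiter) writers queued on ->wait
 */
static inline int write_can_lock_sketch(int cnts)
{
	return (cnts & 0x1ffff) == 0;	/* no readers, no writer */
}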
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index dbf2fdad2724..a10e11f7a5f7 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(strlen);
*
* returns the minimum of the length of @s and @n
*/
-size_t strnlen(const char * s, size_t n)
+size_t strnlen(const char *s, size_t n)
{
return __strnend(s, n) - s;
}
@@ -195,14 +195,14 @@ EXPORT_SYMBOL(strncat);
/**
* strcmp - Compare two strings
- * @cs: One string
- * @ct: Another string
+ * @s1: One string
+ * @s2: Another string
*
- * returns 0 if @cs and @ct are equal,
- * < 0 if @cs is less than @ct
- * > 0 if @cs is greater than @ct
+ * returns 0 if @s1 and @s2 are equal,
+ * < 0 if @s1 is less than @s2
+ * > 0 if @s1 is greater than @s2
*/
-int strcmp(const char *cs, const char *ct)
+int strcmp(const char *s1, const char *s2)
{
register int r0 asm("0") = 0;
int ret = 0;
@@ -214,7 +214,7 @@ int strcmp(const char *cs, const char *ct)
" ic %1,0(%3)\n"
" sr %0,%1\n"
"1:"
- : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
+ : "+d" (ret), "+d" (r0), "+a" (s1), "+a" (s2)
: : "cc", "memory");
return ret;
}
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(strcmp);
* @s: The string to be searched
* @c: The character to search for
*/
-char * strrchr(const char * s, int c)
+char *strrchr(const char *s, int c)
{
size_t len = __strend(s) - s;
@@ -261,7 +261,7 @@ static inline int clcle(const char *s1, unsigned long l1,
* @s1: The string to be searched
* @s2: The string to search for
*/
-char * strstr(const char * s1,const char * s2)
+char *strstr(const char *s1, const char *s2)
{
int l1, l2;
@@ -307,15 +307,15 @@ EXPORT_SYMBOL(memchr);
/**
* memcmp - Compare two areas of memory
- * @cs: One area of memory
- * @ct: Another area of memory
+ * @s1: One area of memory
+ * @s2: Another area of memory
* @count: The size of the area.
*/
-int memcmp(const void *cs, const void *ct, size_t n)
+int memcmp(const void *s1, const void *s2, size_t n)
{
int ret;
- ret = clcle(cs, n, ct, n);
+ ret = clcle(s1, n, s2, n);
if (ret)
ret = ret == 1 ? -1 : 1;
return ret;
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 802903c50de1..c4f8039a35e8 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -40,10 +40,67 @@ static inline int copy_with_mvcos(void)
}
#endif
+void set_fs(mm_segment_t fs)
+{
+ current->thread.mm_segment = fs;
+ if (fs == USER_DS) {
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
+ } else {
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ if (fs & 1) {
+ if (fs == USER_DS_SACF)
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+ else
+ __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ set_cpu_flag(CIF_ASCE_SECONDARY);
+ }
+}
+EXPORT_SYMBOL(set_fs);
+
+mm_segment_t enable_sacf_uaccess(void)
+{
+ mm_segment_t old_fs;
+ unsigned long asce, cr;
+
+ old_fs = current->thread.mm_segment;
+ if (old_fs & 1)
+ return old_fs;
+ current->thread.mm_segment |= 1;
+ asce = S390_lowcore.kernel_asce;
+ if (likely(old_fs == USER_DS)) {
+ __ctl_store(cr, 1, 1);
+ if (cr != S390_lowcore.kernel_asce) {
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ asce = S390_lowcore.user_asce;
+ }
+ __ctl_store(cr, 7, 7);
+ if (cr != asce) {
+ __ctl_load(asce, 7, 7);
+ set_cpu_flag(CIF_ASCE_SECONDARY);
+ }
+ return old_fs;
+}
+EXPORT_SYMBOL(enable_sacf_uaccess);
+
+void disable_sacf_uaccess(mm_segment_t old_fs)
+{
+ current->thread.mm_segment = old_fs;
+ if (old_fs == USER_DS && test_facility(27)) {
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+}
+EXPORT_SYMBOL(disable_sacf_uaccess);
+
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x81UL;
+ register unsigned long reg0 asm("0") = 0x01UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -74,8 +131,9 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long size)
{
unsigned long tmp1, tmp2;
+ mm_segment_t old_fs;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -102,6 +160,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -116,7 +175,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
+ register unsigned long reg0 asm("0") = 0x010000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -147,8 +206,9 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long size)
{
unsigned long tmp1, tmp2;
+ mm_segment_t old_fs;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -175,6 +235,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -189,7 +250,7 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810081UL;
+ register unsigned long reg0 asm("0") = 0x010001UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -212,9 +273,10 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
unsigned long size)
{
+ mm_segment_t old_fs;
unsigned long tmp1;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -238,6 +300,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -251,7 +314,7 @@ EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
+ register unsigned long reg0 asm("0") = 0x010000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -279,9 +342,10 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
+ mm_segment_t old_fs;
unsigned long tmp1, tmp2;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -310,6 +374,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -345,10 +410,15 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
+ mm_segment_t old_fs;
+ unsigned long len;
+
if (unlikely(!size))
return 0;
- load_kernel_asce();
- return strnlen_user_srst(src, size);
+ old_fs = enable_sacf_uaccess();
+ len = strnlen_user_srst(src, size);
+ disable_sacf_uaccess(old_fs);
+ return len;
}
EXPORT_SYMBOL(__strnlen_user);
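Every sacf-based user-space access is now expected to be bracketed by the enable/disable pair added above, as the converted copy routines illustrate. The minimal calling pattern, as a sketch:

/* Usage sketch for the helpers added above. */
static unsigned long do_something_with_sacf(void)
{
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();	/* set up CR1/CR7 for sacf accesses */
	/* ... sacf 0 / sacf 256 based access (mvcp, mvcs, xc, srst) ... */
	disable_sacf_uaccess(old_fs);	/* restore the previous mode */
	return 0;
}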
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 829c63dbc81a..6cf024eb2085 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Collaborative memory management interface.
*
@@ -56,10 +57,10 @@ static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
-static void cmm_timer_fn(unsigned long);
+static void cmm_timer_fn(struct timer_list *);
static void cmm_set_timer(void);
+static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
static long cmm_alloc_pages(long nr, long *counter,
struct cmm_page_array **list)
@@ -194,13 +195,11 @@ static void cmm_set_timer(void)
if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
return;
}
- cmm_timer.function = cmm_timer_fn;
- cmm_timer.data = 0;
cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
add_timer(&cmm_timer);
}
-static void cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(struct timer_list *unused)
{
long nr;
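The hunks above convert cmm to the reworked timer API: the callback now takes a struct timer_list * and is bound at definition time, so the .function/.data assignments disappear. The general pattern, sketched for a hypothetical global timer:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void my_timer_fn(struct timer_list *unused);
static DEFINE_TIMER(my_timer, my_timer_fn);	/* callback bound here */

static void my_timer_fn(struct timer_list *unused)
{
	/* do the periodic work, then re-arm if needed */
	mod_timer(&my_timer, jiffies + 10 * HZ);
}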
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 242b78c0a9ec..93faeca52284 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -50,6 +50,13 @@
#define VM_FAULT_SIGNAL 0x080000
#define VM_FAULT_PFAULT 0x100000
+enum fault_type {
+ KERNEL_FAULT,
+ USER_FAULT,
+ VDSO_FAULT,
+ GMAP_FAULT,
+};
+
static unsigned long store_indication __read_mostly;
static int __init fault_init(void)
@@ -99,27 +106,34 @@ void bust_spinlocks(int yes)
}
/*
- * Returns the address space associated with the fault.
- * Returns 0 for kernel space and 1 for user space.
+ * Find out which address space caused the exception.
+ * Access register mode cannot occur here; a home space fault
+ * (space == 3) simply means an access via the kernel ASCE.
*/
-static inline int user_space_fault(struct pt_regs *regs)
+static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
unsigned long trans_exc_code;
- /*
- * The lowest two bits of the translation exception
- * identification indicate which paging table was used.
- */
trans_exc_code = regs->int_parm_long & 3;
- if (trans_exc_code == 3) /* home space -> kernel */
- return 0;
- if (user_mode(regs))
- return 1;
- if (trans_exc_code == 2) /* secondary space -> set_fs */
- return current->thread.mm_segment.ar4;
- if (current->flags & PF_VCPU)
- return 1;
- return 0;
+ if (likely(trans_exc_code == 0)) {
+ /* primary space exception */
+ if (IS_ENABLED(CONFIG_PGSTE) &&
+ test_pt_regs_flag(regs, PIF_GUEST_FAULT))
+ return GMAP_FAULT;
+ if (current->thread.mm_segment == USER_DS)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ if (trans_exc_code == 2) {
+ /* secondary space exception */
+ if (current->thread.mm_segment & 1) {
+ if (current->thread.mm_segment == USER_DS_SACF)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ return VDSO_FAULT;
+ }
+ /* home space exception -> access via kernel ASCE */
+ return KERNEL_FAULT;
}
static int bad_address(void *p)
@@ -204,20 +218,23 @@ static void dump_fault_info(struct pt_regs *regs)
break;
}
pr_cont("mode while using ");
- if (!user_space_fault(regs)) {
- asce = S390_lowcore.kernel_asce;
- pr_cont("kernel ");
- }
-#ifdef CONFIG_PGSTE
- else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
- struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
- asce = gmap->asce;
- pr_cont("gmap ");
- }
-#endif
- else {
+ switch (get_fault_type(regs)) {
+ case USER_FAULT:
asce = S390_lowcore.user_asce;
pr_cont("user ");
+ break;
+ case VDSO_FAULT:
+ asce = S390_lowcore.vdso_asce;
+ pr_cont("vdso ");
+ break;
+ case GMAP_FAULT:
+ asce = ((struct gmap *) S390_lowcore.gmap)->asce;
+ pr_cont("gmap ");
+ break;
+ case KERNEL_FAULT:
+ asce = S390_lowcore.kernel_asce;
+ pr_cont("kernel ");
+ break;
}
pr_cont("ASCE.\n");
dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
@@ -273,7 +290,7 @@ static noinline void do_no_context(struct pt_regs *regs)
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (!user_space_fault(regs))
+ if (get_fault_type(regs) == KERNEL_FAULT)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" in virtual kernel address space\n");
else
@@ -395,12 +412,11 @@ static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
*/
static inline int do_exception(struct pt_regs *regs, int access)
{
-#ifdef CONFIG_PGSTE
struct gmap *gmap;
-#endif
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
+ enum fault_type type;
unsigned long trans_exc_code;
unsigned long address;
unsigned int flags;
@@ -425,8 +441,19 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
+ type = get_fault_type(regs);
+ switch (type) {
+ case KERNEL_FAULT:
+ goto out;
+ case VDSO_FAULT:
+ fault = VM_FAULT_BADMAP;
goto out;
+ case USER_FAULT:
+ case GMAP_FAULT:
+ if (faulthandler_disabled() || !mm)
+ goto out;
+ break;
+ }
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -437,10 +464,9 @@ static inline int do_exception(struct pt_regs *regs, int access)
flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
-#ifdef CONFIG_PGSTE
- gmap = (current->flags & PF_VCPU) ?
- (struct gmap *) S390_lowcore.gmap : NULL;
- if (gmap) {
+ gmap = NULL;
+ if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
+ gmap = (struct gmap *) S390_lowcore.gmap;
current->thread.gmap_addr = address;
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
current->thread.gmap_int_code = regs->int_code & 0xffff;
@@ -452,7 +478,6 @@ static inline int do_exception(struct pt_regs *regs, int access)
if (gmap->pfault_enabled)
flags |= FAULT_FLAG_RETRY_NOWAIT;
}
-#endif
retry:
fault = VM_FAULT_BADMAP;
@@ -507,15 +532,14 @@ retry:
regs, address);
}
if (fault & VM_FAULT_RETRY) {
-#ifdef CONFIG_PGSTE
- if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
+ (flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* FAULT_FLAG_RETRY_NOWAIT has been set,
* mmap_sem has not been released */
current->thread.gmap_pfault = 1;
fault = VM_FAULT_PFAULT;
goto out_up;
}
-#endif
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~(FAULT_FLAG_ALLOW_RETRY |
@@ -525,8 +549,7 @@ retry:
goto retry;
}
}
-#ifdef CONFIG_PGSTE
- if (gmap) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
if (address == -EFAULT) {
@@ -538,7 +561,6 @@ retry:
goto out_up;
}
}
-#endif
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -706,7 +728,7 @@ static void pfault_interrupt(struct ext_code ext_code,
return;
inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
- pid = param64 & LPP_PFAULT_PID_MASK;
+ pid = param64 & LPP_PID_MASK;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
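For reference, the classification implemented by get_fault_type() above condenses to the following table, summarized here as a reading aid and derived directly from the function:

/*
 *	trans_exc_code & 3	additional condition		fault type
 *	0 (primary space)	PIF_GUEST_FAULT set		GMAP_FAULT
 *	0 (primary space)	mm_segment == USER_DS		USER_FAULT
 *	0 (primary space)	otherwise			KERNEL_FAULT
 *	2 (secondary space)	mm_segment == USER_DS_SACF	USER_FAULT
 *	2 (secondary space)	other sacf mode (bit 0 set)	KERNEL_FAULT
 *	2 (secondary space)	no sacf mode			VDSO_FAULT
 *	1 or 3 (AR / home)	-				KERNEL_FAULT
 */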
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 2f66290c9b92..05d459b638f5 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* KVM guest address space mapping code
*
@@ -1187,12 +1188,11 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
unsigned long *sgt)
{
- unsigned long asce, *pgt;
+ unsigned long *pgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
continue;
@@ -1245,12 +1245,11 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
unsigned long *r3t)
{
- unsigned long asce, *sgt;
+ unsigned long *sgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
continue;
@@ -1303,12 +1302,11 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
unsigned long *r2t)
{
- unsigned long asce, *r3t;
+ unsigned long *r3t;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
continue;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 41ba9bd53e48..671535e64aba 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -95,6 +95,7 @@ void __init paging_init(void)
}
init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
S390_lowcore.kernel_asce = init_mm.context.asce;
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
@@ -145,8 +146,8 @@ void __init mem_init(void)
void free_initmem(void)
{
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 5bea139517a2..831bdcf407bb 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* Started by Ingo Molnar <mingo@elte.hu>
*/
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index cc2faffa7d6e..cb364153c43c 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -71,10 +71,8 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
- if (current->active_mm == mm) {
- clear_user_asce();
+ if (current->active_mm == mm)
set_user_asce(mm);
- }
__tlb_flush_local();
}
@@ -85,8 +83,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
- if (end >= TASK_SIZE_MAX)
- return -ENOMEM;
rc = 0;
notify = 0;
while (mm->context.asce_limit < end) {
@@ -159,13 +155,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
struct page *page;
- unsigned long *table;
+ u64 *table;
page = alloc_page(GFP_KERNEL);
if (page) {
- table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ table = (u64 *)page_to_phys(page);
+ memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}
return page;
}
@@ -222,12 +218,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
atomic_set(&page->_mapcount, 3);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} else {
/* Return the first 2K fragment of the page */
atomic_set(&page->_mapcount, 1);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+ memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
spin_lock_bh(&mm->context.lock);
list_add(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.lock);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae677f814bc0..4f2b65d01a70 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f2ada0bc08e6..3316d463fc29 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -60,7 +60,7 @@ pte_t __ref *vmem_pte_alloc(void)
pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_INVALID, size);
+ memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
@@ -403,17 +403,17 @@ void __init vmem_map_init(void)
for_each_memblock(memory, reg)
vmem_add_mem(reg->base, reg->size);
- __set_memory((unsigned long) _stext,
- (_etext - _stext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_stext,
+ (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory((unsigned long) _etext,
- (_eshared - _etext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_etext,
+ (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
SET_MEMORY_RO);
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
- (_eshared - _stext) >> 10);
+ (unsigned long)(__end_rodata - _stext) >> 10);
}
/*
diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile
index 90568c33ddb0..e0d5f245e42b 100644
--- a/arch/s390/net/Makefile
+++ b/arch/s390/net/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Arch-specific network modules
#
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index 7fa55ccffe48..5e1e5133132d 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -53,10 +53,13 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
*
* We get 160 bytes stack space from calling function, but only use
* 12 * 8 bytes for old backchain, r15..r6, and tail_call_cnt.
+ *
+ * The stack size used by the BPF program ("BPF stack" above) is passed
+ * via "aux->stack_depth".
*/
-#define STK_SPACE (MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
+#define STK_SPACE_ADD (8 + 8 + 4 + 4 + 160)
#define STK_160_UNUSED (160 - 12 * 8)
-#define STK_OFF (STK_SPACE - STK_160_UNUSED)
+#define STK_OFF (STK_SPACE_ADD - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
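Not part of the patch itself, but a useful cross-check: the hunk above replaces the fixed MAX_BPF_STACK reservation with the per-program aux->stack_depth. A minimal stand-alone sketch of the resulting arithmetic, with the constants copied from the macros shown and stack_depth as an arbitrary example value:

/*
 * Stack accounting sketch: STK_OFF is what the JIT prologue now adds on
 * top of the program's own stack_depth ("aghi %r15,-(STK_OFF + stack_depth)").
 */
#include <stdio.h>

#define STK_SPACE_ADD	(8 + 8 + 4 + 4 + 160)
#define STK_160_UNUSED	(160 - 12 * 8)
#define STK_OFF		(STK_SPACE_ADD - STK_160_UNUSED)

int main(void)
{
	int stack_depth = 64;	/* example: program uses 64 bytes of BPF stack */

	/* Before the change the frame was always sized for MAX_BPF_STACK (512 bytes). */
	printf("frame grows by %d bytes (was %d with MAX_BPF_STACK)\n",
	       STK_OFF + stack_depth, STK_OFF + 512);
	return 0;
}
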
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index b15cd2f0320f..9557d8b516df 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -55,8 +55,7 @@ struct bpf_jit {
#define SEEN_LITERAL 8 /* code uses literals */
#define SEEN_FUNC 16 /* calls C functions */
#define SEEN_TAIL_CALL 32 /* code uses tail calls */
-#define SEEN_SKB_CHANGE 64 /* code changes skb data */
-#define SEEN_REG_AX 128 /* code uses constant blinding */
+#define SEEN_REG_AX 64 /* code uses constant blinding */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
/*
@@ -320,12 +319,12 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
/*
* Restore registers from "rs" (register start) to "re" (register end) on stack
*/
-static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
+static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (jit->seen & SEEN_STACK)
- off += STK_OFF;
+ off += STK_OFF + stack_depth;
if (rs == re)
/* lg %rs,off(%r15) */
@@ -369,7 +368,7 @@ static int get_end(struct bpf_jit *jit, int start)
* Save and restore clobbered registers (6-15) on stack.
* We save/restore registers in chunks with gap >= 2 registers.
*/
-static void save_restore_regs(struct bpf_jit *jit, int op)
+static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
int re = 6, rs;
@@ -382,7 +381,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
if (op == REGS_SAVE)
save_regs(jit, rs, re);
else
- restore_regs(jit, rs, re);
+ restore_regs(jit, rs, re, stack_depth);
re++;
} while (re <= 15);
}
@@ -414,7 +413,7 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit)
* Save registers and create stack frame if necessary.
* See stack frame layout description in "bpf_jit.h"!
*/
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
{
if (jit->seen & SEEN_TAIL_CALL) {
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -427,7 +426,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
/* Tail calls have to skip above initialization */
jit->tail_call_start = jit->prg;
/* Save registers */
- save_restore_regs(jit, REGS_SAVE);
+ save_restore_regs(jit, REGS_SAVE, stack_depth);
/* Setup literal pool */
if (jit->seen & SEEN_LITERAL) {
/* basr %r13,0 */
@@ -442,24 +441,24 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */
- EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
+ EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
if (jit->seen & SEEN_FUNC)
/* stg %w1,152(%r15) (backchain) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152);
}
- if (jit->seen & SEEN_SKB)
+ if (jit->seen & SEEN_SKB) {
emit_load_skb_data_hlen(jit);
- if (jit->seen & SEEN_SKB_CHANGE)
/* stg %b1,STK_OFF_SKBP(%r0,%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
STK_OFF_SKBP);
+ }
}
/*
* Function epilogue
*/
-static void bpf_jit_epilogue(struct bpf_jit *jit)
+static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
/* Return 0 */
if (jit->seen & SEEN_RET0) {
@@ -471,7 +470,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/* Load exit code: lgr %r2,%b0 */
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
- save_restore_regs(jit, REGS_RESTORE);
+ save_restore_regs(jit, REGS_RESTORE, stack_depth);
/* br %r14 */
_EMIT2(0x07fe);
}
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
EMIT2(0x0d00, REG_14, REG_W1);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
- if (bpf_helper_changes_pkt_data((void *)func)) {
- jit->seen |= SEEN_SKB_CHANGE;
+ if ((jit->seen & SEEN_SKB) &&
+ bpf_helper_changes_pkt_data((void *)func)) {
/* lg %b1,STK_OFF_SKBP(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
REG_15, STK_OFF_SKBP);
@@ -1019,7 +1018,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
*/
if (jit->seen & SEEN_STACK)
- off = STK_OFF_TCCNT + STK_OFF;
+ off = STK_OFF_TCCNT + STK_OFF + fp->aux->stack_depth;
else
off = STK_OFF_TCCNT;
/* lhi %w0,1 */
@@ -1047,7 +1046,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/*
* Restore registers before calling function
*/
- save_restore_regs(jit, REGS_RESTORE);
+ save_restore_regs(jit, REGS_RESTORE, fp->aux->stack_depth);
/*
* goto *(prog->bpf_func + tail_call_start);
@@ -1273,7 +1272,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
jit->lit = jit->lit_start;
jit->prg = 0;
- bpf_jit_prologue(jit);
+ bpf_jit_prologue(jit, fp->aux->stack_depth);
for (i = 0; i < fp->len; i += insn_count) {
insn_count = bpf_jit_insn(jit, fp, i);
if (insn_count < 0)
@@ -1281,7 +1280,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
/* Next instruction address */
jit->addrs[i + insn_count] = jit->prg;
}
- bpf_jit_epilogue(jit);
+ bpf_jit_epilogue(jit, fp->aux->stack_depth);
jit->lit_start = jit->prg;
jit->size = jit->lit;
diff --git a/arch/s390/numa/Makefile b/arch/s390/numa/Makefile
index f94ecaffa71b..66c2dff74895 100644
--- a/arch/s390/numa/Makefile
+++ b/arch/s390/numa/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += numa.o
obj-y += toptree.o
obj-$(CONFIG_NUMA_EMU) += mode_emu.o
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 805d8b29193a..22d0871291ee 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the s390 PCI subsystem.
#
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index a25d95a6612d..4902fed221c0 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
@@ -368,7 +369,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
/* End of second scan with interrupts on. */
break;
/* First scan complete, reenable interrupts. */
- zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
+ break;
si = 0;
continue;
}
@@ -956,7 +958,7 @@ static int __init pci_base_init(void)
if (!s390_pci_probe)
return 0;
- if (!test_facility(69) || !test_facility(71) || !test_facility(72))
+ if (!test_facility(69) || !test_facility(71))
return 0;
rc = zpci_debug_init();
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index c2f786f0ea06..b482e95b6249 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012,2015
*
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 0d300ee00f4e..2d15d84c20ed 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
@@ -180,6 +181,9 @@ out_unlock:
static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
size_t size, int flags)
{
+ unsigned long irqflags;
+ int ret;
+
/*
* With zdev->tlb_refresh == 0, rpcit is not required to establish new
* translations when previously invalid translation-table entries are
@@ -195,8 +199,22 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
return 0;
}
- return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
- PAGE_ALIGN(size));
+ ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+ PAGE_ALIGN(size));
+ if (ret == -ENOMEM && !s390_iommu_strict) {
+ /* enable the hypervisor to free some resources */
+ if (zpci_refresh_global(zdev))
+ goto out;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
+ bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+ zdev->lazy_bitmap, zdev->iommu_pages);
+ bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
+ ret = 0;
+ }
+out:
+ return ret;
}
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index ea34086c8674..f069929e8211 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* s390 specific pci instructions
*
@@ -7,6 +8,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/processor.h>
@@ -87,15 +89,21 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
if (cc)
zpci_err_insn(cc, status, addr, range);
+ if (cc == 1 && (status == 4 || status == 16))
+ return -ENOMEM;
+
return (cc) ? -EIO : 0;
}
/* Set Interruption Controls */
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{
+ if (!test_facility(72))
+ return -EIO;
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+ return 0;
}
/* PCI Load */
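Not part of the patch itself: zpci_set_irq_ctrl() now returns int and carries the facility-72 check that pci_base_init() no longer performs, so callers are expected to handle the error (as the pci.c hunk above does). A hypothetical caller sketch, kernel context assumed; the function name is made up, the arguments are the ones used in zpci_irq_handler():

/*
 * Hypothetical caller: only illustrates that the new return value of
 * zpci_set_irq_ctrl() must be checked.
 */
static int example_set_irq_ctrl(void)
{
	int rc;

	rc = zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	if (rc)
		return rc;	/* e.g. -EIO when facility 72 is not installed */
	return 0;
}
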
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index d54c149fbb6b..2e70e25de07a 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -4,11 +4,21 @@
#
hostprogs-y += gen_facilities
+hostprogs-y += gen_opcode_table
+
HOSTCFLAGS_gen_facilities.o += -Wall $(LINUXINCLUDE)
+HOSTCFLAGS_gen_opcode_table.o += -Wall $(LINUXINCLUDE)
define filechk_facilities.h
$(obj)/gen_facilities
endef
+define filechk_dis.h
+ ( $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt )
+endef
+
include/generated/facilities.h: $(obj)/gen_facilities FORCE
$(call filechk,facilities.h)
+
+include/generated/dis.h: $(obj)/gen_opcode_table FORCE
+ $(call filechk,dis.h)
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
new file mode 100644
index 000000000000..357d42681cef
--- /dev/null
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generate opcode table initializers for the in-kernel disassembler.
+ *
+ * Copyright IBM Corp. 2017
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdio.h>
+
+#define STRING_SIZE_MAX 20
+
+struct insn_type {
+ unsigned char byte;
+ unsigned char mask;
+ char **format;
+};
+
+struct insn {
+ struct insn_type *type;
+ char opcode[STRING_SIZE_MAX];
+ char name[STRING_SIZE_MAX];
+ char upper[STRING_SIZE_MAX];
+ char format[STRING_SIZE_MAX];
+ unsigned int name_len;
+};
+
+struct insn_group {
+ struct insn_type *type;
+ int offset;
+ int count;
+ char opcode[2];
+};
+
+struct insn_format {
+ char *format;
+ int type;
+};
+
+struct gen_opcode {
+ struct insn *insn;
+ int nr;
+ struct insn_group *group;
+ int nr_groups;
+};
+
+/*
+ * Table of instruction format types. Each opcode is defined with at
+ * least one byte (two nibbles), three nibbles, or two bytes (four
+ * nibbles).
+ * The byte member of each instruction format type entry defines
+ * within which byte of an instruction the third (and fourth) nibble
+ * of an opcode can be found. The mask member is the and-mask that
+ * needs to be applied on this byte in order to get the third (and
+ * fourth) nibble of the opcode.
+ * The format array defines all instruction formats (as defined in the
+ * Principles of Operation) which have the same position of the opcode
+ * nibbles.
+ * Instruction formats with 1-byte opcodes are a special case: for these
+ * the byte member is always zero, so the mask is applied to the (only)
+ * byte that contains the opcode.
+ */
+static struct insn_type insn_type_table[] = {
+ {
+ .byte = 0,
+ .mask = 0xff,
+ .format = (char *[]) {
+ "MII",
+ "RR",
+ "RS",
+ "RSI",
+ "RX",
+ "SI",
+ "SMI",
+ "SS",
+ NULL,
+ },
+ },
+ {
+ .byte = 1,
+ .mask = 0x0f,
+ .format = (char *[]) {
+ "RI",
+ "RIL",
+ "SSF",
+ NULL,
+ },
+ },
+ {
+ .byte = 1,
+ .mask = 0xff,
+ .format = (char *[]) {
+ "E",
+ "IE",
+ "RRE",
+ "RRF",
+ "RRR",
+ "S",
+ "SIL",
+ "SSE",
+ NULL,
+ },
+ },
+ {
+ .byte = 5,
+ .mask = 0xff,
+ .format = (char *[]) {
+ "RIE",
+ "RIS",
+ "RRS",
+ "RSE",
+ "RSL",
+ "RSY",
+ "RXE",
+ "RXF",
+ "RXY",
+ "SIY",
+ "VRI",
+ "VRR",
+ "VRS",
+ "VRV",
+ "VRX",
+ "VSI",
+ NULL,
+ },
+ },
+};
+
+static struct insn_type *insn_format_to_type(char *format)
+{
+ char tmp[STRING_SIZE_MAX];
+ char *base_format, **ptr;
+ int i;
+
+ strcpy(tmp, format);
+ base_format = tmp;
+ base_format = strsep(&base_format, "_");
+ for (i = 0; i < sizeof(insn_type_table) / sizeof(insn_type_table[0]); i++) {
+ ptr = insn_type_table[i].format;
+ while (*ptr) {
+ if (!strcmp(base_format, *ptr))
+ return &insn_type_table[i];
+ ptr++;
+ }
+ }
+ exit(EXIT_FAILURE);
+}
+
+static void read_instructions(struct gen_opcode *desc)
+{
+ struct insn insn;
+ int rc, i;
+
+ while (1) {
+ rc = scanf("%s %s %s", insn.opcode, insn.name, insn.format);
+ if (rc == EOF)
+ break;
+ if (rc != 3)
+ exit(EXIT_FAILURE);
+ insn.type = insn_format_to_type(insn.format);
+ insn.name_len = strlen(insn.name);
+ for (i = 0; i <= insn.name_len; i++)
+ insn.upper[i] = toupper((unsigned char)insn.name[i]);
+ desc->nr++;
+ desc->insn = realloc(desc->insn, desc->nr * sizeof(*desc->insn));
+ if (!desc->insn)
+ exit(EXIT_FAILURE);
+ desc->insn[desc->nr - 1] = insn;
+ }
+}
+
+static int cmpformat(const void *a, const void *b)
+{
+ return strcmp(((struct insn *)a)->format, ((struct insn *)b)->format);
+}
+
+static void print_formats(struct gen_opcode *desc)
+{
+ char *format;
+ int i, count;
+
+ qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpformat);
+ format = "";
+ count = 0;
+ printf("enum {\n");
+ for (i = 0; i < desc->nr; i++) {
+ if (!strcmp(format, desc->insn[i].format))
+ continue;
+ count++;
+ format = desc->insn[i].format;
+ printf("\tINSTR_%s,\n", format);
+ }
+ printf("}; /* %d */\n\n", count);
+}
+
+static int cmp_long_insn(const void *a, const void *b)
+{
+ return strcmp(((struct insn *)a)->name, ((struct insn *)b)->name);
+}
+
+static void print_long_insn(struct gen_opcode *desc)
+{
+ struct insn *insn;
+ int i, count;
+
+ qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmp_long_insn);
+ count = 0;
+ printf("enum {\n");
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->name_len < 6)
+ continue;
+ printf("\tLONG_INSN_%s,\n", insn->upper);
+ count++;
+ }
+ printf("}; /* %d */\n\n", count);
+
+ printf("#define LONG_INSN_INITIALIZER { \\\n");
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->name_len < 6)
+ continue;
+ printf("\t[LONG_INSN_%s] = \"%s\", \\\n", insn->upper, insn->name);
+ }
+ printf("}\n\n");
+}
+
+static void print_opcode(struct insn *insn, int nr)
+{
+ char *opcode;
+
+ opcode = insn->opcode;
+ if (insn->type->byte != 0)
+ opcode += 2;
+ printf("\t[%4d] = { .opfrag = 0x%s, .format = INSTR_%s, ", nr, opcode, insn->format);
+ if (insn->name_len < 6)
+ printf(".name = \"%s\" ", insn->name);
+ else
+ printf(".offset = LONG_INSN_%s ", insn->upper);
+ printf("}, \\\n");
+}
+
+static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
+{
+ struct insn_group *group;
+
+ group = desc->group ? &desc->group[desc->nr_groups - 1] : NULL;
+ if (group && (!strncmp(group->opcode, insn->opcode, 2) || group->type->byte == 0)) {
+ group->count++;
+ return;
+ }
+ desc->nr_groups++;
+ desc->group = realloc(desc->group, desc->nr_groups * sizeof(*desc->group));
+ if (!desc->group)
+ exit(EXIT_FAILURE);
+ group = &desc->group[desc->nr_groups - 1];
+ strncpy(group->opcode, insn->opcode, 2);
+ group->type = insn->type;
+ group->offset = offset;
+ group->count = 1;
+}
+
+static int cmpopcode(const void *a, const void *b)
+{
+ return strcmp(((struct insn *)a)->opcode, ((struct insn *)b)->opcode);
+}
+
+static void print_opcode_table(struct gen_opcode *desc)
+{
+ char opcode[2] = "";
+ struct insn *insn;
+ int i, offset;
+
+ qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpopcode);
+ printf("#define OPCODE_TABLE_INITIALIZER { \\\n");
+ offset = 0;
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->type->byte == 0)
+ continue;
+ add_to_group(desc, insn, offset);
+ if (strncmp(opcode, insn->opcode, 2)) {
+ strncpy(opcode, insn->opcode, 2);
+ printf("\t/* %.2s */ \\\n", opcode);
+ }
+ print_opcode(insn, offset);
+ offset++;
+ }
+ printf("\t/* 1-byte opcode instructions */ \\\n");
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->type->byte != 0)
+ continue;
+ add_to_group(desc, insn, offset);
+ print_opcode(insn, offset);
+ offset++;
+ }
+ printf("}\n\n");
+}
+
+static void print_opcode_table_offsets(struct gen_opcode *desc)
+{
+ struct insn_group *group;
+ int i;
+
+ printf("#define OPCODE_OFFSET_INITIALIZER { \\\n");
+ for (i = 0; i < desc->nr_groups; i++) {
+ group = &desc->group[i];
+ printf("\t{ .opcode = 0x%.2s, .mask = 0x%02x, .byte = %d, .offset = %d, .count = %d }, \\\n",
+ group->opcode, group->type->mask, group->type->byte, group->offset, group->count);
+ }
+ printf("}\n\n");
+}
+
+int main(int argc, char **argv)
+{
+ struct gen_opcode _desc = { 0 };
+ struct gen_opcode *desc = &_desc;
+
+ read_instructions(desc);
+ printf("#ifndef __S390_GENERATED_DIS_H__\n");
+ printf("#define __S390_GENERATED_DIS_H__\n");
+ printf("/*\n");
+ printf(" * DO NOT MODIFY.\n");
+ printf(" *\n");
+ printf(" * This file was generated by %s\n", __FILE__);
+ printf(" */\n\n");
+ print_formats(desc);
+ print_long_insn(desc);
+ print_opcode_table(desc);
+ print_opcode_table_offsets(desc);
+ printf("#endif\n");
+ exit(EXIT_SUCCESS);
+}
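Not part of the patch itself: a stand-alone sketch of how tables in the shape emitted by gen_opcode_table can be consumed. The struct and field names below merely mirror what the generator prints (opfrag, format, name, opcode, mask, byte, offset, count), and the lookup loop is only an illustration, not the kernel's disassembler; the two entries are hand-written from the opcodes.txt lines "b2a5 tre RRE_RR" and "b2a6 cu21 RRF_U0RR".

#include <stdio.h>

enum { INSTR_RRE_RR, INSTR_RRF_U0RR };

/* One OPCODE_TABLE_INITIALIZER-style entry: remaining opcode nibbles + format + name. */
struct instr {
	unsigned char opfrag;
	unsigned char format;
	char name[8];
};

/* One OPCODE_OFFSET_INITIALIZER-style entry: where a group's opcode nibbles live. */
struct instr_offset {
	unsigned char opcode;
	unsigned char mask;
	unsigned char byte;
	int offset;
	int count;
};

static const struct instr table[] = {
	{ .opfrag = 0xa5, .format = INSTR_RRE_RR,   .name = "tre"  },
	{ .opfrag = 0xa6, .format = INSTR_RRF_U0RR, .name = "cu21" },
};

static const struct instr_offset offsets[] = {
	/* all 0xb2xx instructions: third/fourth opcode nibble in byte 1, mask 0xff */
	{ .opcode = 0xb2, .mask = 0xff, .byte = 1, .offset = 0, .count = 2 },
};

int main(void)
{
	unsigned char insn[2] = { 0xb2, 0xa5 };	/* machine code for "tre" */
	const struct instr_offset *io = &offsets[0];
	int i;

	for (i = 0; i < io->count; i++) {
		unsigned char opfrag = insn[io->byte] & io->mask;

		if (table[io->offset + i].opfrag == opfrag) {
			printf("decoded: %s\n", table[io->offset + i].name);
			break;
		}
	}
	return 0;
}
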
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
new file mode 100644
index 000000000000..1cbed82cd17b
--- /dev/null
+++ b/arch/s390/tools/opcodes.txt
@@ -0,0 +1,1183 @@
+0101 pr E
+0102 upt E
+0104 ptff E
+0107 sckpf E
+010a pfpo E
+010b tam E
+010c sam24 E
+010d sam31 E
+010e sam64 E
+01ff trap2 E
+04 spm RR_R0
+05 balr RR_RR
+06 bctr RR_RR
+07 bcr RR_UR
+0a svc RR_U0
+0b bsm RR_RR
+0c bassm RR_RR
+0d basr RR_RR
+0e mvcl RR_RR
+0f clcl RR_RR
+10 lpr RR_RR
+11 lnr RR_RR
+12 ltr RR_RR
+13 lcr RR_RR
+14 nr RR_RR
+15 clr RR_RR
+16 or RR_RR
+17 xr RR_RR
+18 lr RR_RR
+19 cr RR_RR
+1a ar RR_RR
+1b sr RR_RR
+1c mr RR_RR
+1d dr RR_RR
+1e alr RR_RR
+1f slr RR_RR
+20 lpdr RR_FF
+21 lndr RR_FF
+22 ltdr RR_FF
+23 lcdr RR_FF
+24 hdr RR_FF
+25 ldxr RR_FF
+26 mxr RR_FF
+27 mxdr RR_FF
+28 ldr RR_FF
+29 cdr RR_FF
+2a adr RR_FF
+2b sdr RR_FF
+2c mdr RR_FF
+2d ddr RR_FF
+2e awr RR_FF
+2f swr RR_FF
+30 lper RR_FF
+31 lner RR_FF
+32 lter RR_FF
+33 lcer RR_FF
+34 her RR_FF
+35 ledr RR_FF
+36 axr RR_FF
+37 sxr RR_FF
+38 ler RR_FF
+39 cer RR_FF
+3a aer RR_FF
+3b ser RR_FF
+3c mder RR_FF
+3d der RR_FF
+3e aur RR_FF
+3f sur RR_FF
+40 sth RX_RRRD
+41 la RX_RRRD
+42 stc RX_RRRD
+43 ic RX_RRRD
+44 ex RX_RRRD
+45 bal RX_RRRD
+46 bct RX_RRRD
+47 bc RX_URRD
+48 lh RX_RRRD
+49 ch RX_RRRD
+4a ah RX_RRRD
+4b sh RX_RRRD
+4c mh RX_RRRD
+4d bas RX_RRRD
+4e cvd RX_RRRD
+4f cvb RX_RRRD
+50 st RX_RRRD
+51 lae RX_RRRD
+54 n RX_RRRD
+55 cl RX_RRRD
+56 o RX_RRRD
+57 x RX_RRRD
+58 l RX_RRRD
+59 c RX_RRRD
+5a a RX_RRRD
+5b s RX_RRRD
+5c m RX_RRRD
+5d d RX_RRRD
+5e al RX_RRRD
+5f sl RX_RRRD
+60 std RX_FRRD
+67 mxd RX_FRRD
+68 ld RX_FRRD
+69 cd RX_FRRD
+6a ad RX_FRRD
+6b sd RX_FRRD
+6c md RX_FRRD
+6d dd RX_FRRD
+6e aw RX_FRRD
+6f sw RX_FRRD
+70 ste RX_FRRD
+71 ms RX_RRRD
+78 le RX_FRRD
+79 ce RX_FRRD
+7a ae RX_FRRD
+7b se RX_FRRD
+7c mde RX_FRRD
+7d de RX_FRRD
+7e au RX_FRRD
+7f su RX_FRRD
+80 ssm SI_RD
+82 lpsw SI_RD
+83 diag RS_RRRD
+84 brxh RSI_RRP
+85 brxle RSI_RRP
+86 bxh RS_RRRD
+87 bxle RS_RRRD
+88 srl RS_R0RD
+89 sll RS_R0RD
+8a sra RS_R0RD
+8b sla RS_R0RD
+8c srdl RS_R0RD
+8d sldl RS_R0RD
+8e srda RS_R0RD
+8f slda RS_R0RD
+90 stm RS_RRRD
+91 tm SI_URD
+92 mvi SI_URD
+93 ts SI_RD
+94 ni SI_URD
+95 cli SI_URD
+96 oi SI_URD
+97 xi SI_URD
+98 lm RS_RRRD
+99 trace RS_RRRD
+9a lam RS_AARD
+9b stam RS_AARD
+a50 iihh RI_RU
+a51 iihl RI_RU
+a52 iilh RI_RU
+a53 iill RI_RU
+a54 nihh RI_RU
+a55 nihl RI_RU
+a56 nilh RI_RU
+a57 nill RI_RU
+a58 oihh RI_RU
+a59 oihl RI_RU
+a5a oilh RI_RU
+a5b oill RI_RU
+a5c llihh RI_RU
+a5d llihl RI_RU
+a5e llilh RI_RU
+a5f llill RI_RU
+a70 tmlh RI_RU
+a71 tmll RI_RU
+a72 tmhh RI_RU
+a73 tmhl RI_RU
+a74 brc RI_UP
+a75 bras RI_RP
+a76 brct RI_RP
+a77 brctg RI_RP
+a78 lhi RI_RI
+a79 lghi RI_RI
+a7a ahi RI_RI
+a7b aghi RI_RI
+a7c mhi RI_RI
+a7d mghi RI_RI
+a7e chi RI_RI
+a7f cghi RI_RI
+a8 mvcle RS_RRRD
+a9 clcle RS_RRRD
+aa0 rinext RI_RI
+aa1 rion RI_RI
+aa2 tric RI_RI
+aa3 rioff RI_RI
+aa4 riemit RI_RI
+ac stnsm SI_URD
+ad stosm SI_URD
+ae sigp RS_RRRD
+af mc SI_URD
+b1 lra RX_RRRD
+b202 stidp S_RD
+b204 sck S_RD
+b205 stck S_RD
+b206 sckc S_RD
+b207 stckc S_RD
+b208 spt S_RD
+b209 stpt S_RD
+b20a spka S_RD
+b20b ipk S_00
+b20d ptlb S_00
+b210 spx S_RD
+b211 stpx S_RD
+b212 stap S_RD
+b214 sie S_RD
+b218 pc S_RD
+b219 sac S_RD
+b21a cfc S_RD
+b220 servc RRE_RR
+b221 ipte RRF_RURR
+b222 ipm RRE_R0
+b223 ivsk RRE_RR
+b224 iac RRE_R0
+b225 ssar RRE_R0
+b226 epar RRE_R0
+b227 esar RRE_R0
+b228 pt RRE_RR
+b229 iske RRE_RR
+b22a rrbe RRE_RR
+b22b sske RRF_U0RR
+b22c tb RRE_RR
+b22d dxr RRE_FF
+b22e pgin RRE_RR
+b22f pgout RRE_RR
+b230 csch S_00
+b231 hsch S_00
+b232 msch S_RD
+b233 ssch S_RD
+b234 stsch S_RD
+b235 tsch S_RD
+b236 tpi S_RD
+b237 sal S_00
+b238 rsch S_00
+b239 stcrw S_RD
+b23a stcps S_RD
+b23b rchp S_00
+b23c schm S_00
+b240 bakr RRE_RR
+b241 cksm RRE_RR
+b244 sqdr RRE_FF
+b245 sqer RRE_FF
+b246 stura RRE_RR
+b247 msta RRE_R0
+b248 palb RRE_00
+b249 ereg RRE_RR
+b24a esta RRE_RR
+b24b lura RRE_RR
+b24c tar RRE_AR
+b24d cpya RRE_AA
+b24e sar RRE_AR
+b24f ear RRE_RA
+b250 csp RRE_RR
+b252 msr RRE_RR
+b254 mvpg RRE_RR
+b255 mvst RRE_RR
+b256 sthyi RRE_RR
+b257 cuse RRE_RR
+b258 bsg RRE_RR
+b25a bsa RRE_RR
+b25d clst RRE_RR
+b25e srst RRE_RR
+b263 cmpsc RRE_RR
+b274 siga S_RD
+b276 xsch S_00
+b277 rp S_RD
+b278 stcke S_RD
+b279 sacf S_RD
+b27c stckf S_RD
+b27d stsi S_RD
+b280 lpp S_RD
+b284 lcctl S_RD
+b285 lpctl S_RD
+b286 qsi S_RD
+b287 lsctl S_RD
+b28e qctri S_RD
+b299 srnm S_RD
+b29c stfpc S_RD
+b29d lfpc S_RD
+b2a5 tre RRE_RR
+b2a6 cu21 RRF_U0RR
+b2a7 cu12 RRF_U0RR
+b2b0 stfle S_RD
+b2b1 stfl S_RD
+b2b2 lpswe S_RD
+b2b8 srnmb S_RD
+b2b9 srnmt S_RD
+b2bd lfas S_RD
+b2e0 scctr RRE_RR
+b2e1 spctr RRE_RR
+b2e4 ecctr RRE_RR
+b2e5 epctr RRE_RR
+b2e8 ppa RRF_U0RR
+b2ec etnd RRE_R0
+b2ed ecpga RRE_RR
+b2f8 tend S_00
+b2fa niai IE_UU
+b2fc tabort S_RD
+b2ff trap4 S_RD
+b300 lpebr RRE_FF
+b301 lnebr RRE_FF
+b302 ltebr RRE_FF
+b303 lcebr RRE_FF
+b304 ldebr RRE_FF
+b305 lxdbr RRE_FF
+b306 lxebr RRE_FF
+b307 mxdbr RRE_FF
+b308 kebr RRE_FF
+b309 cebr RRE_FF
+b30a aebr RRE_FF
+b30b sebr RRE_FF
+b30c mdebr RRE_FF
+b30d debr RRE_FF
+b30e maebr RRF_F0FF
+b30f msebr RRF_F0FF
+b310 lpdbr RRE_FF
+b311 lndbr RRE_FF
+b312 ltdbr RRE_FF
+b313 lcdbr RRE_FF
+b314 sqebr RRE_FF
+b315 sqdbr RRE_FF
+b316 sqxbr RRE_FF
+b317 meebr RRE_FF
+b318 kdbr RRE_FF
+b319 cdbr RRE_FF
+b31a adbr RRE_FF
+b31b sdbr RRE_FF
+b31c mdbr RRE_FF
+b31d ddbr RRE_FF
+b31e madbr RRF_F0FF
+b31f msdbr RRF_F0FF
+b324 lder RRE_FF
+b325 lxdr RRE_FF
+b326 lxer RRE_FF
+b32e maer RRF_F0FF
+b32f mser RRF_F0FF
+b336 sqxr RRE_FF
+b337 meer RRE_FF
+b338 maylr RRF_F0FF
+b339 mylr RRF_F0FF
+b33a mayr RRF_F0FF
+b33b myr RRF_F0FF
+b33c mayhr RRF_F0FF
+b33d myhr RRF_F0FF
+b33e madr RRF_F0FF
+b33f msdr RRF_F0FF
+b340 lpxbr RRE_FF
+b341 lnxbr RRE_FF
+b342 ltxbr RRE_FF
+b343 lcxbr RRE_FF
+b344 ledbra RRF_UUFF
+b345 ldxbra RRF_UUFF
+b346 lexbra RRF_UUFF
+b347 fixbra RRF_UUFF
+b348 kxbr RRE_FF
+b349 cxbr RRE_FF
+b34a axbr RRE_FF
+b34b sxbr RRE_FF
+b34c mxbr RRE_FF
+b34d dxbr RRE_FF
+b350 tbedr RRF_U0FF
+b351 tbdr RRF_U0FF
+b353 diebr RRF_FUFF
+b357 fiebra RRF_UUFF
+b358 thder RRE_FF
+b359 thdr RRE_FF
+b35b didbr RRF_FUFF
+b35f fidbra RRF_UUFF
+b360 lpxr RRE_FF
+b361 lnxr RRE_FF
+b362 ltxr RRE_FF
+b363 lcxr RRE_FF
+b365 lxr RRE_FF
+b366 lexr RRE_FF
+b367 fixr RRE_FF
+b369 cxr RRE_FF
+b370 lpdfr RRE_FF
+b371 lndfr RRE_FF
+b372 cpsdr RRF_F0FF2
+b373 lcdfr RRE_FF
+b374 lzer RRE_F0
+b375 lzdr RRE_F0
+b376 lzxr RRE_F0
+b377 fier RRE_FF
+b37f fidr RRE_FF
+b384 sfpc RRE_RR
+b385 sfasr RRE_R0
+b38c efpc RRE_RR
+b390 celfbr RRF_UUFR
+b391 cdlfbr RRF_UUFR
+b392 cxlfbr RRF_UUFR
+b394 cefbra RRF_UUFR
+b395 cdfbra RRF_UUFR
+b396 cxfbra RRF_UUFR
+b398 cfebra RRF_UURF
+b399 cfdbra RRF_UURF
+b39a cfxbra RRF_UURF
+b39c clfebr RRF_UURF
+b39d clfdbr RRF_UURF
+b39e clfxbr RRF_UURF
+b3a0 celgbr RRF_UUFR
+b3a1 cdlgbr RRF_UUFR
+b3a2 cxlgbr RRF_UUFR
+b3a4 cegbra RRF_UUFR
+b3a5 cdgbra RRF_UUFR
+b3a6 cxgbra RRF_UUFR
+b3a8 cgebra RRF_UURF
+b3a9 cgdbra RRF_UURF
+b3aa cgxbra RRF_UURF
+b3ac clgebr RRF_UURF
+b3ad clgdbr RRF_UURF
+b3ae clgxbr RRF_UURF
+b3b4 cefr RRE_FR
+b3b5 cdfr RRE_FR
+b3b6 cxfr RRE_FR
+b3b8 cfer RRF_U0RF
+b3b9 cfdr RRF_U0RF
+b3ba cfxr RRF_U0RF
+b3c1 ldgr RRE_FR
+b3c4 cegr RRE_FR
+b3c5 cdgr RRE_FR
+b3c6 cxgr RRE_FR
+b3c8 cger RRF_U0RF
+b3c9 cgdr RRF_U0RF
+b3ca cgxr RRF_U0RF
+b3cd lgdr RRE_RF
+b3d0 mdtra RRF_FUFF2
+b3d1 ddtra RRF_FUFF2
+b3d2 adtra RRF_FUFF2
+b3d3 sdtra RRF_FUFF2
+b3d4 ldetr RRF_0UFF
+b3d5 ledtr RRF_UUFF
+b3d6 ltdtr RRE_FF
+b3d7 fidtr RRF_UUFF
+b3d8 mxtra RRF_FUFF2
+b3d9 dxtra RRF_FUFF2
+b3da axtra RRF_FUFF2
+b3db sxtra RRF_FUFF2
+b3dc lxdtr RRF_0UFF
+b3dd ldxtr RRF_UUFF
+b3de ltxtr RRE_FF
+b3df fixtr RRF_UUFF
+b3e0 kdtr RRE_FF
+b3e1 cgdtra RRF_UURF
+b3e2 cudtr RRE_RF
+b3e3 csdtr RRF_0URF
+b3e4 cdtr RRE_FF
+b3e5 eedtr RRE_RF
+b3e7 esdtr RRE_RF
+b3e8 kxtr RRE_FF
+b3e9 cgxtra RRF_UURF
+b3ea cuxtr RRE_RF
+b3eb csxtr RRF_0URF
+b3ec cxtr RRE_FF
+b3ed eextr RRE_RF
+b3ef esxtr RRE_RF
+b3f1 cdgtra RRF_UUFR
+b3f2 cdutr RRE_FR
+b3f3 cdstr RRE_FR
+b3f4 cedtr RRE_FF
+b3f5 qadtr RRF_FUFF
+b3f6 iedtr RRF_F0FR
+b3f7 rrdtr RRF_FFRU
+b3f9 cxgtra RRF_UUFR
+b3fa cxutr RRE_FR
+b3fb cxstr RRE_FR
+b3fc cextr RRE_FF
+b3fd qaxtr RRF_FUFF
+b3fe iextr RRF_F0FR
+b3ff rrxtr RRF_FFRU
+b6 stctl RS_CCRD
+b7 lctl RS_CCRD
+b900 lpgr RRE_RR
+b901 lngr RRE_RR
+b902 ltgr RRE_RR
+b903 lcgr RRE_RR
+b904 lgr RRE_RR
+b905 lurag RRE_RR
+b906 lgbr RRE_RR
+b907 lghr RRE_RR
+b908 agr RRE_RR
+b909 sgr RRE_RR
+b90a algr RRE_RR
+b90b slgr RRE_RR
+b90c msgr RRE_RR
+b90d dsgr RRE_RR
+b90e eregg RRE_RR
+b90f lrvgr RRE_RR
+b910 lpgfr RRE_RR
+b911 lngfr RRE_RR
+b912 ltgfr RRE_RR
+b913 lcgfr RRE_RR
+b914 lgfr RRE_RR
+b916 llgfr RRE_RR
+b917 llgtr RRE_RR
+b918 agfr RRE_RR
+b919 sgfr RRE_RR
+b91a algfr RRE_RR
+b91b slgfr RRE_RR
+b91c msgfr RRE_RR
+b91d dsgfr RRE_RR
+b91e kmac RRE_RR
+b91f lrvr RRE_RR
+b920 cgr RRE_RR
+b921 clgr RRE_RR
+b925 sturg RRE_RR
+b926 lbr RRE_RR
+b927 lhr RRE_RR
+b928 pckmo RRE_00
+b929 kma RRF_R0RR
+b92a kmf RRE_RR
+b92b kmo RRE_RR
+b92c pcc RRE_00
+b92d kmctr RRF_R0RR
+b92e km RRE_RR
+b92f kmc RRE_RR
+b930 cgfr RRE_RR
+b931 clgfr RRE_RR
+b93c ppno RRE_RR
+b93e kimd RRE_RR
+b93f klmd RRE_RR
+b941 cfdtr RRF_UURF
+b942 clgdtr RRF_UURF
+b943 clfdtr RRF_UURF
+b946 bctgr RRE_RR
+b949 cfxtr RRF_UURF
+b94a clgxtr RRF_UURF
+b94b clfxtr RRF_UURF
+b951 cdftr RRF_UUFR
+b952 cdlgtr RRF_UUFR
+b953 cdlftr RRF_UUFR
+b959 cxftr RRF_UUFR
+b95a cxlgtr RRF_UUFR
+b95b cxlftr RRF_UUFR
+b960 cgrt RRF_U0RR
+b961 clgrt RRF_U0RR
+b972 crt RRF_U0RR
+b973 clrt RRF_U0RR
+b980 ngr RRE_RR
+b981 ogr RRE_RR
+b982 xgr RRE_RR
+b983 flogr RRE_RR
+b984 llgcr RRE_RR
+b985 llghr RRE_RR
+b986 mlgr RRE_RR
+b987 dlgr RRE_RR
+b988 alcgr RRE_RR
+b989 slbgr RRE_RR
+b98a cspg RRE_RR
+b98d epsw RRE_RR
+b98e idte RRF_RURR2
+b98f crdte RRF_RURR2
+b990 trtt RRF_U0RR
+b991 trto RRF_U0RR
+b992 trot RRF_U0RR
+b993 troo RRF_U0RR
+b994 llcr RRE_RR
+b995 llhr RRE_RR
+b996 mlr RRE_RR
+b997 dlr RRE_RR
+b998 alcr RRE_RR
+b999 slbr RRE_RR
+b99a epair RRE_R0
+b99b esair RRE_R0
+b99d esea RRE_R0
+b99e pti RRE_RR
+b99f ssair RRE_R0
+b9a1 tpei RRE_RR
+b9a2 ptf RRE_R0
+b9aa lptea RRF_RURR2
+b9ac irbm RRE_RR
+b9ae rrbm RRE_RR
+b9af pfmf RRE_RR
+b9b0 cu14 RRF_U0RR
+b9b1 cu24 RRF_U0RR
+b9b2 cu41 RRE_RR
+b9b3 cu42 RRE_RR
+b9bd trtre RRF_U0RR
+b9be srstu RRE_RR
+b9bf trte RRF_U0RR
+b9c8 ahhhr RRF_R0RR2
+b9c9 shhhr RRF_R0RR2
+b9ca alhhhr RRF_R0RR2
+b9cb slhhhr RRF_R0RR2
+b9cd chhr RRE_RR
+b9cf clhhr RRE_RR
+b9d0 pcistg RRE_RR
+b9d2 pcilg RRE_RR
+b9d3 rpcit RRE_RR
+b9d8 ahhlr RRF_R0RR2
+b9d9 shhlr RRF_R0RR2
+b9da alhhlr RRF_R0RR2
+b9db slhhlr RRF_R0RR2
+b9dd chlr RRE_RR
+b9df clhlr RRE_RR
+b9e0 locfhr RRF_U0RR
+b9e1 popcnt RRE_RR
+b9e2 locgr RRF_U0RR
+b9e4 ngrk RRF_R0RR2
+b9e6 ogrk RRF_R0RR2
+b9e7 xgrk RRF_R0RR2
+b9e8 agrk RRF_R0RR2
+b9e9 sgrk RRF_R0RR2
+b9ea algrk RRF_R0RR2
+b9eb slgrk RRF_R0RR2
+b9ec mgrk RRF_R0RR2
+b9ed msgrkc RRF_R0RR2
+b9f2 locr RRF_U0RR
+b9f4 nrk RRF_R0RR2
+b9f6 ork RRF_R0RR2
+b9f7 xrk RRF_R0RR2
+b9f8 ark RRF_R0RR2
+b9f9 srk RRF_R0RR2
+b9fa alrk RRF_R0RR2
+b9fb slrk RRF_R0RR2
+b9fd msrkc RRF_R0RR2
+ba cs RS_RRRD
+bb cds RS_RRRD
+bd clm RS_RURD
+be stcm RS_RURD
+bf icm RS_RURD
+c00 larl RIL_RP
+c01 lgfi RIL_RI
+c04 brcl RIL_UP
+c05 brasl RIL_RP
+c06 xihf RIL_RU
+c07 xilf RIL_RU
+c08 iihf RIL_RU
+c09 iilf RIL_RU
+c0a nihf RIL_RU
+c0b nilf RIL_RU
+c0c oihf RIL_RU
+c0d oilf RIL_RU
+c0e llihf RIL_RU
+c0f llilf RIL_RU
+c20 msgfi RIL_RI
+c21 msfi RIL_RI
+c24 slgfi RIL_RU
+c25 slfi RIL_RU
+c28 agfi RIL_RI
+c29 afi RIL_RI
+c2a algfi RIL_RU
+c2b alfi RIL_RU
+c2c cgfi RIL_RI
+c2d cfi RIL_RI
+c2e clgfi RIL_RU
+c2f clfi RIL_RU
+c42 llhrl RIL_RP
+c44 lghrl RIL_RP
+c45 lhrl RIL_RP
+c46 llghrl RIL_RP
+c47 sthrl RIL_RP
+c48 lgrl RIL_RP
+c4b stgrl RIL_RP
+c4c lgfrl RIL_RP
+c4d lrl RIL_RP
+c4e llgfrl RIL_RP
+c4f strl RIL_RP
+c5 bprp MII_UPP
+c60 exrl RIL_RP
+c62 pfdrl RIL_UP
+c64 cghrl RIL_RP
+c65 chrl RIL_RP
+c66 clghrl RIL_RP
+c67 clhrl RIL_RP
+c68 cgrl RIL_RP
+c6a clgrl RIL_RP
+c6c cgfrl RIL_RP
+c6d crl RIL_RP
+c6e clgfrl RIL_RP
+c6f clrl RIL_RP
+c7 bpp SMI_U0RDP
+c80 mvcos SSF_RRDRD
+c81 ectg SSF_RRDRD
+c82 csst SSF_RRDRD
+c84 lpd SSF_RRDRD2
+c85 lpdg SSF_RRDRD2
+cc6 brcth RIL_RP
+cc8 aih RIL_RI
+cca alsih RIL_RI
+ccb alsihn RIL_RI
+ccd cih RIL_RI
+ccf clih RIL_RU
+d0 trtr SS_L0RDRD
+d1 mvn SS_L0RDRD
+d2 mvc SS_L0RDRD
+d3 mvz SS_L0RDRD
+d4 nc SS_L0RDRD
+d5 clc SS_L0RDRD
+d6 oc SS_L0RDRD
+d7 xc SS_L0RDRD
+d9 mvck SS_RRRDRD
+da mvcp SS_RRRDRD
+db mvcs SS_RRRDRD
+dc tr SS_L0RDRD
+dd trt SS_L0RDRD
+de ed SS_L0RDRD
+df edmk SS_L0RDRD
+e1 pku SS_L2RDRD
+e2 unpku SS_L0RDRD
+e302 ltg RXY_RRRD
+e303 lrag RXY_RRRD
+e304 lg RXY_RRRD
+e306 cvby RXY_RRRD
+e308 ag RXY_RRRD
+e309 sg RXY_RRRD
+e30a alg RXY_RRRD
+e30b slg RXY_RRRD
+e30c msg RXY_RRRD
+e30d dsg RXY_RRRD
+e30e cvbg RXY_RRRD
+e30f lrvg RXY_RRRD
+e312 lt RXY_RRRD
+e313 lray RXY_RRRD
+e314 lgf RXY_RRRD
+e315 lgh RXY_RRRD
+e316 llgf RXY_RRRD
+e317 llgt RXY_RRRD
+e318 agf RXY_RRRD
+e319 sgf RXY_RRRD
+e31a algf RXY_RRRD
+e31b slgf RXY_RRRD
+e31c msgf RXY_RRRD
+e31d dsgf RXY_RRRD
+e31e lrv RXY_RRRD
+e31f lrvh RXY_RRRD
+e320 cg RXY_RRRD
+e321 clg RXY_RRRD
+e324 stg RXY_RRRD
+e325 ntstg RXY_RRRD
+e326 cvdy RXY_RRRD
+e32a lzrg RXY_RRRD
+e32e cvdg RXY_RRRD
+e32f strvg RXY_RRRD
+e330 cgf RXY_RRRD
+e331 clgf RXY_RRRD
+e332 ltgf RXY_RRRD
+e334 cgh RXY_RRRD
+e336 pfd RXY_URRD
+e338 agh RXY_RRRD
+e339 sgh RXY_RRRD
+e33a llzrgf RXY_RRRD
+e33b lzrf RXY_RRRD
+e33c mgh RXY_RRRD
+e33e strv RXY_RRRD
+e33f strvh RXY_RRRD
+e346 bctg RXY_RRRD
+e347 bic RXY_URRD
+e348 llgfsg RXY_RRRD
+e349 stgsc RXY_RRRD
+e34c lgg RXY_RRRD
+e34d lgsc RXY_RRRD
+e350 sty RXY_RRRD
+e351 msy RXY_RRRD
+e353 msc RXY_RRRD
+e354 ny RXY_RRRD
+e355 cly RXY_RRRD
+e356 oy RXY_RRRD
+e357 xy RXY_RRRD
+e358 ly RXY_RRRD
+e359 cy RXY_RRRD
+e35a ay RXY_RRRD
+e35b sy RXY_RRRD
+e35c mfy RXY_RRRD
+e35e aly RXY_RRRD
+e35f sly RXY_RRRD
+e370 sthy RXY_RRRD
+e371 lay RXY_RRRD
+e372 stcy RXY_RRRD
+e373 icy RXY_RRRD
+e375 laey RXY_RRRD
+e376 lb RXY_RRRD
+e377 lgb RXY_RRRD
+e378 lhy RXY_RRRD
+e379 chy RXY_RRRD
+e37a ahy RXY_RRRD
+e37b shy RXY_RRRD
+e37c mhy RXY_RRRD
+e380 ng RXY_RRRD
+e381 og RXY_RRRD
+e382 xg RXY_RRRD
+e383 msgc RXY_RRRD
+e384 mg RXY_RRRD
+e385 lgat RXY_RRRD
+e386 mlg RXY_RRRD
+e387 dlg RXY_RRRD
+e388 alcg RXY_RRRD
+e389 slbg RXY_RRRD
+e38e stpq RXY_RRRD
+e38f lpq RXY_RRRD
+e390 llgc RXY_RRRD
+e391 llgh RXY_RRRD
+e394 llc RXY_RRRD
+e395 llh RXY_RRRD
+e396 ml RXY_RRRD
+e397 dl RXY_RRRD
+e398 alc RXY_RRRD
+e399 slb RXY_RRRD
+e39c llgtat RXY_RRRD
+e39d llgfat RXY_RRRD
+e39f lat RXY_RRRD
+e3c0 lbh RXY_RRRD
+e3c2 llch RXY_RRRD
+e3c3 stch RXY_RRRD
+e3c4 lhh RXY_RRRD
+e3c6 llhh RXY_RRRD
+e3c7 sthh RXY_RRRD
+e3c8 lfhat RXY_RRRD
+e3ca lfh RXY_RRRD
+e3cb stfh RXY_RRRD
+e3cd chf RXY_RRRD
+e3cf clhf RXY_RRRD
+e3d0 mpcifc RXY_RRRD
+e3d4 stpcifc RXY_RRRD
+e500 lasp SSE_RDRD
+e501 tprot SSE_RDRD
+e502 strag SSE_RDRD
+e50e mvcsk SSE_RDRD
+e50f mvcdk SSE_RDRD
+e544 mvhhi SIL_RDI
+e548 mvghi SIL_RDI
+e54c mvhi SIL_RDI
+e554 chhsi SIL_RDI
+e555 clhhsi SIL_RDU
+e558 cghsi SIL_RDI
+e559 clghsi SIL_RDU
+e55c chsi SIL_RDI
+e55d clfhsi SIL_RDU
+e560 tbegin SIL_RDU
+e561 tbeginc SIL_RDU
+e634 vpkz VSI_URDV
+e635 vlrl VSI_URDV
+e637 vlrlr VRS_RRDV
+e63c vupkz VSI_URDV
+e63d vstrl VSI_URDV
+e63f vstrlr VRS_RRDV
+e649 vlip VRI_V0UU2
+e650 vcvb VRR_RV0U
+e652 vcvbg VRR_RV0U
+e658 vcvd VRI_VR0UU
+e659 vsrp VRI_VVUUU2
+e65a vcvdg VRI_VR0UU
+e65b vpsop VRI_VVUUU2
+e65f vtp VRR_0V
+e671 vap VRI_VVV0UU2
+e673 vsp VRI_VVV0UU2
+e677 vcp VRR_0VV0U
+e678 vmp VRI_VVV0UU2
+e679 vmsp VRI_VVV0UU2
+e67a vdp VRI_VVV0UU2
+e67b vrp VRI_VVV0UU2
+e67e vsdp VRI_VVV0UU2
+e700 vleb VRX_VRRDU
+e701 vleh VRX_VRRDU
+e702 vleg VRX_VRRDU
+e703 vlef VRX_VRRDU
+e704 vllez VRX_VRRDU
+e705 vlrep VRX_VRRDU
+e706 vl VRX_VRRD
+e707 vlbb VRX_VRRDU
+e708 vsteb VRX_VRRDU
+e709 vsteh VRX_VRRDU
+e70a vsteg VRX_VRRDU
+e70b vstef VRX_VRRDU
+e70e vst VRX_VRRD
+e712 vgeg VRV_VVXRDU
+e713 vgef VRV_VVXRDU
+e71a vsceg VRV_VVXRDU
+e71b vscef VRV_VVXRDU
+e721 vlgv VRS_RVRDU
+e722 vlvg VRS_VRRDU
+e727 lcbb RXE_RRRDU
+e730 vesl VRS_VVRDU
+e733 verll VRS_VVRDU
+e736 vlm VRS_VVRD
+e737 vll VRS_VRRD
+e738 vesrl VRS_VVRDU
+e73a vesra VRS_VVRDU
+e73e vstm VRS_VVRD
+e73f vstl VRS_VRRD
+e740 vleib VRI_V0IU
+e741 vleih VRI_V0IU
+e742 vleig VRI_V0IU
+e743 vleif VRI_V0IU
+e744 vgbm VRI_V0U
+e745 vrepi VRI_V0IU
+e746 vgm VRI_V0UUU
+e74a vftci VRI_VVUUU
+e74d vrep VRI_VVUU
+e750 vpopct VRR_VV0U
+e752 vctz VRR_VV0U
+e753 vclz VRR_VV0U
+e756 vlr VRX_VV
+e75c vistr VRR_VV0U0U
+e75f vseg VRR_VV0U
+e760 vmrl VRR_VVV0U
+e761 vmrh VRR_VVV0U
+e762 vlvgp VRR_VRR
+e764 vsum VRR_VVV0U
+e765 vsumg VRR_VVV0U
+e766 vcksm VRR_VVV
+e767 vsumq VRR_VVV0U
+e768 vn VRR_VVV
+e769 vnc VRR_VVV
+e76a vo VRR_VVV
+e76b vno VRR_VVV
+e76c vnx VRR_VVV
+e76d vx VRR_VVV
+e76e vnn VRR_VVV
+e76f voc VRR_VVV
+e770 veslv VRR_VVV0U
+e772 verim VRI_VVV0UU
+e773 verllv VRR_VVV0U
+e774 vsl VRR_VVV
+e775 vslb VRR_VVV
+e777 vsldb VRI_VVV0U
+e778 vesrlv VRR_VVV0U
+e77a vesrav VRR_VVV0U
+e77c vsrl VRR_VVV
+e77d vsrlb VRR_VVV
+e77e vsra VRR_VVV
+e77f vsrab VRR_VVV
+e780 vfee VRR_VVV0U0U
+e781 vfene VRR_VVV0U0U
+e782 vfae VRR_VVV0U0U
+e784 vpdi VRR_VVV0U
+e785 vbperm VRR_VVV
+e78a vstrc VRR_VVVUU0V
+e78c vperm VRR_VVV0V
+e78d vsel VRR_VVV0V
+e78e vfms VRR_VVVU0UV
+e78f vfma VRR_VVVU0UV
+e794 vpk VRR_VVV0U
+e795 vpkls VRR_VVV0U0U
+e797 vpks VRR_VVV0U0U
+e79e vfnms VRR_VVVU0UV
+e79f vfnma VRR_VVVU0UV
+e7a1 vmlh VRR_VVV0U
+e7a2 vml VRR_VVV0U
+e7a3 vmh VRR_VVV0U
+e7a4 vmle VRR_VVV0U
+e7a5 vmlo VRR_VVV0U
+e7a6 vme VRR_VVV0U
+e7a7 vmo VRR_VVV0U
+e7a9 vmalh VRR_VVVU0V
+e7aa vmal VRR_VVVU0V
+e7ab vmah VRR_VVVU0V
+e7ac vmale VRR_VVVU0V
+e7ad vmalo VRR_VVVU0V
+e7ae vmae VRR_VVVU0V
+e7af vmao VRR_VVVU0V
+e7b4 vgfm VRR_VVV0U
+e7b8 vmsl VRR_VVVUU0V
+e7b9 vaccc VRR_VVVU0V
+e7bb vac VRR_VVVU0V
+e7bc vgfma VRR_VVVU0V
+e7bd vsbcbi VRR_VVVU0V
+e7bf vsbi VRR_VVVU0V
+e7c0 vclgd VRR_VV0UUU
+e7c1 vcdlg VRR_VV0UUU
+e7c2 vcgd VRR_VV0UUU
+e7c3 vcdg VRR_VV0UUU
+e7c4 vlde VRR_VV0UU2
+e7c5 vled VRR_VV0UUU
+e7c7 vfi VRR_VV0UUU
+e7ca wfk VRR_VV0UU2
+e7cb wfc VRR_VV0UU2
+e7cc vfpso VRR_VV0UUU
+e7ce vfsq VRR_VV0UU2
+e7d4 vupll VRR_VV0U
+e7d5 vuplh VRR_VV0U
+e7d6 vupl VRR_VV0U
+e7d7 vuph VRR_VV0U
+e7d8 vtm VRR_VV
+e7d9 vecl VRR_VV0U
+e7db vec VRR_VV0U
+e7de vlc VRR_VV0U
+e7df vlp VRR_VV0U
+e7e2 vfs VRR_VVV0UU
+e7e3 vfa VRR_VVV0UU
+e7e5 vfd VRR_VVV0UU
+e7e7 vfm VRR_VVV0UU
+e7e8 vfce VRR_VVV0UUU
+e7ea vfche VRR_VVV0UUU
+e7eb vfch VRR_VVV0UUU
+e7ee vfmin VRR_VVV0UUU
+e7ef vfmax VRR_VVV0UUU
+e7f0 vavgl VRR_VVV0U
+e7f1 vacc VRR_VVV0U
+e7f2 vavg VRR_VVV0U
+e7f3 va VRR_VVV0U
+e7f5 vscbi VRR_VVV0U
+e7f7 vs VRR_VVV0U
+e7f8 vceq VRR_VVV0U0U
+e7f9 vchl VRR_VVV0U0U
+e7fb vch VRR_VVV0U0U
+e7fc vmnl VRR_VVV0U
+e7fd vmxl VRR_VVV0U
+e7fe vmn VRR_VVV0U
+e7ff vmx VRR_VVV0U
+e8 mvcin SS_L0RDRD
+e9 pka SS_L2RDRD
+ea unpka SS_L0RDRD
+eb04 lmg RSY_RRRD
+eb0a srag RSY_RRRD
+eb0b slag RSY_RRRD
+eb0c srlg RSY_RRRD
+eb0d sllg RSY_RRRD
+eb0f tracg RSY_RRRD
+eb14 csy RSY_RRRD
+eb17 stcctm RSY_RURD
+eb1c rllg RSY_RRRD
+eb1d rll RSY_RRRD
+eb20 clmh RSY_RURD
+eb21 clmy RSY_RURD
+eb23 clt RSY_RURD
+eb24 stmg RSY_RRRD
+eb25 stctg RSY_CCRD
+eb26 stmh RSY_RRRD
+eb2b clgt RSY_RURD
+eb2c stcmh RSY_RURD
+eb2d stcmy RSY_RURD
+eb2f lctlg RSY_CCRD
+eb30 csg RSY_RRRD
+eb31 cdsy RSY_RRRD
+eb3e cdsg RSY_RRRD
+eb44 bxhg RSY_RRRD
+eb45 bxleg RSY_RRRD
+eb4c ecag RSY_RRRD
+eb51 tmy SIY_URD
+eb52 mviy SIY_URD
+eb54 niy SIY_URD
+eb55 cliy SIY_URD
+eb56 oiy SIY_URD
+eb57 xiy SIY_URD
+eb60 lric RSY_RDRU
+eb61 stric RSY_RDRU
+eb62 mric RSY_RDRU
+eb6a asi SIY_IRD
+eb6e alsi SIY_IRD
+eb7a agsi SIY_IRD
+eb7e algsi SIY_IRD
+eb80 icmh RSY_RURD
+eb81 icmy RSY_RURD
+eb8e mvclu RSY_RRRD
+eb8f clclu RSY_RRRD
+eb90 stmy RSY_RRRD
+eb96 lmh RSY_RRRD
+eb98 lmy RSY_RRRD
+eb9a lamy RSY_AARD
+eb9b stamy RSY_AARD
+ebc0 tp RSL_R0RD
+ebd0 pcistb RSY_RRRD
+ebd1 sic RSY_RRRD
+ebdc srak RSY_RRRD
+ebdd slak RSY_RRRD
+ebde srlk RSY_RRRD
+ebdf sllk RSY_RRRD
+ebe0 locfh RSY_RURD2
+ebe1 stocfh RSY_RURD2
+ebe2 locg RSY_RURD2
+ebe3 stocg RSY_RURD2
+ebe4 lang RSY_RRRD
+ebe6 laog RSY_RRRD
+ebe7 laxg RSY_RRRD
+ebe8 laag RSY_RRRD
+ebea laalg RSY_RRRD
+ebf2 loc RSY_RURD2
+ebf3 stoc RSY_RURD2
+ebf4 lan RSY_RRRD
+ebf6 lao RSY_RRRD
+ebf7 lax RSY_RRRD
+ebf8 laa RSY_RRRD
+ebfa laal RSY_RRRD
+ec42 lochi RIE_RUI0
+ec44 brxhg RIE_RRP
+ec45 brxlg RIE_RRP
+ec46 locghi RIE_RUI0
+ec4e lochhi RIE_RUI0
+ec51 risblg RIE_RRUUU
+ec54 rnsbg RIE_RRUUU
+ec55 risbg RIE_RRUUU
+ec56 rosbg RIE_RRUUU
+ec57 rxsbg RIE_RRUUU
+ec59 risbgn RIE_RRUUU
+ec5d risbhg RIE_RRUUU
+ec64 cgrj RIE_RRPU
+ec65 clgrj RIE_RRPU
+ec70 cgit RIE_R0IU
+ec71 clgit RIE_R0UU
+ec72 cit RIE_R0IU
+ec73 clfit RIE_R0UU
+ec76 crj RIE_RRPU
+ec77 clrj RIE_RRPU
+ec7c cgij RIE_RUPI
+ec7d clgij RIE_RUPU
+ec7e cij RIE_RUPI
+ec7f clij RIE_RUPU
+ecd8 ahik RIE_RRI0
+ecd9 aghik RIE_RRI0
+ecda alhsik RIE_RRI0
+ecdb alghsik RIE_RRI0
+ece4 cgrb RRS_RRRDU
+ece5 clgrb RRS_RRRDU
+ecf6 crb RRS_RRRDU
+ecf7 clrb RRS_RRRDU
+ecfc cgib RIS_RURDI
+ecfd clgib RIS_RURDU
+ecfe cib RIS_RURDI
+ecff clib RIS_RURDU
+ed04 ldeb RXE_FRRD
+ed05 lxdb RXE_FRRD
+ed06 lxeb RXE_FRRD
+ed07 mxdb RXE_FRRD
+ed08 keb RXE_FRRD
+ed09 ceb RXE_FRRD
+ed0a aeb RXE_FRRD
+ed0b seb RXE_FRRD
+ed0c mdeb RXE_FRRD
+ed0d deb RXE_FRRD
+ed0e maeb RXF_FRRDF
+ed0f mseb RXF_FRRDF
+ed10 tceb RXE_FRRD
+ed11 tcdb RXE_FRRD
+ed12 tcxb RXE_FRRD
+ed14 sqeb RXE_FRRD
+ed15 sqdb RXE_FRRD
+ed17 meeb RXE_FRRD
+ed18 kdb RXE_FRRD
+ed19 cdb RXE_FRRD
+ed1a adb RXE_FRRD
+ed1b sdb RXE_FRRD
+ed1c mdb RXE_FRRD
+ed1d ddb RXE_FRRD
+ed1e madb RXF_FRRDF
+ed1f msdb RXF_FRRDF
+ed24 lde RXE_FRRD
+ed25 lxd RXE_FRRD
+ed26 lxe RXE_FRRD
+ed2e mae RXF_FRRDF
+ed2f mse RXF_FRRDF
+ed34 sqe RXE_FRRD
+ed35 sqd RXE_FRRD
+ed37 mee RXE_FRRD
+ed38 mayl RXF_FRRDF
+ed39 myl RXF_FRRDF
+ed3a may RXF_FRRDF
+ed3b my RXF_FRRDF
+ed3c mayh RXF_FRRDF
+ed3d myh RXF_FRRDF
+ed3e mad RXF_FRRDF
+ed3f msd RXF_FRRDF
+ed40 sldt RXF_FRRDF
+ed41 srdt RXF_FRRDF
+ed48 slxt RXF_FRRDF
+ed49 srxt RXF_FRRDF
+ed50 tdcet RXE_FRRD
+ed51 tdget RXE_FRRD
+ed54 tdcdt RXE_FRRD
+ed55 tdgdt RXE_FRRD
+ed58 tdcxt RXE_FRRD
+ed59 tdgxt RXE_FRRD
+ed64 ley RXY_FRRD
+ed65 ldy RXY_FRRD
+ed66 stey RXY_FRRD
+ed67 stdy RXY_FRRD
+eda8 czdt RSL_LRDFU
+eda9 czxt RSL_LRDFU
+edaa cdzt RSL_LRDFU
+edab cxzt RSL_LRDFU
+edac cpdt RSL_LRDFU
+edad cpxt RSL_LRDFU
+edae cdpt RSL_LRDFU
+edaf cxpt RSL_LRDFU
+ee plo SS_RRRDRD2
+ef lmd SS_RRRDRD3
+f0 srp SS_LIRDRD
+f1 mvo SS_LLRDRD
+f2 pack SS_LLRDRD
+f3 unpk SS_LLRDRD
+f8 zap SS_LLRDRD
+f9 cp SS_LLRDRD
+fa ap SS_LLRDRD
+fb sp SS_LLRDRD
+fc mp SS_LLRDRD
+fd dp SS_LLRDRD