author     Linus Torvalds <torvalds@linux-foundation.org>  2022-05-23 21:01:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-05-23 21:01:30 -0700
commit     95fbef17e8253775876a08ec2011d3665b86a55f
tree       081f831e5aaa659ce36cafa20a762da52d64a422
parent     67c642e0d9aa927c1340638e472f2467fefd1dbf
parent     94d3477897481b92874654455e263e0b1728acb5
Merge tag 's390-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Heiko Carstens:

 - Make use of the IBM z16 processor activity instrumentation facility
   to count cryptography operations: add a new PMU device driver so that
   perf can make use of this.

 - Add new IBM z16 extended counter set to cpumf support.

 - Add vdso randomization support.

 - Add missing KCSAN instrumentation to barriers and spinlocks, which
   should make s390's KCSAN support complete.

 - Add support for the IPL-complete-control facility: notify the
   hypervisor that kexec finished its work and the kernel starts.

 - Improve error logging for PCI.

 - Various small changes to work around limitations of llvm's integrated
   assembler, and one bug, to make it finally possible to compile the
   kernel with llvm's integrated assembler. This also requires raising
   the minimum clang version to 14.0.0.

 - Various other small enhancements, bug fixes, and cleanups all over
   the place.

* tag 's390-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (48 commits)
  s390/head: get rid of 31 bit leftovers
  scripts/min-tool-version.sh: raise minimum clang version to 14.0.0 for s390
  s390/boot: do not emit debug info for assembly with llvm's IAS
  s390/boot: workaround llvm IAS bug
  s390/purgatory: workaround llvm's IAS limitations
  s390/entry: workaround llvm's IAS limitations
  s390/alternatives: remove padding generation code
  s390/alternatives: provide identical sized orginal/alternative sequences
  s390/cpumf: add new extended counter set for IBM z16
  s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES
  s390/stp: clock_delta should be signed
  s390/stp: fix todoff size
  s390/pai: add support for cryptography counters
  entry: Rename arch_check_user_regs() to arch_enter_from_user_mode()
  s390/compat: cleanup compat_linux.h header file
  s390/entry: remove broken and not needed code
  s390/boot: convert parmarea to C
  s390/boot: convert initial lowcore to C
  s390/ptrace: move short psw definitions to ptrace header file
  s390/head: initialize all new psws
  ...
-rw-r--r--  arch/s390/Makefile | 2
-rw-r--r--  arch/s390/boot/.gitignore | 3
-rw-r--r--  arch/s390/boot/Makefile | 78
-rw-r--r--  arch/s390/boot/boot.h | 6
-rw-r--r--  arch/s390/boot/clz_ctz.c (renamed from arch/s390/boot/compressed/clz_ctz.c) | 0
-rw-r--r--  arch/s390/boot/compressed/.gitignore | 4
-rw-r--r--  arch/s390/boot/compressed/Makefile | 86
-rw-r--r--  arch/s390/boot/decompressor.c (renamed from arch/s390/boot/compressed/decompressor.c) | 0
-rw-r--r--  arch/s390/boot/decompressor.h (renamed from arch/s390/boot/compressed/decompressor.h) | 0
-rw-r--r--  arch/s390/boot/head.S | 366
-rw-r--r--  arch/s390/boot/ipl_data.c | 84
-rw-r--r--  arch/s390/boot/ipl_parm.c | 7
-rw-r--r--  arch/s390/boot/kaslr.c | 2
-rw-r--r--  arch/s390/boot/mem_detect.c | 2
-rw-r--r--  arch/s390/boot/startup.c | 2
-rw-r--r--  arch/s390/boot/vmlinux.lds.S (renamed from arch/s390/boot/compressed/vmlinux.lds.S) | 9
-rw-r--r--  arch/s390/crypto/des_s390.c | 2
-rw-r--r--  arch/s390/crypto/prng.c | 2
-rw-r--r--  arch/s390/hypfs/hypfs_vm.c | 2
-rw-r--r--  arch/s390/include/asm/alternative-asm.h | 76
-rw-r--r--  arch/s390/include/asm/alternative.h | 93
-rw-r--r--  arch/s390/include/asm/asm-extable.h | 12
-rw-r--r--  arch/s390/include/asm/barrier.h | 16
-rw-r--r--  arch/s390/include/asm/cio.h | 2
-rw-r--r--  arch/s390/include/asm/compat.h | 25
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 4
-rw-r--r--  arch/s390/include/asm/entry-common.h | 14
-rw-r--r--  arch/s390/include/asm/ipl.h | 6
-rw-r--r--  arch/s390/include/asm/lowcore.h | 5
-rw-r--r--  arch/s390/include/asm/nmi.h | 2
-rw-r--r--  arch/s390/include/asm/nospec-insn.h | 12
-rw-r--r--  arch/s390/include/asm/pai.h | 74
-rw-r--r--  arch/s390/include/asm/pci_debug.h | 7
-rw-r--r--  arch/s390/include/asm/preempt.h | 15
-rw-r--r--  arch/s390/include/asm/processor.h | 8
-rw-r--r--  arch/s390/include/asm/ptrace.h | 29
-rw-r--r--  arch/s390/include/asm/sclp.h | 1
-rw-r--r--  arch/s390/include/asm/scsw.h | 83
-rw-r--r--  arch/s390/include/asm/spinlock.h | 3
-rw-r--r--  arch/s390/include/asm/stp.h | 4
-rw-r--r--  arch/s390/include/asm/vx-insn.h | 6
-rw-r--r--  arch/s390/include/uapi/asm/pkey.h | 2
-rw-r--r--  arch/s390/include/uapi/asm/zcrypt.h | 42
-rw-r--r--  arch/s390/kernel/Makefile | 1
-rw-r--r--  arch/s390/kernel/alternative.c | 61
-rw-r--r--  arch/s390/kernel/compat_linux.h | 89
-rw-r--r--  arch/s390/kernel/entry.S | 40
-rw-r--r--  arch/s390/kernel/irq.c | 4
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 10
-rw-r--r--  arch/s390/kernel/nmi.c | 6
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_events.c | 148
-rw-r--r--  arch/s390/kernel/perf_pai_crypto.c | 688
-rw-r--r--  arch/s390/kernel/relocate_kernel.S | 3
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 12
-rw-r--r--  arch/s390/kernel/vdso.c | 55
-rw-r--r--  arch/s390/kvm/priv.c | 1
-rw-r--r--  arch/s390/lib/spinlock.c | 4
-rw-r--r--  arch/s390/mm/mmap.c | 4
-rw-r--r--  arch/s390/pci/pci.c | 2
-rw-r--r--  arch/s390/pci/pci_clp.c | 2
-rw-r--r--  arch/s390/pci/pci_debug.c | 2
-rw-r--r--  arch/s390/pci/pci_event.c | 3
-rw-r--r--  arch/s390/pci/pci_insn.c | 108
-rw-r--r--  arch/s390/purgatory/head.S | 30
-rw-r--r--  arch/x86/include/asm/entry-common.h | 4
-rw-r--r--  drivers/s390/char/con3215.c | 25
-rw-r--r--  drivers/s390/char/con3270.c | 31
-rw-r--r--  drivers/s390/char/raw3270.c | 15
-rw-r--r--  drivers/s390/char/raw3270.h | 1
-rw-r--r--  drivers/s390/char/sclp_con.c | 26
-rw-r--r--  drivers/s390/char/sclp_early.c | 4
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 42
-rw-r--r--  drivers/s390/cio/chsc.c | 4
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 96
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 1
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 7
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 149
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 215
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_card.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h | 58
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c | 272
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.h | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 11
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.c | 10
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 44
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c | 168
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 3
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 31
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 340
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.h | 26
-rw-r--r--  drivers/s390/crypto/zcrypt_queue.c | 2
-rw-r--r--  include/linux/entry-common.h | 8
-rw-r--r--  kernel/entry/common.c | 2
-rwxr-xr-x  scripts/min-tool-version.sh | 3
98 files changed, 2553 insertions, 1534 deletions
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index df325eacf62d..80eb3ee84ff1 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -20,7 +20,9 @@ LDFLAGS_vmlinux := -pie
endif
aflags_dwarf := -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
+ifndef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
+endif
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore
index b265bfede188..f56591bc0897 100644
--- a/arch/s390/boot/.gitignore
+++ b/arch/s390/boot/.gitignore
@@ -2,3 +2,6 @@
image
bzImage
section_cmp.*
+vmlinux
+vmlinux.lds
+vmlinux.syms
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 0ba646899131..883357a211a3 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -37,14 +37,21 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y += version.o pgm_check_info.o ctype.o
+obj-y += version.o pgm_check_info.o ctype.o ipl_data.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
-targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
-subdir- := compressed
+obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
+obj-all := $(obj-y) piggy.o syms.o
+
+targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
+targets += vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += vmlinux.bin.zst info.bin syms.bin vmlinux.syms $(obj-all)
OBJECTS := $(addprefix $(obj)/,$(obj-y))
+OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all))
quiet_cmd_section_cmp = SECTCMP $*
define cmd_section_cmp
@@ -59,14 +66,67 @@ define cmd_section_cmp
touch $@
endef
-$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE
+$(obj)/bzImage: $(obj)/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE
$(call if_changed,objcopy)
-$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
+$(obj)/section_cmp%: vmlinux $(obj)/vmlinux FORCE
$(call if_changed,section_cmp)
-$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
- $(Q)$(MAKE) $(build)=$(obj)/compressed $@
+LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS_ALL) FORCE
+ $(call if_changed,ld)
+
+LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T
+$(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(OBJECTS) FORCE
+ $(call if_changed,ld)
+
+quiet_cmd_dumpsyms = DUMPSYMS $<
+define cmd_dumpsyms
+ $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@"
+endef
+
+$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE
+ $(call if_changed,dumpsyms)
+
+OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.decompressor.syms
+$(obj)/syms.o: $(obj)/syms.bin FORCE
+ $(call if_changed,objcopy)
+
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
+$(obj)/info.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info
+$(obj)/info.o: $(obj)/info.bin FORCE
+ $(call if_changed,objcopy)
+
+OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+suffix-$(CONFIG_KERNEL_GZIP) := .gz
+suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
+suffix-$(CONFIG_KERNEL_LZ4) := .lz4
+suffix-$(CONFIG_KERNEL_LZMA) := .lzma
+suffix-$(CONFIG_KERNEL_LZO) := .lzo
+suffix-$(CONFIG_KERNEL_XZ) := .xz
+suffix-$(CONFIG_KERNEL_ZSTD) := .zst
-$(obj)/startup.a: $(OBJECTS) FORCE
- $(call if_changed,ar)
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,bzip2_with_size)
+$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lz4_with_size)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma_with_size)
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzo_with_size)
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,xzkern_with_size)
+$(obj)/vmlinux.bin.zst: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,zstd22_with_size)
+
+OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
+$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
+ $(call if_changed,objcopy)
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 641ce0fc5c3e..70418389414d 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -2,9 +2,12 @@
#ifndef BOOT_BOOT_H
#define BOOT_BOOT_H
-#include <asm/extable.h>
#include <linux/types.h>
+#define IPL_START 0x200
+
+#ifndef __ASSEMBLY__
+
void startup_kernel(void);
unsigned long detect_memory(void);
bool is_ipl_block_dump(void);
@@ -31,4 +34,5 @@ extern char _stack_start[], _stack_end[];
unsigned long read_ipl_report(unsigned long safe_offset);
+#endif /* __ASSEMBLY__ */
#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/compressed/clz_ctz.c b/arch/s390/boot/clz_ctz.c
index c3ebf248596b..c3ebf248596b 100644
--- a/arch/s390/boot/compressed/clz_ctz.c
+++ b/arch/s390/boot/clz_ctz.c
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
deleted file mode 100644
index 01d93832cf4a..000000000000
--- a/arch/s390/boot/compressed/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-vmlinux
-vmlinux.lds
-vmlinux.syms
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
deleted file mode 100644
index d04e0e7de0b3..000000000000
--- a/arch/s390/boot/compressed/Makefile
+++ /dev/null
@@ -1,86 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# linux/arch/s390/boot/compressed/Makefile
-#
-# create a compressed vmlinux image from the original vmlinux
-#
-
-KCOV_INSTRUMENT := n
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-KASAN_SANITIZE := n
-KCSAN_SANITIZE := n
-
-obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
-obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
-obj-all := $(obj-y) piggy.o syms.o
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
-targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += vmlinux.bin.zst
-targets += info.bin syms.bin vmlinux.syms $(obj-all)
-
-KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
-KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
-OBJCOPYFLAGS :=
-
-OBJECTS := $(addprefix $(obj)/,$(obj-y))
-OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all))
-
-LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS_ALL) FORCE
- $(call if_changed,ld)
-
-LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
- $(call if_changed,ld)
-
-quiet_cmd_dumpsyms = DUMPSYMS $<
-define cmd_dumpsyms
- $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@"
-endef
-
-$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE
- $(call if_changed,dumpsyms)
-
-OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.decompressor.syms
-$(obj)/syms.o: $(obj)/syms.bin FORCE
- $(call if_changed,objcopy)
-
-OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
-$(obj)/info.bin: vmlinux FORCE
- $(call if_changed,objcopy)
-
-OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info
-$(obj)/info.o: $(obj)/info.bin FORCE
- $(call if_changed,objcopy)
-
-OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S
-$(obj)/vmlinux.bin: vmlinux FORCE
- $(call if_changed,objcopy)
-
-suffix-$(CONFIG_KERNEL_GZIP) := .gz
-suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
-suffix-$(CONFIG_KERNEL_LZ4) := .lz4
-suffix-$(CONFIG_KERNEL_LZMA) := .lzma
-suffix-$(CONFIG_KERNEL_LZO) := .lzo
-suffix-$(CONFIG_KERNEL_XZ) := .xz
-suffix-$(CONFIG_KERNEL_ZSTD) := .zst
-
-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
- $(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
- $(call if_changed,bzip2_with_size)
-$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE
- $(call if_changed,lz4_with_size)
-$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
- $(call if_changed,lzma_with_size)
-$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
- $(call if_changed,lzo_with_size)
-$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
- $(call if_changed,xzkern_with_size)
-$(obj)/vmlinux.bin.zst: $(obj)/vmlinux.bin FORCE
- $(call if_changed,zstd22_with_size)
-
-OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
-$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
- $(call if_changed,objcopy)
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/decompressor.c
index e27c2140d620..e27c2140d620 100644
--- a/arch/s390/boot/compressed/decompressor.c
+++ b/arch/s390/boot/decompressor.c
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/decompressor.h
index f75cc31a77dd..f75cc31a77dd 100644
--- a/arch/s390/boot/compressed/decompressor.h
+++ b/arch/s390/boot/decompressor.h
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 666692429db0..3f79b9efb803 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -27,234 +27,181 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
-
-#define ARCH_OFFSET 4
+#include "boot.h"
#define EP_OFFSET 0x10008
#define EP_STRING "S390EP"
+#define IPL_BS 0x730
__HEAD
-
-#define IPL_BS 0x730
- .org 0
- .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
- .long 0x02000018,0x60000050 # by ipl to addresses 0-23.
- .long 0x02000068,0x60000050 # (a PSW and two CCWs).
- .fill 80-24,1,0x40 # bytes 24-79 are discarded !!
- .long 0x020000f0,0x60000050 # The next 160 byte are loaded
- .long 0x02000140,0x60000050 # to addresses 0x18-0xb7
- .long 0x02000190,0x60000050 # They form the continuation
- .long 0x020001e0,0x60000050 # of the CCW program started
- .long 0x02000230,0x60000050 # by ipl and load the range
- .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
- .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
- .long 0x02000320,0x60000050 # in memory. At the end of
- .long 0x02000370,0x60000050 # the channel program the PSW
- .long 0x020003c0,0x60000050 # at location 0 is loaded.
- .long 0x02000410,0x60000050 # Initial processing starts
- .long 0x02000460,0x60000050 # at 0x200 = iplstart.
- .long 0x020004b0,0x60000050
- .long 0x02000500,0x60000050
- .long 0x02000550,0x60000050
- .long 0x020005a0,0x60000050
- .long 0x020005f0,0x60000050
- .long 0x02000640,0x60000050
- .long 0x02000690,0x60000050
- .long 0x020006e0,0x20000050
-
- .org __LC_RST_NEW_PSW # 0x1a0
- .quad 0,iplstart
- .org __LC_EXT_NEW_PSW # 0x1b0
- .quad 0x0002000180000000,0x1b0 # disabled wait
- .org __LC_PGM_NEW_PSW # 0x1d0
- .quad 0x0000000180000000,startup_pgm_check_handler
- .org __LC_IO_NEW_PSW # 0x1f0
- .quad 0x0002000180000000,0x1f0 # disabled wait
-
- .org 0x200
-
+ipl_start:
+ mvi __LC_AR_MODE_ID,1 # set esame flag
+ slr %r0,%r0 # set cpuid to zero
+ lhi %r1,2 # mode 2 = esame (dump)
+ sigp %r1,%r0,0x12 # switch to esame mode
+ sam64 # switch to 64 bit addressing mode
+ lgh %r1,__LC_SUBCHANNEL_ID # test if subchannel number
+ brctg %r1,.Lnoload # is valid
+ llgf %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number
+ lghi %r2,IPL_BS # load start address
+ bras %r14,.Lloader # load rest of ipl image
+ larl %r12,parmarea # pointer to parameter area
+ stg %r1,IPL_DEVICE-PARMAREA(%r12) # save ipl device number
+#
+# load parameter file from ipl device
+#
+.Lagain1:
+ larl %r2,_end # ramdisk loc. is temp
+ bras %r14,.Lloader # load parameter file
+ ltgr %r2,%r2 # got anything ?
+ jz .Lnopf
+ lg %r3,MAX_COMMAND_LINE_SIZE-PARMAREA(%r12)
+ aghi %r3,-1
+ clgr %r2,%r3
+ jl .Lnotrunc
+ lgr %r2,%r3
+.Lnotrunc:
+ larl %r4,_end
+ larl %r13,.L_hdr
+ clc 0(3,%r4),0(%r13) # if it is HDRx
+ jz .Lagain1 # skip dataset header
+ larl %r13,.L_eof
+ clc 0(3,%r4),0(%r13) # if it is EOFx
+ jz .Lagain1 # skip dateset trailer
+ lgr %r5,%r2
+ la %r6,COMMAND_LINE-PARMAREA(%r12)
+ lgr %r7,%r2
+ aghi %r7,1
+ mvcl %r6,%r4
+.Lnopf:
+#
+# load ramdisk from ipl device
+#
+.Lagain2:
+ larl %r2,_end # addr of ramdisk
+ stg %r2,INITRD_START-PARMAREA(%r12)
+ bras %r14,.Lloader # load ramdisk
+ stg %r2,INITRD_SIZE-PARMAREA(%r12) # store size of rd
+ ltgr %r2,%r2
+ jnz .Lrdcont
+ stg %r2,INITRD_START-PARMAREA(%r12) # no ramdisk found
+.Lrdcont:
+ larl %r2,_end
+ larl %r13,.L_hdr # skip HDRx and EOFx
+ clc 0(3,%r2),0(%r13)
+ jz .Lagain2
+ larl %r13,.L_eof
+ clc 0(3,%r2),0(%r13)
+ jz .Lagain2
+#
+# reset files in VM reader
+#
+ larl %r13,.Lcpuid
+ stidp 0(%r13) # store cpuid
+ tm 0(%r13),0xff # running VM ?
+ jno .Lnoreset
+ larl %r2,.Lreset
+ lghi %r3,26
+ diag %r2,%r3,8
+ larl %r5,.Lirb
+ stsch 0(%r5) # check if irq is pending
+ tm 30(%r5),0x0f # by verifying if any of the
+ jnz .Lwaitforirq # activity or status control
+ tm 31(%r5),0xff # bits is set in the schib
+ jz .Lnoreset
+.Lwaitforirq:
+ bras %r14,.Lirqwait # wait for IO interrupt
+ c %r1,__LC_SUBCHANNEL_ID # compare subchannel number
+ jne .Lwaitforirq
+ larl %r5,.Lirb
+ tsch 0(%r5)
+.Lnoreset:
+ j .Lnoload
+#
+# everything loaded, go for it
+#
+.Lnoload:
+ jg startup
#
# subroutine to wait for end I/O
#
.Lirqwait:
- mvc __LC_IO_NEW_PSW(16),.Lnewpsw # set up IO interrupt psw
- lpsw .Lwaitpsw
+ larl %r13,.Lnewpswmask # set up IO interrupt psw
+ mvc __LC_IO_NEW_PSW(8),0(%r13)
+ stg %r14,__LC_IO_NEW_PSW+8
+ larl %r13,.Lwaitpsw
+ lpswe 0(%r13)
.Lioint:
- br %r14
- .align 8
-.Lnewpsw:
- .quad 0x0000000080000000,.Lioint
-.Lwaitpsw:
- .long 0x020a0000,0x80000000+.Lioint
-
#
# subroutine for loading cards from the reader
#
.Lloader:
- la %r4,0(%r14)
- la %r3,.Lorb # r2 = address of orb into r2
- la %r5,.Lirb # r4 = address of irb
- la %r6,.Lccws
- la %r7,20
+ lgr %r4,%r14
+ larl %r3,.Lorb # r2 = address of orb into r2
+ larl %r5,.Lirb # r4 = address of irb
+ larl %r6,.Lccws
+ lghi %r7,20
.Linit:
st %r2,4(%r6) # initialize CCW data addresses
la %r2,0x50(%r2)
la %r6,8(%r6)
- bct 7,.Linit
-
- lctl %c6,%c6,.Lcr6 # set IO subclass mask
- slr %r2,%r2
+ brctg %r7,.Linit
+ larl %r13,.Lcr6
+ lctlg %c6,%c6,0(%r13)
+ xgr %r2,%r2
.Lldlp:
ssch 0(%r3) # load chunk of 1600 bytes
- bnz .Llderr
+ jnz .Llderr
.Lwait4irq:
- bas %r14,.Lirqwait
+ bras %r14,.Lirqwait
c %r1,__LC_SUBCHANNEL_ID # compare subchannel number
- bne .Lwait4irq
+ jne .Lwait4irq
tsch 0(%r5)
-
- slr %r0,%r0
+ xgr %r0,%r0
ic %r0,8(%r5) # get device status
- chi %r0,8 # channel end ?
- be .Lcont
- chi %r0,12 # channel end + device end ?
- be .Lcont
-
- l %r0,4(%r5)
- s %r0,8(%r3) # r0/8 = number of ccws executed
- mhi %r0,10 # *10 = number of bytes in ccws
- lh %r3,10(%r5) # get residual count
- sr %r0,%r3 # #ccws*80-residual=#bytes read
- ar %r2,%r0
-
+ cghi %r0,8 # channel end ?
+ je .Lcont
+ cghi %r0,12 # channel end + device end ?
+ je .Lcont
+ llgf %r0,4(%r5)
+ sgf %r0,8(%r3) # r0/8 = number of ccws executed
+ mghi %r0,10 # *10 = number of bytes in ccws
+ llgh %r3,10(%r5) # get residual count
+ sgr %r0,%r3 # #ccws*80-residual=#bytes read
+ agr %r2,%r0
br %r4 # r2 contains the total size
-
.Lcont:
- ahi %r2,0x640 # add 0x640 to total size
- la %r6,.Lccws
- la %r7,20
+ aghi %r2,0x640 # add 0x640 to total size
+ larl %r6,.Lccws
+ lghi %r7,20
.Lincr:
l %r0,4(%r6) # update CCW data addresses
- ahi %r0,0x640
+ aghi %r0,0x640
st %r0,4(%r6)
- ahi %r6,8
- bct 7,.Lincr
-
- b .Lldlp
+ aghi %r6,8
+ brctg %r7,.Lincr
+ j .Lldlp
.Llderr:
- lpsw .Lcrash
+ larl %r13,.Lcrash
+ lpsw 0(%r13)
.align 8
+.Lwaitpsw:
+ .quad 0x0202000180000000,.Lioint
+.Lnewpswmask:
+ .quad 0x0000000180000000
+ .align 8
.Lorb: .long 0x00000000,0x0080ff00,.Lccws
.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6: .long 0xff000000
-.Lloadp:.long 0,0
+ .align 8
+.Lcr6: .quad 0x00000000ff000000
.align 8
.Lcrash:.long 0x000a0000,0x00000000
-
.align 8
.Lccws: .rept 19
.long 0x02600050,0x00000000
.endr
.long 0x02200050,0x00000000
-
-iplstart:
- mvi __LC_AR_MODE_ID,1 # set esame flag
- slr %r0,%r0 # set cpuid to zero
- lhi %r1,2 # mode 2 = esame (dump)
- sigp %r1,%r0,0x12 # switch to esame mode
- bras %r13,0f
- .fill 16,4,0x0
-0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
- sam31 # switch to 31 bit addressing mode
- lh %r1,__LC_SUBCHANNEL_ID # test if subchannel number
- bct %r1,.Lnoload # is valid
- l %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number
- la %r2,IPL_BS # load start address
- bas %r14,.Lloader # load rest of ipl image
- l %r12,.Lparm # pointer to parameter area
- st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
-
-#
-# load parameter file from ipl device
-#
-.Lagain1:
- l %r2,.Linitrd # ramdisk loc. is temp
- bas %r14,.Lloader # load parameter file
- ltr %r2,%r2 # got anything ?
- bz .Lnopf
- l %r3,MAX_COMMAND_LINE_SIZE+ARCH_OFFSET-PARMAREA(%r12)
- ahi %r3,-1
- clr %r2,%r3
- bl .Lnotrunc
- lr %r2,%r3
-.Lnotrunc:
- l %r4,.Linitrd
- clc 0(3,%r4),.L_hdr # if it is HDRx
- bz .Lagain1 # skip dataset header
- clc 0(3,%r4),.L_eof # if it is EOFx
- bz .Lagain1 # skip dateset trailer
-
- lr %r5,%r2
- la %r6,COMMAND_LINE-PARMAREA(%r12)
- lr %r7,%r2
- ahi %r7,1
- mvcl %r6,%r4
-.Lnopf:
-
-#
-# load ramdisk from ipl device
-#
-.Lagain2:
- l %r2,.Linitrd # addr of ramdisk
- st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
- bas %r14,.Lloader # load ramdisk
- st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd
- ltr %r2,%r2
- bnz .Lrdcont
- st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
-.Lrdcont:
- l %r2,.Linitrd
-
- clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
- bz .Lagain2
- clc 0(3,%r2),.L_eof
- bz .Lagain2
-
-#
-# reset files in VM reader
-#
- stidp .Lcpuid # store cpuid
- tm .Lcpuid,0xff # running VM ?
- bno .Lnoreset
- la %r2,.Lreset
- lhi %r3,26
- diag %r2,%r3,8
- la %r5,.Lirb
- stsch 0(%r5) # check if irq is pending
- tm 30(%r5),0x0f # by verifying if any of the
- bnz .Lwaitforirq # activity or status control
- tm 31(%r5),0xff # bits is set in the schib
- bz .Lnoreset
-.Lwaitforirq:
- bas %r14,.Lirqwait # wait for IO interrupt
- c %r1,__LC_SUBCHANNEL_ID # compare subchannel number
- bne .Lwaitforirq
- la %r5,.Lirb
- tsch 0(%r5)
-.Lnoreset:
- b .Lnoload
-
-#
-# everything loaded, go for it
-#
-.Lnoload:
- l %r1,.Lstartup
- br %r1
-
-.Linitrd:.long _end # default address of initrd
-.Lparm: .long PARMAREA
-.Lstartup: .long startup
.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
.byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
.byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
@@ -268,10 +215,10 @@ iplstart:
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
#
- .org STARTUP_NORMAL_OFFSET
+ .org STARTUP_NORMAL_OFFSET - IPL_START
SYM_CODE_START(startup)
j startup_normal
- .org EP_OFFSET
+ .org EP_OFFSET - IPL_START
#
# This is a list of s390 kernel entry points. At address 0x1000f the number of
# valid entry points is stored.
@@ -283,7 +230,7 @@ SYM_CODE_START(startup)
#
# kdump startup-code, running in 64 bit absolute addressing mode
#
- .org STARTUP_KDUMP_OFFSET
+ .org STARTUP_KDUMP_OFFSET - IPL_START
j startup_kdump
SYM_CODE_END(startup)
SYM_CODE_START_LOCAL(startup_normal)
@@ -295,20 +242,23 @@ SYM_CODE_START_LOCAL(startup_normal)
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam64 # switch to 64 bit addressing mode
- basr %r13,0 # get base
-.LPG0:
- mvc __LC_EXT_NEW_PSW(16),.Lext_new_psw-.LPG0(%r13)
- mvc __LC_PGM_NEW_PSW(16),.Lpgm_new_psw-.LPG0(%r13)
- mvc __LC_IO_NEW_PSW(16),.Lio_new_psw-.LPG0(%r13)
+ larl %r13,.Lext_new_psw
+ mvc __LC_EXT_NEW_PSW(16),0(%r13)
+ larl %r13,.Lpgm_new_psw
+ mvc __LC_PGM_NEW_PSW(16),0(%r13)
+ larl %r13,.Lio_new_psw
+ mvc __LC_IO_NEW_PSW(16),0(%r13)
xc 0x200(256),0x200 # partially clear lowcore
xc 0x300(256),0x300
xc 0xe00(256),0xe00
xc 0xf00(256),0xf00
- lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
+ larl %r13,.Lctl
+ lctlg %c0,%c15,0(%r13) # load control registers
stcke __LC_BOOT_CLOCK
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
- spt 6f-.LPG0(%r13)
- mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
+ larl %r13,6f
+ spt 0(%r13)
+ mvc __LC_LAST_UPDATE_TIMER(8),0(%r13)
larl %r15,_stack_end-STACK_FRAME_OVERHEAD
brasl %r14,sclp_early_setup_buffer
brasl %r14,verify_facilities
@@ -368,23 +318,3 @@ SYM_CODE_START_LOCAL(startup_pgm_check_handler)
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
lpswe __LC_RETURN_PSW # disabled wait
SYM_CODE_END(startup_pgm_check_handler)
-
-#
-# params at 10400 (setup.h)
-# Must be keept in sync with struct parmarea in setup.h
-#
- .org PARMAREA
-SYM_DATA_START(parmarea)
- .quad 0 # IPL_DEVICE
- .quad 0 # INITRD_START
- .quad 0 # INITRD_SIZE
- .quad 0 # OLDMEM_BASE
- .quad 0 # OLDMEM_SIZE
- .quad kernel_version # points to kernel version string
- .quad COMMAND_LINE_SIZE
-
- .org COMMAND_LINE
- .byte "root=/dev/ram0 ro"
- .byte 0
- .org PARMAREA+__PARMAREA_SIZE
-SYM_DATA_END(parmarea)
diff --git a/arch/s390/boot/ipl_data.c b/arch/s390/boot/ipl_data.c
new file mode 100644
index 000000000000..0846e2b249c6
--- /dev/null
+++ b/arch/s390/boot/ipl_data.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compat.h>
+#include <linux/ptrace.h>
+#include <asm/cio.h>
+#include <asm/asm-offsets.h>
+#include "boot.h"
+
+#define CCW0(cmd, addr, cnt, flg) \
+ { .cmd_code = cmd, .cda = addr, .count = cnt, .flags = flg, }
+
+#define PSW_MASK_DISABLED (PSW_MASK_WAIT | PSW_MASK_EA | PSW_MASK_BA)
+
+struct ipl_lowcore {
+ psw_t32 ipl_psw; /* 0x0000 */
+ struct ccw0 ccwpgm[2]; /* 0x0008 */
+ u8 fill[56]; /* 0x0018 */
+ struct ccw0 ccwpgmcc[20]; /* 0x0050 */
+ u8 pad_0xf0[0x01a0-0x00f0]; /* 0x00f0 */
+ psw_t restart_psw; /* 0x01a0 */
+ psw_t external_new_psw; /* 0x01b0 */
+ psw_t svc_new_psw; /* 0x01c0 */
+ psw_t program_new_psw; /* 0x01d0 */
+ psw_t mcck_new_psw; /* 0x01e0 */
+ psw_t io_new_psw; /* 0x01f0 */
+};
+
+/*
+ * Initial lowcore for IPL: the first 24 bytes are loaded by IPL to
+ * addresses 0-23 (a PSW and two CCWs). Bytes 24-79 are discarded.
+ * The next 160 bytes are loaded to addresses 0x18-0xb7. They form
+ * the continuation of the CCW program started by IPL and load the
+ * range 0x0f0-0x730 from the image to the range 0x0f0-0x730 in
+ * memory. At the end of the channel program the PSW at location 0 is
+ * loaded.
+ * Initial processing starts at 0x200 = iplstart.
+ *
+ * The restart psw points to iplstart which allows to load a kernel
+ * image into memory and starting it by a psw restart on any cpu. All
+ * other default psw new locations contain a disabled wait psw where
+ * the address indicates which psw was loaded.
+ *
+ * Note that the 'file' utility can detect s390 kernel images. For
+ * that to succeed the two initial CCWs, and the 0x40 fill bytes must
+ * be present.
+ */
+static struct ipl_lowcore ipl_lowcore __used __section(".ipldata") = {
+ .ipl_psw = { .mask = PSW32_MASK_BASE, .addr = PSW32_ADDR_AMODE | IPL_START },
+ .ccwpgm = {
+ [ 0] = CCW0(CCW_CMD_READ_IPL, 0x018, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 1] = CCW0(CCW_CMD_READ_IPL, 0x068, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ },
+ .fill = {
+ [ 0 ... 55] = 0x40,
+ },
+ .ccwpgmcc = {
+ [ 0] = CCW0(CCW_CMD_READ_IPL, 0x0f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 1] = CCW0(CCW_CMD_READ_IPL, 0x140, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 2] = CCW0(CCW_CMD_READ_IPL, 0x190, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 3] = CCW0(CCW_CMD_READ_IPL, 0x1e0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 4] = CCW0(CCW_CMD_READ_IPL, 0x230, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 5] = CCW0(CCW_CMD_READ_IPL, 0x280, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 6] = CCW0(CCW_CMD_READ_IPL, 0x2d0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 7] = CCW0(CCW_CMD_READ_IPL, 0x320, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 8] = CCW0(CCW_CMD_READ_IPL, 0x370, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [ 9] = CCW0(CCW_CMD_READ_IPL, 0x3c0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [10] = CCW0(CCW_CMD_READ_IPL, 0x410, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [11] = CCW0(CCW_CMD_READ_IPL, 0x460, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [12] = CCW0(CCW_CMD_READ_IPL, 0x4b0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [13] = CCW0(CCW_CMD_READ_IPL, 0x500, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [14] = CCW0(CCW_CMD_READ_IPL, 0x550, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [15] = CCW0(CCW_CMD_READ_IPL, 0x5a0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [16] = CCW0(CCW_CMD_READ_IPL, 0x5f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [17] = CCW0(CCW_CMD_READ_IPL, 0x640, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [18] = CCW0(CCW_CMD_READ_IPL, 0x690, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC),
+ [19] = CCW0(CCW_CMD_READ_IPL, 0x6e0, 0x50, CCW_FLAG_SLI),
+ },
+ .restart_psw = { .mask = 0, .addr = IPL_START, },
+ .external_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_EXT_NEW_PSW, },
+ .svc_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_SVC_NEW_PSW, },
+ .program_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_PGM_NEW_PSW, },
+ .mcck_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_MCK_NEW_PSW, },
+ .io_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_IO_NEW_PSW, },
+};
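
The struct comments above pin every field of the new ipl_lowcore image to a fixed offset, and the linker script added later in this series places it at address 0 with IPL_START (0x200) immediately behind it. The following is a minimal, self-contained sketch of that size arithmetic; the psw_t, psw_t32 and ccw0 definitions here are stand-ins sized like the kernel types, not the real declarations.

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the kernel types, sized like the real ones. */
typedef struct { uint32_t mask; uint32_t addr; } psw_t32;	/*  8 bytes */
typedef struct { uint64_t mask; uint64_t addr; } psw_t;		/* 16 bytes */
struct ccw0 { uint8_t cmd_code; uint8_t cda_hi; uint16_t cda_lo;
	      uint8_t flags; uint8_t pad; uint16_t count; };	/*  8 bytes */

struct ipl_lowcore {
	psw_t32 ipl_psw;			/* 0x0000 */
	struct ccw0 ccwpgm[2];			/* 0x0008 */
	uint8_t fill[56];			/* 0x0018 */
	struct ccw0 ccwpgmcc[20];		/* 0x0050 */
	uint8_t pad_0xf0[0x01a0 - 0x00f0];	/* 0x00f0 */
	psw_t restart_psw;			/* 0x01a0 */
	psw_t external_new_psw;			/* 0x01b0 */
	psw_t svc_new_psw;			/* 0x01c0 */
	psw_t program_new_psw;			/* 0x01d0 */
	psw_t mcck_new_psw;			/* 0x01e0 */
	psw_t io_new_psw;			/* 0x01f0 */
};

/* The offsets documented in the struct comments hold, and the image
 * ends exactly at IPL_START (0x200), where ipl_start begins in head.S. */
_Static_assert(offsetof(struct ipl_lowcore, ccwpgmcc) == 0x50, "CCW chain at 0x50");
_Static_assert(offsetof(struct ipl_lowcore, restart_psw) == 0x1a0, "restart PSW at 0x1a0");
_Static_assert(offsetof(struct ipl_lowcore, io_new_psw) == 0x1f0, "I/O new PSW at 0x1f0");
_Static_assert(sizeof(struct ipl_lowcore) == 0x200, "image ends at IPL_START");

int main(void) { return 0; }
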
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 9ed7e29c81d9..ca78d6162245 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -8,9 +8,16 @@
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
+#include <asm/setup.h>
#include <asm/uv.h>
#include "boot.h"
+struct parmarea parmarea __section(".parmarea") = {
+ .kernel_version = (unsigned long)kernel_version,
+ .max_command_line_size = COMMAND_LINE_SIZE,
+ .command_line = "root=/dev/ram0 ro",
+};
+
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
int __bootdata(noexec_disabled);
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index d8984462071f..e8d74d4f62aa 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -8,7 +8,7 @@
#include <asm/timex.h>
#include <asm/sclp.h>
#include <asm/kasan.h>
-#include "compressed/decompressor.h"
+#include "decompressor.h"
#include "boot.h"
#define PRNG_MODE_TDES 1
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
index 2f949cd9076b..7fa1a32ea0f3 100644
--- a/arch/s390/boot/mem_detect.c
+++ b/arch/s390/boot/mem_detect.c
@@ -7,7 +7,7 @@
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
-#include "compressed/decompressor.h"
+#include "decompressor.h"
#include "boot.h"
struct mem_detect_info __bootdata(mem_detect);
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 1aa11a8f57dd..863e6bcaa5a1 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -10,7 +10,7 @@
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
-#include "compressed/decompressor.h"
+#include "decompressor.h"
#include "boot.h"
#include "uv.h"
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index 918e05137d4c..af5c6860e0a1 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -4,6 +4,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/sclp.h>
+#include "boot.h"
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
@@ -13,11 +14,19 @@ ENTRY(startup)
SECTIONS
{
. = 0;
+ .ipldata : {
+ *(.ipldata)
+ }
+ . = IPL_START;
.head.text : {
_head = . ;
HEAD_TEXT
_ehead = . ;
}
+ . = PARMAREA;
+ .parmarea : {
+ *(.parmarea)
+ }
.text : {
_text = .; /* Text */
*(.text)
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index bfbafd35bcbd..e013088b5115 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -194,7 +194,7 @@ static struct skcipher_alg cbc_des_alg = {
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*
- * In fips mode additinally check for all 3 keys are unique.
+ * In fips mode additionally check for all 3 keys are unique.
*
*/
static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 234d791ca59d..ae382bafc772 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -528,7 +528,7 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
/* give mutex free before calling schedule() */
mutex_unlock(&prng_data->mutex);
schedule();
- /* occopy mutex again */
+ /* occupy mutex again */
if (mutex_lock_interruptible(&prng_data->mutex)) {
if (ret == 0)
ret = -ERESTARTSYS;
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 3765c2d81df5..a3d881ca0a98 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -190,7 +190,7 @@ int hypfs_vm_create_files(struct dentry *root)
if (IS_ERR(data))
return PTR_ERR(data);
- /* Hpervisor Info */
+ /* Hypervisor Info */
dir = hypfs_mkdir(root, "hyp");
if (IS_ERR(dir)) {
rc = PTR_ERR(dir);
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
index bb3837d7387c..7db046596b93 100644
--- a/arch/s390/include/asm/alternative-asm.h
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -5,19 +5,6 @@
#ifdef __ASSEMBLY__
/*
- * Check the length of an instruction sequence. The length may not be larger
- * than 254 bytes and it has to be divisible by 2.
- */
-.macro alt_len_check start,end
- .if ( \end - \start ) > 254
- .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
- .endif
- .if ( \end - \start ) % 2
- .error "cpu alternatives instructions length is odd\n"
- .endif
-.endm
-
-/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
@@ -28,66 +15,29 @@
.long \alt_start - .
.word \feature
.byte \orig_end - \orig_start
- .byte \alt_end - \alt_start
-.endm
-
-/*
- * Fill up @bytes with nops. The macro emits 6-byte nop instructions
- * for the bulk of the area, possibly followed by a 4-byte and/or
- * a 2-byte nop if the size of the area is not divisible by 6.
- */
-.macro alt_pad_fill bytes
- .rept ( \bytes ) / 6
- brcl 0,0
- .endr
- .rept ( \bytes ) % 6 / 4
- nop
- .endr
- .rept ( \bytes ) % 6 % 4 / 2
- nopr
- .endr
-.endm
-
-/*
- * Fill up @bytes with nops. If the number of bytes is larger
- * than 6, emit a jg instruction to branch over all nops, then
- * fill an area of size (@bytes - 6) with nop instructions.
- */
-.macro alt_pad bytes
- .if ( \bytes > 0 )
- .if ( \bytes > 6 )
- jg . + \bytes
- alt_pad_fill \bytes - 6
- .else
- alt_pad_fill \bytes
- .endif
- .endif
+ .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
+ .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr. ".skip" directive takes care of proper instruction padding
- * in case @newinstr is longer than @oldinstr.
+ * @newinstr.
*/
.macro ALTERNATIVE oldinstr, newinstr, feature
.pushsection .altinstr_replacement,"ax"
770: \newinstr
771: .popsection
772: \oldinstr
-773: alt_len_check 770b, 771b
- alt_len_check 772b, 773b
- alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
-774: .pushsection .altinstructions,"a"
- alt_entry 772b, 774b, 770b, 771b, \feature
+773: .pushsection .altinstructions,"a"
+ alt_entry 772b, 773b, 770b, 771b, \feature
.popsection
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr. ".skip" directive takes care of proper instruction padding
- * in case @newinstr is longer than @oldinstr.
+ * @newinstr.
*/
.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
.pushsection .altinstr_replacement,"ax"
@@ -95,17 +45,9 @@
771: \newinstr2
772: .popsection
773: \oldinstr
-774: alt_len_check 770b, 771b
- alt_len_check 771b, 772b
- alt_len_check 773b, 774b
- .if ( 771b - 770b > 772b - 771b )
- alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
- .else
- alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
- .endif
-775: .pushsection .altinstructions,"a"
- alt_entry 773b, 775b, 770b, 771b,\feature1
- alt_entry 773b, 775b, 771b, 772b,\feature2
+774: .pushsection .altinstructions,"a"
+ alt_entry 773b, 774b, 770b, 771b,\feature1
+ alt_entry 773b, 774b, 771b, 772b,\feature2
.popsection
.endm
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index 3f2856ed6808..904dd049f954 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -13,32 +13,25 @@ struct alt_instr {
s32 repl_offset; /* offset to replacement instruction */
u16 facility; /* facility bit set for replacement */
u8 instrlen; /* length of original instruction */
- u8 replacementlen; /* length of new instruction */
} __packed;
void apply_alternative_instructions(void);
void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
/*
- * |661: |662: |6620 |663:
- * +-----------+---------------------+
- * | oldinstr | oldinstr_padding |
- * | +----------+----------+
- * | | | |
- * | | >6 bytes |6/4/2 nops|
- * | |6 bytes jg----------->
- * +-----------+---------------------+
- * ^^ static padding ^^
+ * +---------------------------------+
+ * |661: |662:
+ * | oldinstr |
+ * +---------------------------------+
*
* .altinstr_replacement section
- * +---------------------+-----------+
+ * +---------------------------------+
* |6641: |6651:
* | alternative instr 1 |
- * +-----------+---------+- - - - - -+
- * |6642: |6652: |
- * | alternative instr 2 | padding
- * +---------------------+- - - - - -+
- * ^ runtime ^
+ * +---------------------------------+
+ * |6642: |6652:
+ * | alternative instr 2 |
+ * +---------------------------------+
*
* .altinstructions section
* +---------------------------------+
@@ -47,77 +40,31 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
* +---------------------------------+
*/
-#define b_altinstr(num) "664"#num
-#define e_altinstr(num) "665"#num
-
-#define e_oldinstr_pad_end "663"
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
#define oldinstr_len "662b-661b"
-#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
-#define oldinstr_pad_len(num) \
- "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
- "((" altinstr_len(num) ")-(" oldinstr_len "))"
-
-#define INSTR_LEN_SANITY_CHECK(len) \
- ".if " len " > 254\n" \
- "\t.error \"cpu alternatives does not support instructions " \
- "blocks > 254 bytes\"\n" \
- ".endif\n" \
- ".if (" len ") %% 2\n" \
- "\t.error \"cpu alternatives instructions length is odd\"\n" \
- ".endif\n"
-
-#define OLDINSTR_PADDING(oldinstr, num) \
- ".if " oldinstr_pad_len(num) " > 6\n" \
- "\tjg " e_oldinstr_pad_end "f\n" \
- "6620:\n" \
- "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n" \
- "\tnopr\n" \
- ".else\n" \
- "\t.rept " oldinstr_pad_len(num) " / 6\n" \
- "\t.brcl 0,0\n" \
- "\t.endr\n" \
- "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n" \
- "\tnop\n" \
- "\t.endr\n" \
- "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n" \
- "\tnopr\n" \
- ".endr\n" \
- ".endif\n"
-
-#define OLDINSTR(oldinstr, num) \
- "661:\n\t" oldinstr "\n662:\n" \
- OLDINSTR_PADDING(oldinstr, num) \
- e_oldinstr_pad_end ":\n" \
- INSTR_LEN_SANITY_CHECK(oldinstr_len)
-
-#define OLDINSTR_2(oldinstr, num1, num2) \
- "661:\n\t" oldinstr "\n662:\n" \
- ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
- OLDINSTR_PADDING(oldinstr, num2) \
- ".else\n" \
- OLDINSTR_PADDING(oldinstr, num1) \
- ".endif\n" \
- e_oldinstr_pad_end ":\n" \
- INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR(oldinstr) \
+ "661:\n\t" oldinstr "\n662:\n"
#define ALTINSTR_ENTRY(facility, num) \
"\t.long 661b - .\n" /* old instruction */ \
"\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
"\t.word " __stringify(facility) "\n" /* facility bit */ \
- "\t.byte " oldinstr_total_len "\n" /* source len */ \
- "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+ "\t.byte " oldinstr_len "\n" /* instruction len */ \
+ "\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \
+ "\t.org . - (" altinstr_len(num) ") + (" oldinstr_len ")\n"
#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
- b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
- INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, altinstr, facility) \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr, 1) \
".popsection\n" \
- OLDINSTR(oldinstr, 1) \
+ OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility, 1) \
".popsection\n"
@@ -127,7 +74,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
ALTINSTR_REPLACEMENT(altinstr1, 1) \
ALTINSTR_REPLACEMENT(altinstr2, 2) \
".popsection\n" \
- OLDINSTR_2(oldinstr, 1, 2) \
+ OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility1, 1) \
ALTINSTR_ENTRY(facility2, 2) \
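
The pair of .org directives that replaces the old padding and length-check macros is the whole trick: when the original and alternative sequences have the same length the two directives cancel out, and when they differ one of the two would have to move the location counter backwards, which gas and llvm's IAS reject at build time. Below is a small self-contained illustration of that mechanism outside the kernel macros; it only demonstrates the size check, not the actual alternatives patching.

/* Both sequences use the same instruction, so the two .org directives
 * are a no-op; replace one "nop" with an instruction of a different
 * size and the assembler fails with "attempt to move .org backwards". */
static inline void same_size_ok(void)
{
	asm volatile(
		"661:	nop\n"			/* original sequence */
		"662:\n"
		"771:	nop\n"			/* alternative sequence */
		"772:\n"
		".org . - (662b-661b) + (772b-771b)\n"
		".org . - (772b-771b) + (662b-661b)\n"
		::: "memory");
}

int main(void)
{
	same_size_ok();
	return 0;
}
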
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index fb62df5e16a2..f24d9591aaed 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -26,16 +26,16 @@
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \
stringify_in_c(.macro extable_reg reg;) \
- stringify_in_c(.set found, 0;) \
- stringify_in_c(.set regnr, 0;) \
+ stringify_in_c(.set .Lfound, 0;) \
+ stringify_in_c(.set .Lregnr, 0;) \
stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \
stringify_in_c(.ifc "\reg", "%%\rs";) \
- stringify_in_c(.set found, 1;) \
- stringify_in_c(.short regnr;) \
+ stringify_in_c(.set .Lfound, 1;) \
+ stringify_in_c(.short .Lregnr;) \
stringify_in_c(.endif;) \
- stringify_in_c(.set regnr, regnr+1;) \
+ stringify_in_c(.set .Lregnr, .Lregnr+1;) \
stringify_in_c(.endr;) \
- stringify_in_c(.ifne (found != 1);) \
+ stringify_in_c(.ifne (.Lfound != 1);) \
stringify_in_c(.error "extable_reg: bad register argument";) \
stringify_in_c(.endif;) \
stringify_in_c(.endm;) \
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 2c057e1f3200..82de2a7c4160 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -26,14 +26,14 @@ static __always_inline void bcr_serialize(void)
asm volatile(__ASM_BCR_SERIALIZE : : : "memory");
}
-#define mb() bcr_serialize()
-#define rmb() barrier()
-#define wmb() barrier()
-#define dma_rmb() mb()
-#define dma_wmb() mb()
-#define __smp_mb() mb()
-#define __smp_rmb() rmb()
-#define __smp_wmb() wmb()
+#define __mb() bcr_serialize()
+#define __rmb() barrier()
+#define __wmb() barrier()
+#define __dma_rmb() __mb()
+#define __dma_wmb() __mb()
+#define __smp_mb() __mb()
+#define __smp_rmb() __rmb()
+#define __smp_wmb() __wmb()
#define __smp_store_release(p, v) \
do { \
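
The rename from mb()/rmb()/wmb() to the arch-private __mb()/__rmb()/__wmb() exists so that the generic barrier layer can define the public macros on top of them and slot in the KCSAN hooks mentioned in the merge description. The following self-contained sketch shows that wrapping pattern; kcsan_mb() and the barrier body are stand-ins here, not the real s390 or asm-generic implementations.

#include <stdio.h>

/* Stand-in KCSAN hook; the real one records the barrier for the
 * concurrency sanitizer. */
static void kcsan_mb(void) { }

/* Stand-in for the arch-private barrier (bcr_serialize() on s390). */
#define __mb()	__asm__ __volatile__("" ::: "memory")

/* The generic layer builds the public macro from the private one and
 * adds the instrumentation, roughly like this. */
#define mb()	do { kcsan_mb(); __mb(); } while (0)

int main(void)
{
	mb();		/* instrumented full barrier */
	puts("barrier issued");
	return 0;
}
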
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 1effac6a0152..1c4f585dd39b 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -369,7 +369,7 @@ void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev);
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages);
/* Function from drivers/s390/cio/chsc.c */
-int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta);
int chsc_sstpi(void *page, void *result, size_t size);
int chsc_stzi(void *page, void *result, size_t size);
int chsc_sgib(u32 origin);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index cdc7ae72529d..7d6fe813ac39 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
+#include <asm/ptrace.h>
#define compat_mode_t compat_mode_t
typedef u16 compat_mode_t;
@@ -22,32 +23,8 @@ typedef u16 compat_mode_t;
(__force t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
})
-#define PSW32_MASK_PER 0x40000000UL
-#define PSW32_MASK_DAT 0x04000000UL
-#define PSW32_MASK_IO 0x02000000UL
-#define PSW32_MASK_EXT 0x01000000UL
-#define PSW32_MASK_KEY 0x00F00000UL
-#define PSW32_MASK_BASE 0x00080000UL /* Always one */
-#define PSW32_MASK_MCHECK 0x00040000UL
-#define PSW32_MASK_WAIT 0x00020000UL
-#define PSW32_MASK_PSTATE 0x00010000UL
-#define PSW32_MASK_ASC 0x0000C000UL
-#define PSW32_MASK_CC 0x00003000UL
-#define PSW32_MASK_PM 0x00000f00UL
-#define PSW32_MASK_RI 0x00000080UL
-
#define PSW32_MASK_USER 0x0000FF00UL
-#define PSW32_ADDR_AMODE 0x80000000UL
-#define PSW32_ADDR_INSN 0x7FFFFFFFUL
-
-#define PSW32_DEFAULT_KEY (((u32) PAGE_DEFAULT_ACC) << 20)
-
-#define PSW32_ASC_PRIMARY 0x00000000UL
-#define PSW32_ASC_ACCREG 0x00004000UL
-#define PSW32_ASC_SECONDARY 0x00008000UL
-#define PSW32_ASC_HOME 0x0000C000UL
-
#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 82388da3f95f..267a8f88e143 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -93,7 +93,9 @@ union ctlreg0 {
unsigned long tcx : 1; /* Transactional-Execution control */
unsigned long pifo : 1; /* Transactional-Execution Program-
Interruption-Filtering Override */
- unsigned long : 22;
+ unsigned long : 3;
+ unsigned long ccc : 1; /* Cryptography counter control */
+ unsigned long : 18;
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 2f0a1cacdf85..000de2b1e67a 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -9,19 +9,21 @@
#include <linux/uaccess.h>
#include <asm/timex.h>
#include <asm/fpu/api.h>
+#include <asm/pai.h>
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)
void do_per_trap(struct pt_regs *regs);
-#ifdef CONFIG_DEBUG_ENTRY
-static __always_inline void arch_check_user_regs(struct pt_regs *regs)
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
{
- debug_user_asce(0);
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+ debug_user_asce(0);
+
+ pai_kernel_enter(regs);
}
-#define arch_check_user_regs arch_check_user_regs
-#endif /* CONFIG_DEBUG_ENTRY */
+#define arch_enter_from_user_mode arch_enter_from_user_mode
static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
unsigned long ti_work)
@@ -44,6 +46,8 @@ static __always_inline void arch_exit_to_user_mode(void)
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
debug_user_asce(1);
+
+ pai_kernel_exit(current_pt_regs());
}
#define arch_exit_to_user_mode arch_exit_to_user_mode
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 3f8ee257f9aa..a405b6bb89fb 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -133,6 +133,8 @@ int ipl_report_add_certificate(struct ipl_report *report, void *key,
* DIAG 308 support
*/
enum diag308_subcode {
+ DIAG308_CLEAR_RESET = 0,
+ DIAG308_LOAD_NORMAL_RESET = 1,
DIAG308_REL_HSA = 2,
DIAG308_LOAD_CLEAR = 3,
DIAG308_LOAD_NORMAL_DUMP = 4,
@@ -141,6 +143,10 @@ enum diag308_subcode {
DIAG308_LOAD_NORMAL = 7,
};
+enum diag308_subcode_flags {
+ DIAG308_FLAG_EI = 1UL << 16,
+};
+
enum diag308_rc {
DIAG308_RC_OK = 0x0001,
DIAG308_RC_NOCONFIG = 0x0102,
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 56002aeacabf..26fe5e535728 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -200,7 +200,10 @@ struct lowcore {
__u64 last_break_save_area; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
- __u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
+ __u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */
+ /* Cryptography-counter designation */
+ __u64 ccd; /* 0x1500 */
+ __u8 pad_0x1508[0x1800-0x1508]; /* 0x1508 */
/* Transaction abort diagnostic block */
struct pgm_tdb pgm_tdb; /* 0x1800 */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 292083083830..af1cd3a6f406 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -101,7 +101,7 @@ void nmi_alloc_mcesa_early(u64 *mcesad);
int nmi_alloc_mcesa(u64 *mcesad);
void nmi_free_mcesa(u64 *mcesad);
-void s390_handle_mcck(void);
+void s390_handle_mcck(struct pt_regs *regs);
void __s390_handle_mcck(void);
int s390_do_machine_check(struct pt_regs *regs);
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 2cfcd5ac3a8b..d910d71b5bb5 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -54,31 +54,31 @@
.endm
.macro __DECODE_R expand,reg
- .set __decode_fail,1
+ .set .L__decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \reg,%r\r1
\expand \r1
- .set __decode_fail,0
+ .set .L__decode_fail,0
.endif
.endr
- .if __decode_fail == 1
+ .if .L__decode_fail == 1
.error "__DECODE_R failed"
.endif
.endm
.macro __DECODE_RR expand,rsave,rtarget
- .set __decode_fail,1
+ .set .L__decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \rsave,%r\r1
.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \rtarget,%r\r2
\expand \r1,\r2
- .set __decode_fail,0
+ .set .L__decode_fail,0
.endif
.endr
.endif
.endr
- .if __decode_fail == 1
+ .if .L__decode_fail == 1
.error "__DECODE_RR failed"
.endif
.endm
diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h
new file mode 100644
index 000000000000..5b7e33ac6f0b
--- /dev/null
+++ b/arch/s390/include/asm/pai.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Processor Activity Instrumentation support for cryptography counters
+ *
+ * Copyright IBM Corp. 2022
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ */
+#ifndef _ASM_S390_PAI_H
+#define _ASM_S390_PAI_H
+
+#include <linux/jump_label.h>
+#include <asm/lowcore.h>
+#include <asm/ptrace.h>
+
+struct qpaci_info_block {
+ u64 header;
+ struct {
+ u64 : 8;
+ u64 num_cc : 8; /* # of supported crypto counters */
+ u64 : 48;
+ };
+};
+
+static inline int qpaci(struct qpaci_info_block *info)
+{
+ /* Size of info (in double words minus one) */
+ size_t size = sizeof(*info) / sizeof(u64) - 1;
+ int cc;
+
+ asm volatile(
+ " lgr 0,%[size]\n"
+ " .insn s,0xb28f0000,%[info]\n"
+ " lgr %[size],0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size)
+ :
+ : "0", "cc", "memory");
+ return cc ? (size + 1) * sizeof(u64) : 0;
+}
+
+#define PAI_CRYPTO_BASE 0x1000 /* First event number */
+#define PAI_CRYPTO_MAXCTR 256 /* Max # of event counters */
+#define PAI_CRYPTO_KERNEL_OFFSET 2048
+
+DECLARE_STATIC_KEY_FALSE(pai_key);
+
+static __always_inline void pai_kernel_enter(struct pt_regs *regs)
+{
+ if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+ return;
+ if (!static_branch_unlikely(&pai_key))
+ return;
+ if (!S390_lowcore.ccd)
+ return;
+ if (!user_mode(regs))
+ return;
+ WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET);
+}
+
+static __always_inline void pai_kernel_exit(struct pt_regs *regs)
+{
+ if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+ return;
+ if (!static_branch_unlikely(&pai_key))
+ return;
+ if (!S390_lowcore.ccd)
+ return;
+ if (!user_mode(regs))
+ return;
+ WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
+}
+
+#endif
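
From user space the counters behind this header are reached through the new perf PMU added in arch/s390/kernel/perf_pai_crypto.c. Below is a hedged sketch of opening the first counter (event number PAI_CRYPTO_BASE, 0x1000) with perf_event_open(); the sysfs name "pai_crypto" and the choice of event number are assumptions taken from this series, so check perf list on a z16 system before relying on them.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	uint64_t count;
	FILE *f;
	int fd;

	/* Dynamic PMU type id registered by the driver (name assumed). */
	f = fopen("/sys/bus/event_source/devices/pai_crypto/type", "r");
	if (!f || fscanf(f, "%u", &type) != 1) {
		fprintf(stderr, "pai_crypto PMU not available\n");
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x1000;	/* PAI_CRYPTO_BASE: first crypto counter (assumed) */

	fd = perf_event_open(&attr, -1, 0, -1, 0);	/* system-wide, CPU 0 */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	sleep(10);					/* count for a while */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("crypto operations on CPU 0: %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}
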
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 5dfe47588277..3bb4e7e33a0e 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -17,9 +17,14 @@ extern debug_info_t *pci_debug_err_id;
debug_text_event(pci_debug_err_id, 0, debug_buffer); \
} while (0)
+static inline void zpci_err_hex_level(int level, void *addr, int len)
+{
+ debug_event(pci_debug_err_id, level, addr, len);
+}
+
static inline void zpci_err_hex(void *addr, int len)
{
- debug_event(pci_debug_err_id, 0, addr, len);
+ zpci_err_hex_level(0, addr, len);
}
#endif
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index d9d5350cc3ec..bf15da0fedbc 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -46,10 +46,17 @@ static inline bool test_preempt_need_resched(void)
static inline void __preempt_count_add(int val)
{
- if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
- __atomic_add_const(val, &S390_lowcore.preempt_count);
- else
- __atomic_add(val, &S390_lowcore.preempt_count);
+ /*
+ * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
+ * enabled, gcc 12 fails to handle __builtin_constant_p().
+ */
+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
+ if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
+ __atomic_add_const(val, &S390_lowcore.preempt_count);
+ return;
+ }
+ }
+ __atomic_add(val, &S390_lowcore.preempt_count);
}
static inline void __preempt_count_sub(int val)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index ff1e25d515a8..add764a2be8c 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -83,6 +83,7 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
extern void __bpon(void);
+unsigned long vdso_size(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -94,9 +95,10 @@ extern void __bpon(void);
(_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1))
#define TASK_SIZE_MAX (-PAGE_SIZE)
-#define STACK_TOP (test_thread_flag(TIF_31BIT) ? \
- _REGION3_SIZE : _REGION2_SIZE)
-#define STACK_TOP_MAX _REGION2_SIZE
+#define VDSO_BASE (STACK_TOP + PAGE_SIZE)
+#define VDSO_LIMIT (test_thread_flag(TIF_31BIT) ? _REGION3_SIZE : _REGION2_SIZE)
+#define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE)
+#define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE)
#define HAVE_ARCH_PICK_MMAP_LAYOUT
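The reworked macros place the vdso at the very top of the address space: STACK_TOP now ends one page below VDSO_BASE, and the vdso occupies the range from VDSO_BASE up to VDSO_LIMIT (which is _REGION3_SIZE or _REGION2_SIZE depending on TIF_31BIT). A worked example with stand-in numbers; the real values come from the region sizes and the vdso_size() helper added later in this diff:

/* Illustration only; assumes a 64-bit host and hypothetical sizes. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size  = 4096UL;
	unsigned long vdso_limit = 1UL << 42;		/* stand-in for _REGION2_SIZE */
	unsigned long vdso_size  = 3 * page_size;	/* hypothetical vdso + vvar pages */

	unsigned long stack_top = vdso_limit - vdso_size - page_size;
	unsigned long vdso_base = stack_top + page_size;

	printf("STACK_TOP: %#lx\n", stack_top);
	printf("gap page:  %#lx - %#lx\n", stack_top, vdso_base);
	printf("vdso:      %#lx - %#lx (VDSO_BASE .. VDSO_LIMIT)\n",
	       vdso_base, vdso_base + vdso_size);
	return 0;
}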
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index ddb70fb13fbc..8bae33ab320a 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -71,6 +71,35 @@ enum {
&(*(struct psw_bits *)(&(__psw))); \
}))
+#define PSW32_MASK_PER 0x40000000UL
+#define PSW32_MASK_DAT 0x04000000UL
+#define PSW32_MASK_IO 0x02000000UL
+#define PSW32_MASK_EXT 0x01000000UL
+#define PSW32_MASK_KEY 0x00F00000UL
+#define PSW32_MASK_BASE 0x00080000UL /* Always one */
+#define PSW32_MASK_MCHECK 0x00040000UL
+#define PSW32_MASK_WAIT 0x00020000UL
+#define PSW32_MASK_PSTATE 0x00010000UL
+#define PSW32_MASK_ASC 0x0000C000UL
+#define PSW32_MASK_CC 0x00003000UL
+#define PSW32_MASK_PM 0x00000f00UL
+#define PSW32_MASK_RI 0x00000080UL
+
+#define PSW32_ADDR_AMODE 0x80000000UL
+#define PSW32_ADDR_INSN 0x7FFFFFFFUL
+
+#define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20)
+
+#define PSW32_ASC_PRIMARY 0x00000000UL
+#define PSW32_ASC_ACCREG 0x00004000UL
+#define PSW32_ASC_SECONDARY 0x00008000UL
+#define PSW32_ASC_HOME 0x0000C000UL
+
+typedef struct {
+ unsigned int mask;
+ unsigned int addr;
+} psw_t32 __aligned(8);
+
#define PGM_INT_CODE_MASK 0x7f
#define PGM_INT_CODE_PER 0x80
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 04cb1e7582a6..236b34b75ddb 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -87,6 +87,7 @@ struct sclp_info {
unsigned char has_diag318 : 1;
unsigned char has_sipl : 1;
unsigned char has_dirq : 1;
+ unsigned char has_iplcc : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index a7c3ccf681da..7ce584aff5bb 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -508,9 +508,21 @@ static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
*/
static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
+ /* Must be status pending. */
+ if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND))
+ return 0;
+
+ /* Must have alert status. */
+ if (!(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS))
+ return 0;
+
+ /* Must be alone or together with primary, secondary or both,
+ * => no intermediate status.
+ */
+ if (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS)
+ return 0;
+
+ return 1;
}
/**
@@ -522,10 +534,25 @@ static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
*/
static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
{
- return (scsw->cmd.fctl != 0) &&
- (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
- (scsw->cmd.actl & SCSW_ACTL_SUSPENDED));
+ /* Must indicate at least one I/O function. */
+ if (!scsw->cmd.fctl)
+ return 0;
+
+ /* Must be status pending. */
+ if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND))
+ return 0;
+
+ /* Can be status pending alone, or with any combination of primary,
+ * secondary and alert => no intermediate status.
+ */
+ if (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS))
+ return 1;
+
+ /* If intermediate, must be suspended. */
+ if (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)
+ return 1;
+
+ return 0;
}
/**
@@ -675,9 +702,21 @@ static inline int scsw_tm_is_valid_q(union scsw *scsw)
*/
static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
+ /* Must be status pending. */
+ if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND))
+ return 0;
+
+ /* Must have alert status. */
+ if (!(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS))
+ return 0;
+
+ /* Must be alone or together with primary, secondary or both,
+ * => no intermediate status.
+ */
+ if (scsw->tm.stctl & SCSW_STCTL_INTER_STATUS)
+ return 0;
+
+ return 1;
}
/**
@@ -689,11 +728,25 @@ static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
*/
static inline int scsw_tm_is_valid_pno(union scsw *scsw)
{
- return (scsw->tm.fctl != 0) &&
- (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
- ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
+ /* Must indicate at least one I/O function. */
+ if (!scsw->tm.fctl)
+ return 0;
+
+ /* Must be status pending. */
+ if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND))
+ return 0;
+
+ /* Can be status pending alone, or with any combination of primary,
+ * secondary and alert => no intermediate status.
+ */
+ if (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS))
+ return 1;
+
+ /* If intermediate, must be suspended. */
+ if (scsw->tm.actl & SCSW_ACTL_SUSPENDED)
+ return 1;
+
+ return 0;
}
/**
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 24a54443c865..37127cd7749e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -77,8 +77,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
+ kcsan_release();
asm_inline volatile(
- ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
" sth %1,%0\n"
: "=R" (((unsigned short *) &lp->lock)[1])
: "d" (0) : "cc", "memory");
diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h
index ba07463897c1..4d74d7e33340 100644
--- a/arch/s390/include/asm/stp.h
+++ b/arch/s390/include/asm/stp.h
@@ -44,8 +44,8 @@ struct stp_sstpi {
u32 : 32;
u32 ctnid[3];
u32 : 32;
- u32 todoff[4];
- u32 rsvd[48];
+ u64 todoff;
+ u32 rsvd[50];
} __packed;
struct stp_tzib {
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
index 87e6cc2aeba4..95480ed9149e 100644
--- a/arch/s390/include/asm/vx-insn.h
+++ b/arch/s390/include/asm/vx-insn.h
@@ -366,7 +366,7 @@
.macro VLM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
VX_NUM v3, \vto
- GR_NUM b2, \base /* Base register */
+ GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \hint, 0x36, v1, v3
@@ -376,7 +376,7 @@
.macro VST vr1, disp, index="%r0", base
VX_NUM v1, \vr1
GR_NUM x2, \index
- GR_NUM b2, \base /* Base register */
+ GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | (x2&15)
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x0E, v1
@@ -386,7 +386,7 @@
.macro VSTM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
VX_NUM v3, \vto
- GR_NUM b2, \base /* Base register */
+ GR_NUM b2, \base
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \hint, 0x3E, v1, v3
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 7349e96d28a0..924b876f992c 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -171,7 +171,7 @@ struct pkey_skey2pkey {
#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
/*
- * Verify the given CCA AES secure key for being able to be useable with
+ * Verify the given CCA AES secure key for being able to be usable with
* the pkey module. Check for correct key type and check for having at
* least one crypto card being able to handle this key (master key
* or old master key verification pattern matches).
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 2f04a5499d74..d83713f67530 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -4,7 +4,7 @@
*
* zcrypt 2.2.1 (user-visible header)
*
- * Copyright IBM Corp. 2001, 2019
+ * Copyright IBM Corp. 2001, 2022
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -85,7 +85,7 @@ struct ica_rsa_modexpo_crt {
struct CPRBX {
__u16 cprb_len; /* CPRB length 220 */
__u8 cprb_ver_id; /* CPRB version id. 0x02 */
- __u8 pad_000[3]; /* Alignment pad bytes */
+ __u8 _pad_000[3]; /* Alignment pad bytes */
__u8 func_id[2]; /* function id 0x5432 */
__u8 cprb_flags[4]; /* Flags */
__u32 req_parml; /* request parameter buffer len */
@@ -95,19 +95,19 @@ struct CPRBX {
__u32 rpl_datal; /* reply data block len */
__u32 rpld_datal; /* replied data block len */
__u32 req_extbl; /* request extension block len */
- __u8 pad_001[4]; /* reserved */
+ __u8 _pad_001[4]; /* reserved */
__u32 rpld_extbl; /* replied extension block len */
- __u8 padx000[16 - sizeof(__u8 *)];
+ __u8 _pad_002[16 - sizeof(__u8 *)];
__u8 __user *req_parmb; /* request parm block 'address' */
- __u8 padx001[16 - sizeof(__u8 *)];
+ __u8 _pad_003[16 - sizeof(__u8 *)];
__u8 __user *req_datab; /* request data block 'address' */
- __u8 padx002[16 - sizeof(__u8 *)];
+ __u8 _pad_004[16 - sizeof(__u8 *)];
__u8 __user *rpl_parmb; /* reply parm block 'address' */
- __u8 padx003[16 - sizeof(__u8 *)];
+ __u8 _pad_005[16 - sizeof(__u8 *)];
__u8 __user *rpl_datab; /* reply data block 'address' */
- __u8 padx004[16 - sizeof(__u8 *)];
+ __u8 _pad_006[16 - sizeof(__u8 *)];
__u8 __user *req_extb; /* request extension block 'addr'*/
- __u8 padx005[16 - sizeof(__u8 *)];
+ __u8 _pad_007[16 - sizeof(__u8 *)];
__u8 __user *rpl_extb; /* reply extension block 'address'*/
__u16 ccp_rtcode; /* server return code */
__u16 ccp_rscode; /* server reason code */
@@ -115,12 +115,10 @@ struct CPRBX {
__u8 logon_id[8]; /* Logon Identifier */
__u8 mac_value[8]; /* Mac Value */
__u8 mac_content_flgs; /* Mac content flag byte */
- __u8 pad_002; /* Alignment */
+ __u8 _pad_008; /* Alignment */
__u16 domain; /* Domain */
- __u8 usage_domain[4]; /* Usage domain */
- __u8 cntrl_domain[4]; /* Control domain */
- __u8 S390enf_mask[4]; /* S/390 enforcement mask */
- __u8 pad_004[36]; /* reserved */
+ __u8 _pad_009[12]; /* reserved, checked for zeros */
+ __u8 _pad_010[36]; /* reserved */
} __attribute__((packed));
/**
@@ -238,8 +236,8 @@ struct zcrypt_device_matrix_ext {
};
#define AUTOSELECT 0xFFFFFFFF
-#define AUTOSEL_AP ((__u16) 0xFFFF)
-#define AUTOSEL_DOM ((__u16) 0xFFFF)
+#define AUTOSEL_AP ((__u16)0xFFFF)
+#define AUTOSEL_DOM ((__u16)0xFFFF)
#define ZCRYPT_IOCTL_MAGIC 'z'
@@ -305,12 +303,12 @@ struct zcrypt_device_matrix_ext {
/**
* Supported ioctl calls
*/
-#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0)
-#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
-#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
-#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
+#define ICARSAMODEXPO _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0)
+#define ICARSACRT _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
+#define ZSECSENDCPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
+#define ZSENDEP11CPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
-#define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0)
+#define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0)
#define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT])
#define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT])
#define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT])
@@ -352,7 +350,7 @@ struct zcrypt_device_matrix {
};
/* Deprecated: use ZCRYPT_DEVICE_STATUS */
-#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
+#define ZDEVICESTATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
/* Deprecated: use ZCRYPT_STATUS_MASK */
#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64])
/* Deprecated: use ZCRYPT_QDEPTH_MASK */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c8d1b6aa823e..5851041bb214 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
+obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index cce0ddee2d02..e7bca29f9c34 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -7,8 +7,6 @@
#include <asm/facility.h>
#include <asm/nospec-branch.h>
-#define MAX_PATCH_LEN (255 - 1)
-
static int __initdata_or_module alt_instr_disabled;
static int __init disable_alternative_instructions(char *str)
@@ -19,85 +17,30 @@ static int __init disable_alternative_instructions(char *str)
early_param("noaltinstr", disable_alternative_instructions);
-struct brcl_insn {
- u16 opc;
- s32 disp;
-} __packed;
-
-static u16 __initdata_or_module nop16 = 0x0700;
-static u32 __initdata_or_module nop32 = 0x47000000;
-static struct brcl_insn __initdata_or_module nop48 = {
- 0xc004, 0
-};
-
-static const void *nops[] __initdata_or_module = {
- &nop16,
- &nop32,
- &nop48
-};
-
-static void __init_or_module add_jump_padding(void *insns, unsigned int len)
-{
- struct brcl_insn brcl = {
- 0xc0f4,
- len / 2
- };
-
- memcpy(insns, &brcl, sizeof(brcl));
- insns += sizeof(brcl);
- len -= sizeof(brcl);
-
- while (len > 0) {
- memcpy(insns, &nop16, 2);
- insns += 2;
- len -= 2;
- }
-}
-
-static void __init_or_module add_padding(void *insns, unsigned int len)
-{
- if (len > 6)
- add_jump_padding(insns, len);
- else if (len >= 2)
- memcpy(insns, nops[len / 2 - 1], len);
-}
-
static void __init_or_module __apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
struct alt_instr *a;
u8 *instr, *replacement;
- u8 insnbuf[MAX_PATCH_LEN];
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
*/
for (a = start; a < end; a++) {
- int insnbuf_sz = 0;
-
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
- if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+ if (unlikely(a->instrlen % 2)) {
WARN_ONCE(1, "cpu alternatives instructions length is "
"odd, skipping patching\n");
continue;
}
- memcpy(insnbuf, replacement, a->replacementlen);
- insnbuf_sz = a->replacementlen;
-
- if (a->instrlen > a->replacementlen) {
- add_padding(insnbuf + a->replacementlen,
- a->instrlen - a->replacementlen);
- insnbuf_sz += a->instrlen - a->replacementlen;
- }
-
- s390_kernel_write(instr, insnbuf, insnbuf_sz);
+ s390_kernel_write(instr, replacement, a->instrlen);
}
}
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 64509e7dbd3b..ef23739b277c 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -5,69 +5,59 @@
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
+#include <asm/ptrace.h>
-/* Macro that masks the high order bit of an 32 bit pointer and converts it*/
-/* to a 64 bit pointer */
-#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL))
-#define AA(__x) \
- ((unsigned long)(__x))
+/*
+ * Macro that masks the high order bit of a 32 bit pointer and
+ * converts it to a 64 bit pointer.
+ */
+#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL))
+#define AA(__x) ((unsigned long)(__x))
/* Now 32bit compatibility types */
struct ipc_kludge_32 {
- __u32 msgp; /* pointer */
- __s32 msgtyp;
+ __u32 msgp; /* pointer */
+ __s32 msgtyp;
};
/* asm/sigcontext.h */
-typedef union
-{
- __u64 d;
- __u32 f;
+typedef union {
+ __u64 d;
+ __u32 f;
} freg_t32;
-typedef struct
-{
+typedef struct {
unsigned int fpc;
unsigned int pad;
- freg_t32 fprs[__NUM_FPRS];
+ freg_t32 fprs[__NUM_FPRS];
} _s390_fp_regs32;
-typedef struct
-{
- __u32 mask;
- __u32 addr;
-} _psw_t32 __attribute__ ((aligned(8)));
-
-typedef struct
-{
- _psw_t32 psw;
+typedef struct {
+ psw_t32 psw;
__u32 gprs[__NUM_GPRS];
__u32 acrs[__NUM_ACRS];
} _s390_regs_common32;
-typedef struct
-{
+typedef struct {
_s390_regs_common32 regs;
- _s390_fp_regs32 fpregs;
+ _s390_fp_regs32 fpregs;
} _sigregs32;
-typedef struct
-{
- __u32 gprs_high[__NUM_GPRS];
- __u64 vxrs_low[__NUM_VXRS_LOW];
- __vector128 vxrs_high[__NUM_VXRS_HIGH];
- __u8 __reserved[128];
+typedef struct {
+ __u32 gprs_high[__NUM_GPRS];
+ __u64 vxrs_low[__NUM_VXRS_LOW];
+ __vector128 vxrs_high[__NUM_VXRS_HIGH];
+ __u8 __reserved[128];
} _sigregs_ext32;
#define _SIGCONTEXT_NSIG32 64
#define _SIGCONTEXT_NSIG_BPW32 32
#define __SIGNAL_FRAMESIZE32 96
-#define _SIGMASK_COPY_SIZE32 (sizeof(u32)*2)
+#define _SIGMASK_COPY_SIZE32 (sizeof(u32) * 2)
-struct sigcontext32
-{
+struct sigcontext32 {
__u32 oldmask[_COMPAT_NSIG_WORDS];
- __u32 sregs; /* pointer */
+ __u32 sregs; /* pointer */
};
/* asm/signal.h */
@@ -75,11 +65,11 @@ struct sigcontext32
/* asm/ucontext.h */
struct ucontext32 {
__u32 uc_flags;
- __u32 uc_link; /* pointer */
+ __u32 uc_link; /* pointer */
compat_stack_t uc_stack;
_sigregs32 uc_mcontext;
compat_sigset_t uc_sigmask;
- /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
+ /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
unsigned char __unused[128 - sizeof(compat_sigset_t)];
_sigregs_ext32 uc_mcontext_ext;
};
@@ -88,25 +78,6 @@ struct stat64_emu31;
struct mmap_arg_struct_emu31;
struct fadvise64_64_args;
-long compat_sys_s390_chown16(const char __user *filename, u16 user, u16 group);
-long compat_sys_s390_lchown16(const char __user *filename, u16 user, u16 group);
-long compat_sys_s390_fchown16(unsigned int fd, u16 user, u16 group);
-long compat_sys_s390_setregid16(u16 rgid, u16 egid);
-long compat_sys_s390_setgid16(u16 gid);
-long compat_sys_s390_setreuid16(u16 ruid, u16 euid);
-long compat_sys_s390_setuid16(u16 uid);
-long compat_sys_s390_setresuid16(u16 ruid, u16 euid, u16 suid);
-long compat_sys_s390_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
-long compat_sys_s390_setresgid16(u16 rgid, u16 egid, u16 sgid);
-long compat_sys_s390_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
-long compat_sys_s390_setfsuid16(u16 uid);
-long compat_sys_s390_setfsgid16(u16 gid);
-long compat_sys_s390_getgroups16(int gidsetsize, u16 __user *grouplist);
-long compat_sys_s390_setgroups16(int gidsetsize, u16 __user *grouplist);
-long compat_sys_s390_getuid16(void);
-long compat_sys_s390_geteuid16(void);
-long compat_sys_s390_getgid16(void);
-long compat_sys_s390_getegid16(void);
long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low);
long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low);
long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low);
@@ -118,8 +89,8 @@ long compat_sys_s390_fstat64(unsigned int fd, struct stat64_emu31 __user *statbu
long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct stat64_emu31 __user *statbuf, int flag);
long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg);
long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg);
-long compat_sys_s390_read(unsigned int fd, char __user * buf, compat_size_t count);
-long compat_sys_s390_write(unsigned int fd, const char __user * buf, compat_size_t count);
+long compat_sys_s390_read(unsigned int fd, char __user *buf, compat_size_t count);
+long compat_sys_s390_write(unsigned int fd, const char __user *buf, compat_size_t count);
long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise);
long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 59b69c8ab5e1..df41132ccd06 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -53,19 +53,19 @@ STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_LPP_OFFSET = __LC_LPP
.macro STBEAR address
- ALTERNATIVE "", ".insn s,0xb2010000,\address", 193
+ ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
.endm
.macro LBEAR address
- ALTERNATIVE "", ".insn s,0xb2000000,\address", 193
+ ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
.endm
.macro LPSWEY address,lpswe
- ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193
+ ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
.endm
.macro MBEAR reg
- ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
+ ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
.endm
.macro CHECK_STACK savearea
@@ -121,16 +121,16 @@ _LPP_OFFSET = __LC_LPP
.endm
.macro BPOFF
- ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82
+ ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
.endm
.macro BPON
- ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82
+ ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
.endm
.macro BPENTER tif_ptr,tif_mask
ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
- "", 82
+ "j .+12; nop; nop", 82
.endm
.macro BPEXIT tif_ptr,tif_mask
@@ -172,9 +172,19 @@ _LPP_OFFSET = __LC_LPP
lgr %r14,\reg
larl %r13,\start
slgr %r14,%r13
- lghi %r13,\end - \start
- clgr %r14,%r13
+#ifdef CONFIG_AS_IS_LLVM
+ clgfrl %r14,.Lrange_size\@
+#else
+ clgfi %r14,\end - \start
+#endif
jhe \outside_label
+#ifdef CONFIG_AS_IS_LLVM
+ .section .rodata, "a"
+ .align 4
+.Lrange_size\@:
+ .long \end - \start
+ .previous
+#endif
.endm
.macro SIEEXIT
@@ -226,7 +236,7 @@ ENTRY(__switch_to)
aghi %r3,__TASK_pid
mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
- ALTERNATIVE "", "lpp _LPP_OFFSET", 40
+ ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
BR_EX %r14
ENDPROC(__switch_to)
@@ -473,10 +483,7 @@ ENTRY(\name)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
MBEAR %r11
stmg %r8,%r9,__PT_PSW(%r11)
- tm %r8,0x0001 # coming from user space?
- jno 1f
- lctlg %c1,%c1,__LC_KERNEL_ASCE
-1: lgr %r2,%r11 # pass pointer to pt_regs
+ lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,\handler
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
tmhh %r8,0x0001 # returning to user ?
@@ -602,6 +609,7 @@ ENTRY(mcck_int_handler)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
+ lgr %r2,%r11
lgr %r15,%r1
brasl %r14,s390_handle_mcck
.Lmcck_return:
@@ -612,7 +620,7 @@ ENTRY(mcck_int_handler)
jno 0f
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
-0: ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
+0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
LBEAR 0(%r12)
lmg %r11,%r15,__PT_R11(%r11)
LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
@@ -648,7 +656,7 @@ ENTRY(mcck_int_handler)
ENDPROC(mcck_int_handler)
ENTRY(restart_int_handler)
- ALTERNATIVE "", "lpp _LPP_OFFSET", 40
+ ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
stg %r15,__LC_SAVE_AREA_RESTART
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 3033f616e256..45393919fe61 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -205,7 +205,7 @@ static void show_msi_interrupt(struct seq_file *p, int irq)
unsigned long flags;
int cpu;
- irq_lock_sparse();
+ rcu_read_lock();
desc = irq_to_desc(irq);
if (!desc)
goto out;
@@ -224,7 +224,7 @@ static void show_msi_interrupt(struct seq_file *p, int irq)
seq_putc(p, '\n');
raw_spin_unlock_irqrestore(&desc->lock, flags);
out:
- irq_unlock_sparse();
+ rcu_read_unlock();
}
/*
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 6ebf02e15c85..ab761c008f98 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -26,8 +26,10 @@
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
+#include <asm/sclp.h>
-typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
+typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long,
+ unsigned long);
extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
@@ -243,6 +245,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
*/
static void __do_machine_kexec(void *data)
{
+ unsigned long diag308_subcode;
relocate_kernel_t data_mover;
struct kimage *image = data;
@@ -251,7 +254,10 @@ static void __do_machine_kexec(void *data)
__arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
/* Call the moving routine */
- (*data_mover)(&image->head, image->start);
+ diag308_subcode = DIAG308_CLEAR_RESET;
+ if (sclp.has_iplcc)
+ diag308_subcode |= DIAG308_FLAG_EI;
+ (*data_mover)(&image->head, image->start, diag308_subcode);
/* Die if kexec returns */
disabled_wait();
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index fc60e29b8690..53ed3884fe64 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -29,6 +29,8 @@
#include <asm/switch_to.h>
#include <asm/ctl_reg.h>
#include <asm/asm-offsets.h>
+#include <asm/pai.h>
+
#include <linux/kvm_host.h>
struct mcck_struct {
@@ -169,10 +171,12 @@ void __s390_handle_mcck(void)
}
}
-void noinstr s390_handle_mcck(void)
+void noinstr s390_handle_mcck(struct pt_regs *regs)
{
trace_hardirqs_off();
+ pai_kernel_enter(regs);
__s390_handle_mcck();
+ pai_kernel_exit(regs);
trace_hardirqs_on();
}
/*
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 52c1fe23b823..0d64aafd158f 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -295,6 +295,76 @@ CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z16, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z16, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z16, CRSTE_1MB_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z16, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z16, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z16, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z16, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z16, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z16, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z16, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z16, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z16, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z16, DCW_REQ, 0x0091);
+CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_CHIP_HIT, 0x0093);
+CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_DRAWER_HIT, 0x0094);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP, 0x0095);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_IV, 0x0096);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_CHIP_HIT, 0x0097);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT, 0x0098);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE, 0x0099);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER, 0x009a);
+CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER, 0x009b);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_MEMORY, 0x009c);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE_MEMORY, 0x009d);
+CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER_MEMORY, 0x009e);
+CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER_MEMORY, 0x009f);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_IV, 0x00a0);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_CHIP_HIT, 0x00a1);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_IV, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_IV, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z16, ICW_REQ, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_CHIP_HIT, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_DRAWER_HIT, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_IV, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_CHIP_HIT, 0x00af);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT, 0x00b0);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE, 0x00b1);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER, 0x00b2);
+CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER, 0x00b3);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_MEMORY, 0x00b4);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE_MEMORY, 0x00b5);
+CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER_MEMORY, 0x00b6);
+CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER_MEMORY, 0x00b7);
+CPUMF_EVENT_ATTR(cf_z16, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
+CPUMF_EVENT_ATTR(cf_z16, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z16, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z16, LAST_HOST_TRANSLATIONS, 0x00e8);
+CPUMF_EVENT_ATTR(cf_z16, TX_NC_TABORT, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_NO_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_SPECIAL, 0x00f6);
+CPUMF_EVENT_ATTR(cf_z16, DFLT_ACCESS, 0x00f8);
+CPUMF_EVENT_ATTR(cf_z16, DFLT_CYCLES, 0x00fd);
+CPUMF_EVENT_ATTR(cf_z16, SORTL, 0x0100);
+CPUMF_EVENT_ATTR(cf_z16, DFLT_CC, 0x0109);
+CPUMF_EVENT_ATTR(cf_z16, DFLT_CCFINISH, 0x010a);
+CPUMF_EVENT_ATTR(cf_z16, NNPA_INVOCATIONS, 0x010b);
+CPUMF_EVENT_ATTR(cf_z16, NNPA_COMPLETIONS, 0x010c);
+CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d);
+CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e);
+CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
@@ -635,6 +705,80 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
NULL,
};
+static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z16, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_z16, DTLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z16, DTLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z16, CRSTE_1MB_WRITES),
+ CPUMF_EVENT_PTR(cf_z16, DTLB2_GPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z16, ITLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z16, ITLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z16, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z16, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z16, TLB2_ENGINES_BUSY),
+ CPUMF_EVENT_PTR(cf_z16, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_z16, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_z16, L1C_TLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z16, DCW_REQ),
+ CPUMF_EVENT_PTR(cf_z16, DCW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z16, DCW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z16, DCW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_IV),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_IV),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z16, ICW_REQ),
+ CPUMF_EVENT_PTR(cf_z16, ICW_REQ_IV),
+ CPUMF_EVENT_PTR(cf_z16, ICW_REQ_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z16, ICW_REQ_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_IV),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_CHIP_HIT),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER),
+ CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER_MEMORY),
+ CPUMF_EVENT_PTR(cf_z16, BCD_DFP_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z16, VX_BCD_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z16, DECIMAL_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_z16, LAST_HOST_TRANSLATIONS),
+ CPUMF_EVENT_PTR(cf_z16, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z16, DFLT_ACCESS),
+ CPUMF_EVENT_PTR(cf_z16, DFLT_CYCLES),
+ CPUMF_EVENT_PTR(cf_z16, SORTL),
+ CPUMF_EVENT_PTR(cf_z16, DFLT_CC),
+ CPUMF_EVENT_PTR(cf_z16, DFLT_CCFINISH),
+ CPUMF_EVENT_PTR(cf_z16, NNPA_INVOCATIONS),
+ CPUMF_EVENT_PTR(cf_z16, NNPA_COMPLETIONS),
+ CPUMF_EVENT_PTR(cf_z16, NNPA_WAIT_LOCK),
+ CPUMF_EVENT_PTR(cf_z16, NNPA_HOLD_LOCK),
+ CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+ CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+ NULL,
+};
+
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
@@ -749,6 +893,10 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 0x8562:
model = cpumcf_z15_pmu_event_attr;
break;
+ case 0x3931:
+ case 0x3932:
+ model = cpumcf_z16_pmu_event_attr;
+ break;
default:
model = none;
break;
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
new file mode 100644
index 000000000000..8c1545946d85
--- /dev/null
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Performance event support - Processor Activity Instrumentation Facility
+ *
+ * Copyright IBM Corp. 2022
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ */
+#define KMSG_COMPONENT "pai_crypto"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/perf_event.h>
+
+#include <asm/ctl_reg.h>
+#include <asm/pai.h>
+#include <asm/debug.h>
+
+static debug_info_t *cfm_dbg;
+static unsigned int paicrypt_cnt; /* Size of the mapped counter sets */
+ /* extracted with QPACI instruction */
+
+DEFINE_STATIC_KEY_FALSE(pai_key);
+
+struct pai_userdata {
+ u16 num;
+ u64 value;
+} __packed;
+
+struct paicrypt_map {
+ unsigned long *page; /* Page for CPU to store counters */
+ struct pai_userdata *save; /* Page to store non-zero counters */
+ unsigned int users; /* # of PAI crypto users */
+ unsigned int sampler; /* # of PAI crypto samplers */
+ unsigned int counter; /* # of PAI crypto counters */
+ struct perf_event *event; /* Perf event for sampling */
+};
+
+static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);
+
+/* Release the PMU if event is the last perf event */
+static DEFINE_MUTEX(pai_reserve_mutex);
+
+/* Adjust usage counters and remove allocated memory when all users are
+ * gone.
+ */
+static void paicrypt_event_destroy(struct perf_event *event)
+{
+ struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
+
+ cpump->event = NULL;
+ static_branch_dec(&pai_key);
+ mutex_lock(&pai_reserve_mutex);
+ if (event->attr.sample_period)
+ cpump->sampler -= 1;
+ else
+ cpump->counter -= 1;
+ debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d"
+ " sampler %d counter %d\n", __func__,
+ event->attr.config, event->cpu, cpump->sampler,
+ cpump->counter);
+ if (!cpump->counter && !cpump->sampler) {
+ debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
+ __func__, (unsigned long)cpump->page,
+ cpump->save);
+ free_page((unsigned long)cpump->page);
+ cpump->page = NULL;
+ kvfree(cpump->save);
+ cpump->save = NULL;
+ }
+ mutex_unlock(&pai_reserve_mutex);
+}
+
+static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
+{
+ if (kernel)
+ nr += PAI_CRYPTO_MAXCTR;
+ return cpump->page[nr];
+}
+
+/* Read the counter values. Return value from location in CMP. For event
+ * CRYPTO_ALL sum up all events.
+ */
+static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
+{
+ struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ u64 sum = 0;
+ int i;
+
+ if (event->attr.config != PAI_CRYPTO_BASE) {
+ return paicrypt_getctr(cpump,
+ event->attr.config - PAI_CRYPTO_BASE,
+ kernel);
+ }
+
+ for (i = 1; i <= paicrypt_cnt; i++) {
+ u64 val = paicrypt_getctr(cpump, i, kernel);
+
+ if (!val)
+ continue;
+ sum += val;
+ }
+ return sum;
+}
+
+static u64 paicrypt_getall(struct perf_event *event)
+{
+ u64 sum = 0;
+
+ if (!event->attr.exclude_kernel)
+ sum += paicrypt_getdata(event, true);
+ if (!event->attr.exclude_user)
+ sum += paicrypt_getdata(event, false);
+
+ return sum;
+}
+
+/* Used to avoid races when checking for concurrent counting and sampling
+ * access to crypto events.
+ *
+ * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
+ * allowed and when this event is running, no counting event is allowed.
+ * Several counting events are allowed in parallel, but no sampling event
+ * is allowed while one (or more) counting events are running.
+ *
+ * This function is called in process context and it is safe to block.
+ * When the event initialization function fails, no other callback will
+ * be invoked.
+ *
+ * Allocate the memory for the event.
+ */
+static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
+{
+ unsigned int *use_ptr;
+ int rc = 0;
+
+ mutex_lock(&pai_reserve_mutex);
+ if (a->sample_period) { /* Sampling requested */
+ use_ptr = &cpump->sampler;
+ if (cpump->counter || cpump->sampler)
+ rc = -EBUSY; /* ... sampling/counting active */
+ } else { /* Counting requested */
+ use_ptr = &cpump->counter;
+ if (cpump->sampler)
+ rc = -EBUSY; /* ... and sampling active */
+ }
+ if (rc)
+ goto unlock;
+
+ /* Allocate memory for counter page and counter extraction.
+ * Only the first counting event has to allocate a page.
+ */
+ if (cpump->page)
+ goto unlock;
+
+ rc = -ENOMEM;
+ cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!cpump->page)
+ goto unlock;
+ cpump->save = kvmalloc_array(paicrypt_cnt + 1,
+ sizeof(struct pai_userdata), GFP_KERNEL);
+ if (!cpump->save) {
+ free_page((unsigned long)cpump->page);
+ cpump->page = NULL;
+ goto unlock;
+ }
+ rc = 0;
+
+unlock:
+ /* If rc is non-zero, do not increment counter/sampler. */
+ if (!rc)
+ *use_ptr += 1;
+ debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx sampler %d"
+ " counter %d page %#lx save %p rc %d\n", __func__,
+ a->sample_period, cpump->sampler, cpump->counter,
+ (unsigned long)cpump->page, cpump->save, rc);
+ mutex_unlock(&pai_reserve_mutex);
+ return rc;
+}
+
+/* Might be called on a different CPU than the one the event is intended for. */
+static int paicrypt_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *a = &event->attr;
+ struct paicrypt_map *cpump;
+ int rc;
+
+ /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
+ if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
+ return -ENOENT;
+ /* PAI crypto event must be valid */
+ if (a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
+ return -EINVAL;
+ /* Allow only CPU wide operation, no process context for now. */
+ if (event->hw.target || event->cpu == -1)
+ return -ENOENT;
+ /* Allow only CRYPTO_ALL for sampling. */
+ if (a->sample_period && a->config != PAI_CRYPTO_BASE)
+ return -EINVAL;
+
+ cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
+ rc = paicrypt_busy(a, cpump);
+ if (rc)
+ return rc;
+
+ cpump->event = event;
+ event->destroy = paicrypt_event_destroy;
+
+ if (a->sample_period) {
+ a->sample_period = 1;
+ a->freq = 0;
+ /* Register for paicrypt_sched_task() to be called */
+ event->attach_state |= PERF_ATTACH_SCHED_CB;
+ /* Add raw data which contain the memory mapped counters */
+ a->sample_type |= PERF_SAMPLE_RAW;
+ /* Turn off inheritance */
+ a->inherit = 0;
+ }
+
+ static_branch_inc(&pai_key);
+ return 0;
+}
+
+static void paicrypt_read(struct perf_event *event)
+{
+ u64 prev, new, delta;
+
+ prev = local64_read(&event->hw.prev_count);
+ new = paicrypt_getall(event);
+ local64_set(&event->hw.prev_count, new);
+ delta = (prev <= new) ? new - prev
+ : (-1ULL - prev) + new + 1; /* overflow */
+ local64_add(delta, &event->count);
+}
+
+static void paicrypt_start(struct perf_event *event, int flags)
+{
+ u64 sum;
+
+ sum = paicrypt_getall(event); /* Get current value */
+ local64_set(&event->hw.prev_count, sum);
+ local64_set(&event->count, 0);
+}
+
+static int paicrypt_add(struct perf_event *event, int flags)
+{
+ struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ unsigned long ccd;
+
+ if (cpump->users++ == 0) {
+ ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
+ WRITE_ONCE(S390_lowcore.ccd, ccd);
+ __ctl_set_bit(0, 50);
+ }
+ cpump->event = event;
+ if (flags & PERF_EF_START && !event->attr.sample_period) {
+ /* Only counting needs initial counter value */
+ paicrypt_start(event, PERF_EF_RELOAD);
+ }
+ event->hw.state = 0;
+ if (event->attr.sample_period)
+ perf_sched_cb_inc(event->pmu);
+ return 0;
+}
+
+static void paicrypt_stop(struct perf_event *event, int flags)
+{
+ paicrypt_read(event);
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static void paicrypt_del(struct perf_event *event, int flags)
+{
+ struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+
+ if (event->attr.sample_period)
+ perf_sched_cb_dec(event->pmu);
+ if (!event->attr.sample_period)
+ /* Only counting needs to read counter */
+ paicrypt_stop(event, PERF_EF_UPDATE);
+ if (cpump->users-- == 1) {
+ __ctl_clear_bit(0, 50);
+ WRITE_ONCE(S390_lowcore.ccd, 0);
+ }
+}
+
+/* Create raw data and save it in the buffer. Returns the number of bytes
+ * copied. Saves only non-zero counter entries of the form
+ * 2 bytes: Number of counter
+ * 8 bytes: Value of counter
+ */
+static size_t paicrypt_copy(struct pai_userdata *userdata,
+ struct paicrypt_map *cpump,
+ bool exclude_user, bool exclude_kernel)
+{
+ int i, outidx = 0;
+
+ for (i = 1; i <= paicrypt_cnt; i++) {
+ u64 val = 0;
+
+ if (!exclude_kernel)
+ val += paicrypt_getctr(cpump, i, true);
+ if (!exclude_user)
+ val += paicrypt_getctr(cpump, i, false);
+ if (val) {
+ userdata[outidx].num = i;
+ userdata[outidx].value = val;
+ outidx++;
+ }
+ }
+ return outidx * sizeof(struct pai_userdata);
+}
+
+static int paicrypt_push_sample(void)
+{
+ struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
+ struct perf_event *event = cpump->event;
+ struct perf_sample_data data;
+ struct perf_raw_record raw;
+ struct pt_regs regs;
+ size_t rawsize;
+ int overflow;
+
+ if (!cpump->event) /* No event active */
+ return 0;
+ rawsize = paicrypt_copy(cpump->save, cpump,
+ cpump->event->attr.exclude_user,
+ cpump->event->attr.exclude_kernel);
+ if (!rawsize) /* No incremented counters */
+ return 0;
+
+ /* Setup perf sample */
+ memset(&regs, 0, sizeof(regs));
+ memset(&raw, 0, sizeof(raw));
+ memset(&data, 0, sizeof(data));
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+ if (event->attr.sample_type & PERF_SAMPLE_TID) {
+ data.tid_entry.pid = task_tgid_nr(current);
+ data.tid_entry.tid = task_pid_nr(current);
+ }
+ if (event->attr.sample_type & PERF_SAMPLE_TIME)
+ data.time = event->clock();
+ if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
+ data.id = event->id;
+ if (event->attr.sample_type & PERF_SAMPLE_CPU) {
+ data.cpu_entry.cpu = smp_processor_id();
+ data.cpu_entry.reserved = 0;
+ }
+ if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ raw.frag.size = rawsize;
+ raw.frag.data = cpump->save;
+ raw.size = raw.frag.size;
+ data.raw = &raw;
+ }
+
+ overflow = perf_event_overflow(event, &data, &regs);
+ perf_event_update_userpage(event);
+ /* Clear lowcore page after read */
+ memset(cpump->page, 0, PAGE_SIZE);
+ return overflow;
+}
+
+/* Called on schedule-in and schedule-out. No access to event structure,
+ * but for sampling only event CRYPTO_ALL is allowed.
+ */
+static void paicrypt_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+ /* We started with a clean page on event installation. So read out
+ * results on schedule_out and if page was dirty, clear values.
+ */
+ if (!sched_in)
+ paicrypt_push_sample();
+}
+
+/* Attribute definitions for paicrypt interface. As with other CPU
+ * Measurement Facilities, there is one attribute per mapped counter.
+ * The number of mapped counters may vary per machine generation. Use
+ * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
+ * to determine the number of mapped counters. The instruction returns
+ * a positive number, which is the highest number of supported counters.
+ * All counters less than this number are also supported; there are no
+ * holes. A returned number of zero means no support for mapped counters.
+ *
+ * The identification of the counter is a unique number. The chosen range
+ * is 0x1000 + offset in mapped kernel page.
+ * All CPU Measurement Facility counter identifiers must be unique and
+ * the numbers from 0 to 496 are already used for the CPU Measurement
+ * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
+ * used for the CPU Measurement Sampling facility.
+ */
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *paicrypt_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group paicrypt_events_group = {
+ .name = "events",
+ .attrs = NULL /* Filled in attr_event_init() */
+};
+
+static struct attribute_group paicrypt_format_group = {
+ .name = "format",
+ .attrs = paicrypt_format_attr,
+};
+
+static const struct attribute_group *paicrypt_attr_groups[] = {
+ &paicrypt_events_group,
+ &paicrypt_format_group,
+ NULL,
+};
+
+/* Performance monitoring unit for mapped counters */
+static struct pmu paicrypt = {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = paicrypt_event_init,
+ .add = paicrypt_add,
+ .del = paicrypt_del,
+ .start = paicrypt_start,
+ .stop = paicrypt_stop,
+ .read = paicrypt_read,
+ .sched_task = paicrypt_sched_task,
+ .attr_groups = paicrypt_attr_groups
+};
+
+/* List of symbolic PAI counter names. */
+static const char * const paicrypt_ctrnames[] = {
+ [0] = "CRYPTO_ALL",
+ [1] = "KM_DEA",
+ [2] = "KM_TDEA_128",
+ [3] = "KM_TDEA_192",
+ [4] = "KM_ENCRYPTED_DEA",
+ [5] = "KM_ENCRYPTED_TDEA_128",
+ [6] = "KM_ENCRYPTED_TDEA_192",
+ [7] = "KM_AES_128",
+ [8] = "KM_AES_192",
+ [9] = "KM_AES_256",
+ [10] = "KM_ENCRYPTED_AES_128",
+ [11] = "KM_ENCRYPTED_AES_192",
+ [12] = "KM_ENCRYPTED_AES_256",
+ [13] = "KM_XTS_AES_128",
+ [14] = "KM_XTS_AES_256",
+ [15] = "KM_XTS_ENCRYPTED_AES_128",
+ [16] = "KM_XTS_ENCRYPTED_AES_256",
+ [17] = "KMC_DEA",
+ [18] = "KMC_TDEA_128",
+ [19] = "KMC_TDEA_192",
+ [20] = "KMC_ENCRYPTED_DEA",
+ [21] = "KMC_ENCRYPTED_TDEA_128",
+ [22] = "KMC_ENCRYPTED_TDEA_192",
+ [23] = "KMC_AES_128",
+ [24] = "KMC_AES_192",
+ [25] = "KMC_AES_256",
+ [26] = "KMC_ENCRYPTED_AES_128",
+ [27] = "KMC_ENCRYPTED_AES_192",
+ [28] = "KMC_ENCRYPTED_AES_256",
+ [29] = "KMC_PRNG",
+ [30] = "KMA_GCM_AES_128",
+ [31] = "KMA_GCM_AES_192",
+ [32] = "KMA_GCM_AES_256",
+ [33] = "KMA_GCM_ENCRYPTED_AES_128",
+ [34] = "KMA_GCM_ENCRYPTED_AES_192",
+ [35] = "KMA_GCM_ENCRYPTED_AES_256",
+ [36] = "KMF_DEA",
+ [37] = "KMF_TDEA_128",
+ [38] = "KMF_TDEA_192",
+ [39] = "KMF_ENCRYPTED_DEA",
+ [40] = "KMF_ENCRYPTED_TDEA_128",
+ [41] = "KMF_ENCRYPTED_TDEA_192",
+ [42] = "KMF_AES_128",
+ [43] = "KMF_AES_192",
+ [44] = "KMF_AES_256",
+ [45] = "KMF_ENCRYPTED_AES_128",
+ [46] = "KMF_ENCRYPTED_AES_192",
+ [47] = "KMF_ENCRYPTED_AES_256",
+ [48] = "KMCTR_DEA",
+ [49] = "KMCTR_TDEA_128",
+ [50] = "KMCTR_TDEA_192",
+ [51] = "KMCTR_ENCRYPTED_DEA",
+ [52] = "KMCTR_ENCRYPTED_TDEA_128",
+ [53] = "KMCTR_ENCRYPTED_TDEA_192",
+ [54] = "KMCTR_AES_128",
+ [55] = "KMCTR_AES_192",
+ [56] = "KMCTR_AES_256",
+ [57] = "KMCTR_ENCRYPTED_AES_128",
+ [58] = "KMCTR_ENCRYPTED_AES_192",
+ [59] = "KMCTR_ENCRYPTED_AES_256",
+ [60] = "KMO_DEA",
+ [61] = "KMO_TDEA_128",
+ [62] = "KMO_TDEA_192",
+ [63] = "KMO_ENCRYPTED_DEA",
+ [64] = "KMO_ENCRYPTED_TDEA_128",
+ [65] = "KMO_ENCRYPTED_TDEA_192",
+ [66] = "KMO_AES_128",
+ [67] = "KMO_AES_192",
+ [68] = "KMO_AES_256",
+ [69] = "KMO_ENCRYPTED_AES_128",
+ [70] = "KMO_ENCRYPTED_AES_192",
+ [71] = "KMO_ENCRYPTED_AES_256",
+ [72] = "KIMD_SHA_1",
+ [73] = "KIMD_SHA_256",
+ [74] = "KIMD_SHA_512",
+ [75] = "KIMD_SHA3_224",
+ [76] = "KIMD_SHA3_256",
+ [77] = "KIMD_SHA3_384",
+ [78] = "KIMD_SHA3_512",
+ [79] = "KIMD_SHAKE_128",
+ [80] = "KIMD_SHAKE_256",
+ [81] = "KIMD_GHASH",
+ [82] = "KLMD_SHA_1",
+ [83] = "KLMD_SHA_256",
+ [84] = "KLMD_SHA_512",
+ [85] = "KLMD_SHA3_224",
+ [86] = "KLMD_SHA3_256",
+ [87] = "KLMD_SHA3_384",
+ [88] = "KLMD_SHA3_512",
+ [89] = "KLMD_SHAKE_128",
+ [90] = "KLMD_SHAKE_256",
+ [91] = "KMAC_DEA",
+ [92] = "KMAC_TDEA_128",
+ [93] = "KMAC_TDEA_192",
+ [94] = "KMAC_ENCRYPTED_DEA",
+ [95] = "KMAC_ENCRYPTED_TDEA_128",
+ [96] = "KMAC_ENCRYPTED_TDEA_192",
+ [97] = "KMAC_AES_128",
+ [98] = "KMAC_AES_192",
+ [99] = "KMAC_AES_256",
+ [100] = "KMAC_ENCRYPTED_AES_128",
+ [101] = "KMAC_ENCRYPTED_AES_192",
+ [102] = "KMAC_ENCRYPTED_AES_256",
+ [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
+ [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
+ [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
+ [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
+ [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
+ [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
+ [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
+ [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
+ [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
+ [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
+ [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
+ [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
+ [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
+ [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
+ [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
+ [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
+ [119] = "PCC_SCALAR_MULTIPLY_P256",
+ [120] = "PCC_SCALAR_MULTIPLY_P384",
+ [121] = "PCC_SCALAR_MULTIPLY_P521",
+ [122] = "PCC_SCALAR_MULTIPLY_ED25519",
+ [123] = "PCC_SCALAR_MULTIPLY_ED448",
+ [124] = "PCC_SCALAR_MULTIPLY_X25519",
+ [125] = "PCC_SCALAR_MULTIPLY_X448",
+ [126] = "PRNO_SHA_512_DRNG",
+ [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
+ [128] = "PRNO_TRNG",
+ [129] = "KDSA_ECDSA_VERIFY_P256",
+ [130] = "KDSA_ECDSA_VERIFY_P384",
+ [131] = "KDSA_ECDSA_VERIFY_P521",
+ [132] = "KDSA_ECDSA_SIGN_P256",
+ [133] = "KDSA_ECDSA_SIGN_P384",
+ [134] = "KDSA_ECDSA_SIGN_P521",
+ [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
+ [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
+ [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
+ [138] = "KDSA_EDDSA_VERIFY_ED25519",
+ [139] = "KDSA_EDDSA_VERIFY_ED448",
+ [140] = "KDSA_EDDSA_SIGN_ED25519",
+ [141] = "KDSA_EDDSA_SIGN_ED448",
+ [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
+ [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
+ [144] = "PCKMO_ENCRYPT_DEA_KEY",
+ [145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
+ [146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
+ [147] = "PCKMO_ENCRYPT_AES_128_KEY",
+ [148] = "PCKMO_ENCRYPT_AES_192_KEY",
+ [149] = "PCKMO_ENCRYPT_AES_256_KEY",
+ [150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
+ [151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
+ [152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
+ [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
+ [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
+ [155] = "IBM_RESERVED_155",
+ [156] = "IBM_RESERVED_156",
+};
+
+static void __init attr_event_free(struct attribute **attrs, int num)
+{
+ struct perf_pmu_events_attr *pa;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct device_attribute *dap;
+
+ dap = container_of(attrs[i], struct device_attribute, attr);
+ pa = container_of(dap, struct perf_pmu_events_attr, attr);
+ kfree(pa);
+ }
+ kfree(attrs);
+}
+
+static int __init attr_event_init_one(struct attribute **attrs, int num)
+{
+ struct perf_pmu_events_attr *pa;
+
+ pa = kzalloc(sizeof(*pa), GFP_KERNEL);
+ if (!pa)
+ return -ENOMEM;
+
+ sysfs_attr_init(&pa->attr.attr);
+ pa->id = PAI_CRYPTO_BASE + num;
+ pa->attr.attr.name = paicrypt_ctrnames[num];
+ pa->attr.attr.mode = 0444;
+ pa->attr.show = cpumf_events_sysfs_show;
+ pa->attr.store = NULL;
+ attrs[num] = &pa->attr.attr;
+ return 0;
+}
+
+/* Create PMU sysfs event attributes on the fly. */
+static int __init attr_event_init(void)
+{
+ struct attribute **attrs;
+ int ret, i;
+
+ attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
+ GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
+ ret = attr_event_init_one(attrs, i);
+ if (ret) {
+ attr_event_free(attrs, i - 1);
+ return ret;
+ }
+ }
+ attrs[i] = NULL;
+ paicrypt_events_group.attrs = attrs;
+ return 0;
+}
+
+static int __init paicrypt_init(void)
+{
+ struct qpaci_info_block ib;
+ int rc;
+
+ if (!test_facility(196))
+ return 0;
+
+ qpaci(&ib);
+ paicrypt_cnt = ib.num_cc;
+ if (paicrypt_cnt == 0)
+ return 0;
+ if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
+ paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;
+
+ rc = attr_event_init(); /* Export known PAI crypto events */
+ if (rc) {
+ pr_err("Creation of PMU pai_crypto /sysfs failed\n");
+ return rc;
+ }
+
+ /* Setup s390dbf facility */
+ cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
+ if (!cfm_dbg) {
+ pr_err("Registration of s390dbf pai_crypto failed\n");
+ return -ENOMEM;
+ }
+ debug_register_view(cfm_dbg, &debug_sprintf_view);
+
+ rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
+ if (rc) {
+ pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
+ rc);
+ debug_unregister_view(cfm_dbg, &debug_sprintf_view);
+ debug_unregister(cfm_dbg);
+ return rc;
+ }
+ return 0;
+}
+
+device_initcall(paicrypt_init);
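For sampling events the driver exports the counters through PERF_SAMPLE_RAW: paicrypt_copy() packs one {u16 counter number, u64 value} pair per counter that incremented, and the counter number maps back to the perf event as PAI_CRYPTO_BASE (0x1000) + number. A hypothetical user-space decoder for such a payload (a sketch, not part of the tooling in this series):

/*
 * Decodes the packed array of {u16 num, u64 value} entries produced by
 * paicrypt_copy(); each entry is 10 bytes, as in the kernel's __packed
 * struct pai_userdata.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pai_userdata {
	uint16_t num;
	uint64_t value;
} __attribute__((packed));

static void decode_pai_raw(const void *raw, size_t rawsize)
{
	size_t i, entries = rawsize / sizeof(struct pai_userdata);
	struct pai_userdata ud;

	for (i = 0; i < entries; i++) {
		/* copy out to avoid unaligned loads from the packed buffer */
		memcpy(&ud, (const char *)raw + i * sizeof(ud), sizeof(ud));
		printf("counter %u (event %#x): %llu\n", (unsigned)ud.num,
		       (unsigned)(0x1000 + ud.num),	/* PAI_CRYPTO_BASE + num */
		       (unsigned long long)ud.value);
	}
}

int main(void)
{
	struct pai_userdata sample[2] = {
		{ .num = 7,  .value = 42 },	/* KM_AES_128 */
		{ .num = 73, .value = 3 },	/* KIMD_SHA_256 */
	};

	decode_pai_raw(sample, sizeof(sample));
	return 0;
}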
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index 9438368c3632..a9a1a6f45375 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -14,6 +14,7 @@
* moves the new kernel to its destination...
* %r2 = pointer to first kimage_entry_t
* %r3 = start address - where to jump to after the job is done...
+ * %r4 = subcode
*
* %r5 will be used as temp. storage
* %r6 holds the destination address
@@ -56,7 +57,7 @@ ENTRY(relocate_kernel)
jo 0b
j .base
.done:
- sgr %r0,%r0 # clear register r0
+ lgr %r0,%r4 # subcode
cghi %r3,0
je .diag
la %r4,load_psw-.base(%r13) # load psw-address into the register
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d860ac300919..8d91eccc0963 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -494,7 +494,7 @@ static void __init setup_lowcore_dat_off(void)
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
- set_prefix((u32)(unsigned long) lc);
+ set_prefix(__pa(lc));
lowcore_ptr[0] = lc;
}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 326cb8f75f58..6b7b6d5e3632 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -364,7 +364,7 @@ static inline int check_sync_clock(void)
* Apply clock delta to the global data structures.
* This is called once on the CPU that performed the clock sync.
*/
-static void clock_sync_global(unsigned long delta)
+static void clock_sync_global(long delta)
{
unsigned long now, adj;
struct ptff_qto qto;
@@ -400,7 +400,7 @@ static void clock_sync_global(unsigned long delta)
* Apply clock delta to the per-CPU data structures of this CPU.
* This is called for each online CPU after the call to clock_sync_global.
*/
-static void clock_sync_local(unsigned long delta)
+static void clock_sync_local(long delta)
{
/* Add the delta to the clock comparator. */
if (S390_lowcore.clock_comparator != clock_comparator_max) {
@@ -424,7 +424,7 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
- unsigned long clock_delta;
+ long clock_delta;
};
/*
@@ -544,7 +544,7 @@ static int stpinfo_valid(void)
static int stp_sync_clock(void *data)
{
struct clock_sync_data *sync = data;
- u64 clock_delta, flags;
+ long clock_delta, flags;
static int first;
int rc;
@@ -554,9 +554,7 @@ static int stp_sync_clock(void *data)
while (atomic_read(&sync->cpus) != 0)
cpu_relax();
rc = 0;
- if (stp_info.todoff[0] || stp_info.todoff[1] ||
- stp_info.todoff[2] || stp_info.todoff[3] ||
- stp_info.tmd != 2) {
+ if (stp_info.todoff || stp_info.tmd != 2) {
flags = vdso_update_begin();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
&clock_delta);
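Making the STP clock delta signed matters because the TOD offset reported by the machine can be negative; stored in an unsigned type, the sign is silently lost as soon as sign-dependent arithmetic or comparisons are applied. Which exact expression misbehaved is not visible from these hunks; the toy program below (not from the patch) only illustrates the general hazard.

#include <stdio.h>

int main(void)
{
	long sd = -5;				/* signed TOD offset */
	unsigned long ud = (unsigned long)sd;	/* the old, unsigned view */

	/* The "negative" offset turns into a huge positive value ... */
	printf("signed: %ld  unsigned: %lu\n", sd, ud);
	/* ... so sign-dependent arithmetic (here: halving) goes wrong. */
	printf("signed/2: %ld  unsigned/2: %lu\n", sd / 2, ud / 2);
	return 0;
}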
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 99694260cac9..5075cde77b29 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
+#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
@@ -160,10 +161,9 @@ int vdso_getcpu_init(void)
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
- unsigned long vdso_text_len, vdso_mapping_len;
- unsigned long vvar_start, vdso_text_start;
+ unsigned long vvar_start, vdso_text_start, vdso_text_len;
struct vm_special_mapping *vdso_mapping;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -180,8 +180,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_text_len = vdso64_end - vdso64_start;
vdso_mapping = &vdso64_mapping;
}
- vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
- vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+ vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
rc = vvar_start;
if (IS_ERR_VALUE(vvar_start))
goto out;
@@ -210,6 +209,52 @@ out:
return rc;
}
+static unsigned long vdso_addr(unsigned long start, unsigned long len)
+{
+ unsigned long addr, end, offset;
+
+ /*
+ * Round up the start address. It can start out unaligned as a result
+ * of stack start randomization.
+ */
+ start = PAGE_ALIGN(start);
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+ if (end >= VDSO_BASE)
+ end = VDSO_BASE;
+ end -= len;
+
+ if (end > start) {
+ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ addr = start + (offset << PAGE_SHIFT);
+ } else {
+ addr = start;
+ }
+ return addr;
+}
+
+unsigned long vdso_size(void)
+{
+ unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+
+ if (is_compat_task())
+ size += vdso32_end - vdso32_start;
+ else
+ size += vdso64_end - vdso64_start;
+ return PAGE_ALIGN(size);
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ unsigned long addr = VDSO_BASE;
+ unsigned long size = vdso_size();
+
+ if (current->flags & PF_RANDOMIZE)
+ addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
+ return map_vdso(addr, size);
+}
+
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
int pages = (end - start) >> PAGE_SHIFT;
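With PF_RANDOMIZE set, the new vdso_addr() picks a page-aligned slot between (roughly) the top of the stack and VDSO_BASE, so the vDSO base should now differ between runs. A tiny standalone check, not from the patch, that prints the per-process vDSO base from the auxiliary vector; running it a few times on a kernel with this change (and address space randomization enabled) should show varying addresses.

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base address of the vDSO mapping for this process. */
	printf("vdso @ %#lx\n", getauxval(AT_SYSINFO_EHDR));
	return 0;
}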
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 5beb7a4a11b3..83bb5cf97282 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -11,7 +11,6 @@
#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
-#include <linux/compat.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5e7ea8b111e8..04d4c6cf898e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock)
int owner;
asm_inline volatile(
- ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
return owner;
@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
int expected = old;
asm_inline volatile(
- ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index e54f928503c5..d545f5c39f7e 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -58,9 +58,9 @@ static inline unsigned long mmap_base(unsigned long rnd,
/*
* Top of mmap area (just below the process stack).
- * Leave at least a ~32 MB hole.
+ * Leave at least a ~128 MB hole.
*/
- gap_min = 32 * 1024 * 1024UL;
+ gap_min = SZ_128M;
gap_max = (STACK_TOP / 6) * 5;
if (gap < gap_min)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e563cb65c0c4..bc980fd313d5 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -799,7 +799,7 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
struct zpci_dev *zdev;
int rc;
- zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
+ zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
if (!zdev)
return ERR_PTR(-ENOMEM);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1057d7af4a55..375e0a5120bc 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -30,7 +30,7 @@ bool zpci_unique_uid;
void update_uid_checking(bool new)
{
if (zpci_unique_uid != new)
- zpci_dbg(1, "uid checking:%d\n", new);
+ zpci_dbg(3, "uid checking:%d\n", new);
zpci_unique_uid = new;
}
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 3408c0df3ebf..ca6bd98eec13 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -196,7 +196,7 @@ int __init zpci_debug_init(void)
if (!pci_debug_err_id)
return -EINVAL;
debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
- debug_set_level(pci_debug_err_id, 6);
+ debug_set_level(pci_debug_err_id, 3);
debugfs_root = debugfs_create_dir("pci", NULL);
return 0;
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index ea9db5cea64e..b9324ca2eb94 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -321,9 +321,6 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
ccdf->fid, ccdf->fh, ccdf->pec);
- zpci_err("avail CCDF:\n");
- zpci_err_hex(ccdf, sizeof(*ccdf));
-
switch (ccdf->pec) {
case 0x0301: /* Reserved|Standby -> Configured */
if (!zdev) {
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 1710d006ee93..1a822b7799f8 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -18,16 +18,40 @@
#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
-static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
+struct zpci_err_insn_data {
+ u8 insn;
+ u8 cc;
+ u8 status;
+ union {
+ struct {
+ u64 req;
+ u64 offset;
+ };
+ struct {
+ u64 addr;
+ u64 len;
+ };
+ };
+} __packed;
+
+static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status,
+ u64 req, u64 offset)
{
- struct {
- u64 req;
- u64 offset;
- u8 cc;
- u8 status;
- } __packed data = {req, offset, cc, status};
-
- zpci_err_hex(&data, sizeof(data));
+ struct zpci_err_insn_data data = {
+ .insn = insn, .cc = cc, .status = status,
+ .req = req, .offset = offset};
+
+ zpci_err_hex_level(lvl, &data, sizeof(data));
+}
+
+static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status,
+ u64 addr, u64 len)
+{
+ struct zpci_err_insn_data data = {
+ .insn = insn, .cc = cc, .status = status,
+ .addr = addr, .len = len};
+
+ zpci_err_hex_level(lvl, &data, sizeof(data));
}
/* Modify PCI Function Controls */
@@ -47,16 +71,24 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
+ bool retried = false;
u8 cc;
do {
cc = __mpcifc(req, fib, status);
- if (cc == 2)
+ if (cc == 2) {
msleep(ZPCI_INSN_BUSY_DELAY);
+ if (!retried) {
+ zpci_err_insn_req(1, 'M', cc, *status, req, 0);
+ retried = true;
+ }
+ }
} while (cc == 2);
if (cc)
- zpci_err_insn(cc, *status, req, 0);
+ zpci_err_insn_req(0, 'M', cc, *status, req, 0);
+ else if (retried)
+ zpci_err_insn_req(1, 'M', cc, *status, req, 0);
return cc;
}
@@ -80,16 +112,24 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
+ bool retried = false;
u8 cc, status;
do {
cc = __rpcit(fn, addr, range, &status);
- if (cc == 2)
+ if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
+ if (!retried) {
+ zpci_err_insn_addr(1, 'R', cc, status, addr, range);
+ retried = true;
+ }
+ }
} while (cc == 2);
if (cc)
- zpci_err_insn(cc, status, addr, range);
+ zpci_err_insn_addr(0, 'R', cc, status, addr, range);
+ else if (retried)
+ zpci_err_insn_addr(1, 'R', cc, status, addr, range);
if (cc == 1 && (status == 4 || status == 16))
return -ENOMEM;
@@ -144,17 +184,25 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
int __zpci_load(u64 *data, u64 req, u64 offset)
{
+ bool retried = false;
u8 status;
int cc;
do {
cc = __pcilg(data, req, offset, &status);
- if (cc == 2)
+ if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
+ if (!retried) {
+ zpci_err_insn_req(1, 'l', cc, status, req, offset);
+ retried = true;
+ }
+ }
} while (cc == 2);
if (cc)
- zpci_err_insn(cc, status, req, offset);
+ zpci_err_insn_req(0, 'l', cc, status, req, offset);
+ else if (retried)
+ zpci_err_insn_req(1, 'l', cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
@@ -198,7 +246,7 @@ int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
cc = __pcilg_mio(data, (__force u64) addr, len, &status);
if (cc)
- zpci_err_insn(cc, status, 0, (__force u64) addr);
+ zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len);
return (cc > 0) ? -EIO : cc;
}
@@ -225,17 +273,25 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
int __zpci_store(u64 data, u64 req, u64 offset)
{
+ bool retried = false;
u8 status;
int cc;
do {
cc = __pcistg(data, req, offset, &status);
- if (cc == 2)
+ if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
+ if (!retried) {
+ zpci_err_insn_req(1, 's', cc, status, req, offset);
+ retried = true;
+ }
+ }
} while (cc == 2);
if (cc)
- zpci_err_insn(cc, status, req, offset);
+ zpci_err_insn_req(0, 's', cc, status, req, offset);
+ else if (retried)
+ zpci_err_insn_req(1, 's', cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
@@ -278,7 +334,7 @@ int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
cc = __pcistg_mio(data, (__force u64) addr, len, &status);
if (cc)
- zpci_err_insn(cc, status, 0, (__force u64) addr);
+ zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len);
return (cc > 0) ? -EIO : cc;
}
@@ -304,17 +360,25 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
+ bool retried = false;
u8 status;
int cc;
do {
cc = __pcistb(data, req, offset, &status);
- if (cc == 2)
+ if (cc == 2) {
udelay(ZPCI_INSN_BUSY_DELAY);
+ if (!retried) {
+ zpci_err_insn_req(0, 'b', cc, status, req, offset);
+ retried = true;
+ }
+ }
} while (cc == 2);
if (cc)
- zpci_err_insn(cc, status, req, offset);
+ zpci_err_insn_req(0, 'b', cc, status, req, offset);
+ else if (retried)
+ zpci_err_insn_req(1, 'b', cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
@@ -358,7 +422,7 @@ int zpci_write_block(volatile void __iomem *dst,
cc = __pcistb_mio(src, (__force u64) dst, len, &status);
if (cc)
- zpci_err_insn(cc, status, 0, (__force u64) dst);
+ zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len);
return (cc > 0) ? -EIO : cc;
}
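The pci_insn.c hunks above all follow the same shape: the first busy completion (cc == 2) is logged once at a verbose debug level, a final failure at the error level, and a success that only came after retries once more at the verbose level. Below is a self-contained sketch of that pattern with stubbed-out operation and logging functions; the names issue_op/log_event are made up for illustration, and the sleep between retries used by the real code is omitted.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the PCI instruction and the s390 debug feature. */
static int issue_op(void)
{
	static int busy = 2;		/* report "busy" twice, then succeed */

	return busy-- > 0 ? 2 : 0;
}

static void log_event(int level, const char *what, int cc)
{
	printf("level %d: %s (cc=%d)\n", level, what, cc);
}

/*
 * Retry/trace pattern: record the first busy condition once at a verbose
 * level, a hard failure at the error level, and a success that needed
 * retries once more at the verbose level.
 */
static int do_op_with_logging(void)
{
	bool retried = false;
	int cc;

	do {
		cc = issue_op();
		if (cc == 2 && !retried) {
			log_event(1, "busy, retrying", cc);
			retried = true;
		}
	} while (cc == 2);

	if (cc)
		log_event(0, "failed", cc);
	else if (retried)
		log_event(1, "succeeded after retry", cc);
	return cc;
}

int main(void)
{
	return do_op_with_logging();
}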
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 3d1c31e0cf3d..6f835124ee82 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -44,11 +44,14 @@
.endm
.macro MEMSWAP dst,src,buf,len
-10: cghi \len,bufsz
+10: larl %r0,purgatory_end
+ larl %r1,stack
+ slgr %r0,%r1
+ cgr \len,%r0
jh 11f
lgr %r4,\len
j 12f
-11: lghi %r4,bufsz
+11: lgr %r4,%r0
12: MEMCPY \buf,\dst,%r4
MEMCPY \dst,\src,%r4
@@ -135,12 +138,18 @@ ENTRY(purgatory_start)
.start_crash_kernel:
/* Location of purgatory_start in crash memory */
+ larl %r0,.base_crash
+ larl %r1,purgatory_start
+ slgr %r0,%r1
lgr %r8,%r13
- aghi %r8,-(.base_crash-purgatory_start)
+ sgr %r8,%r0
/* Destination for this code i.e. end of memory to be swapped. */
+ larl %r0,purgatory_end
+ larl %r1,purgatory_start
+ slgr %r0,%r1
lg %r9,crash_size-.base_crash(%r13)
- aghi %r9,-(purgatory_end-purgatory_start)
+ sgr %r9,%r0
/* Destination in crash memory, i.e. same as r9 but in crash memory. */
lg %r10,crash_start-.base_crash(%r13)
@@ -149,15 +158,19 @@ ENTRY(purgatory_start)
/* Buffer location (in crash memory) and size. As the purgatory is
* behind the point of no return it can re-use the stack as buffer.
*/
- lghi %r11,bufsz
+ larl %r11,purgatory_end
larl %r12,stack
+ slgr %r11,%r12
MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */
MEMCPY %r9,%r8,%r11 /* self -> dst */
/* Jump to new location. */
lgr %r7,%r9
- aghi %r7,.jump_to_dst-purgatory_start
+ larl %r0,.jump_to_dst
+ larl %r1,purgatory_start
+ slgr %r0,%r1
+ agr %r7,%r0
br %r7
.jump_to_dst:
@@ -169,7 +182,10 @@ ENTRY(purgatory_start)
/* Load new buffer location after jump */
larl %r7,stack
- aghi %r10,stack-purgatory_start
+ lgr %r0,%r7
+ larl %r1,purgatory_start
+ slgr %r0,%r1
+ agr %r10,%r0
MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */
/* Now the code is set up to run from its designated location. Start
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 43184640b579..674ed46d3ced 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -10,7 +10,7 @@
#include <asm/fpu/api.h>
/* Check that the stack and regs on entry from user mode are sane. */
-static __always_inline void arch_check_user_regs(struct pt_regs *regs)
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
/*
@@ -42,7 +42,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
WARN_ON_ONCE(regs != task_pt_regs(current));
}
}
-#define arch_check_user_regs arch_check_user_regs
+#define arch_enter_from_user_mode arch_enter_from_user_mode
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index f356607835d8..4ae07c7e2175 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -771,35 +771,36 @@ static struct tty_driver *con3215_device(struct console *c, int *index)
}
/*
- * panic() calls con3215_flush through a panic_notifier
- * before the system enters a disabled, endless loop.
+ * The below function is called as a panic/reboot notifier before the
+ * system enters a disabled, endless loop.
+ *
+ * Notice we must use the spin_trylock() alternative, to prevent lockups
+ * in atomic context (panic routine runs with secondary CPUs, local IRQs
+ * and preemption disabled).
*/
-static void con3215_flush(void)
+static int con3215_notify(struct notifier_block *self,
+ unsigned long event, void *data)
{
struct raw3215_info *raw;
unsigned long flags;
raw = raw3215[0]; /* console 3215 is the first one */
- spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+ if (!spin_trylock_irqsave(get_ccwdev_lock(raw->cdev), flags))
+ return NOTIFY_DONE;
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
-}
-static int con3215_notify(struct notifier_block *self,
- unsigned long event, void *data)
-{
- con3215_flush();
- return NOTIFY_OK;
+ return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = con3215_notify,
- .priority = 0,
+ .priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = con3215_notify,
- .priority = 0,
+ .priority = INT_MIN + 1, /* run the callback late */
};
/*
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index e4592890f20a..10f6a37fb153 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -535,20 +535,26 @@ con3270_wait_write(struct con3270 *cp)
}
/*
- * panic() calls con3270_flush through a panic_notifier
- * before the system enters a disabled, endless loop.
+ * The below function is called as a panic/reboot notifier before the
+ * system enters a disabled, endless loop.
+ *
+ * Notice we must use the spin_trylock() alternative, to prevent lockups
+ * in atomic context (panic routine runs with secondary CPUs, local IRQs
+ * and preemption disabled).
*/
-static void
-con3270_flush(void)
+static int con3270_notify(struct notifier_block *self,
+ unsigned long event, void *data)
{
struct con3270 *cp;
unsigned long flags;
cp = condev;
if (!cp->view.dev)
- return;
- raw3270_activate_view(&cp->view);
- spin_lock_irqsave(&cp->view.lock, flags);
+ return NOTIFY_DONE;
+ if (!raw3270_view_lock_unavailable(&cp->view))
+ raw3270_activate_view(&cp->view);
+ if (!spin_trylock_irqsave(&cp->view.lock, flags))
+ return NOTIFY_DONE;
con3270_wait_write(cp);
cp->nr_up = 0;
con3270_rebuild_update(cp);
@@ -560,23 +566,18 @@ con3270_flush(void)
con3270_wait_write(cp);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
-}
-static int con3270_notify(struct notifier_block *self,
- unsigned long event, void *data)
-{
- con3270_flush();
- return NOTIFY_OK;
+ return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = con3270_notify,
- .priority = 0,
+ .priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = con3270_notify,
- .priority = 0,
+ .priority = INT_MIN + 1, /* run the callback late */
};
/*
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index dfde0d941c3c..4e2b3a1a3b2e 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -831,6 +831,21 @@ raw3270_create_device(struct ccw_device *cdev)
}
/*
+ * This helper just validates that it is safe to activate a
+ * view in the panic() context, due to locking restrictions.
+ */
+int raw3270_view_lock_unavailable(struct raw3270_view *view)
+{
+ struct raw3270 *rp = view->dev;
+
+ if (!rp)
+ return -ENODEV;
+ if (spin_is_locked(get_ccwdev_lock(rp->cdev)))
+ return -EBUSY;
+ return 0;
+}
+
+/*
* Activate a view.
*/
int
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index c6645167cd2b..4cb6b5ee44ca 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -160,6 +160,7 @@ struct raw3270_view {
};
int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
+int raw3270_view_lock_unavailable(struct raw3270_view *view);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index fe5ee2646fcf..e5d947c763ea 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -220,30 +220,34 @@ sclp_console_device(struct console *c, int *index)
}
/*
- * Make sure that all buffers will be flushed to the SCLP.
+ * This panic/reboot notifier makes sure that all buffers
+ * will be flushed to the SCLP.
*/
-static void
-sclp_console_flush(void)
+static int sclp_console_notify(struct notifier_block *self,
+ unsigned long event, void *data)
{
+ /*
+ * Perform the lock check before effectively getting the
+ * lock on sclp_conbuf_emit() / sclp_console_sync_queue()
+ * to prevent potential lockups in atomic context.
+ */
+ if (spin_is_locked(&sclp_con_lock))
+ return NOTIFY_DONE;
+
sclp_conbuf_emit();
sclp_console_sync_queue();
-}
-static int sclp_console_notify(struct notifier_block *self,
- unsigned long event, void *data)
-{
- sclp_console_flush();
- return NOTIFY_OK;
+ return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_console_notify,
- .priority = 1,
+ .priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_console_notify,
- .priority = 1,
+ .priority = INT_MIN + 1, /* run the callback late */
};
/*
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index e9943a86c361..dd313ff57df3 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -49,8 +49,10 @@ static void __init sclp_early_facilities_detect(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
- if (sccb->cpuoff > 134)
+ if (sccb->cpuoff > 134) {
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+ sclp.has_iplcc = !!(sccb->byte_134 & 0x02);
+ }
if (sccb->cpuoff > 137)
sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 3b4e7e5d9b71..a32f34a1c6d2 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -769,21 +769,6 @@ __initcall(sclp_vt220_tty_init);
#ifdef CONFIG_SCLP_VT220_CONSOLE
-static void __sclp_vt220_flush_buffer(void)
-{
- unsigned long flags;
-
- sclp_vt220_emit_current();
- spin_lock_irqsave(&sclp_vt220_lock, flags);
- del_timer(&sclp_vt220_timer);
- while (sclp_vt220_queue_running) {
- spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- sclp_sync_wait();
- spin_lock_irqsave(&sclp_vt220_lock, flags);
- }
- spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-}
-
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
@@ -797,22 +782,41 @@ sclp_vt220_con_device(struct console *c, int *index)
return sclp_vt220_driver;
}
+/*
+ * This panic/reboot notifier runs in atomic context, so
+ * locking restrictions apply to prevent potential lockups.
+ */
static int
sclp_vt220_notify(struct notifier_block *self,
unsigned long event, void *data)
{
- __sclp_vt220_flush_buffer();
- return NOTIFY_OK;
+ unsigned long flags;
+
+ if (spin_is_locked(&sclp_vt220_lock))
+ return NOTIFY_DONE;
+
+ sclp_vt220_emit_current();
+
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ del_timer(&sclp_vt220_timer);
+ while (sclp_vt220_queue_running) {
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+ sclp_sync_wait();
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ }
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+ return NOTIFY_DONE;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_vt220_notify,
- .priority = 1,
+ .priority = INT_MIN + 1, /* run the callback late */
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_vt220_notify,
- .priority = 1,
+ .priority = INT_MIN + 1, /* run the callback late */
};
/* Structure needed to register with printk */
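The con3215, con3270, and SCLP console changes above share one idea: a panic/reboot notifier must never spin on a lock that a now-stopped CPU might hold, so the flush either takes the lock with a trylock variant (or checks it with spin_is_locked()) or gives up, and the callback runs late via a very low priority. A kernel-style sketch of that shape, with made-up names (example_lock, example_notify) and assuming the usual atomic_notifier_chain_register(&panic_notifier_list, ...) / register_reboot_notifier() registration:

#include <linux/limits.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static int example_notify(struct notifier_block *self,
			  unsigned long event, void *data)
{
	unsigned long flags;

	/*
	 * panic() runs with secondary CPUs stopped and IRQs off; if one
	 * of them held the lock, spinning here would hang the panic
	 * path, so give up instead of waiting.
	 */
	if (!spin_trylock_irqsave(&example_lock, flags))
		return NOTIFY_DONE;
	/* ... flush any buffered console output ... */
	spin_unlock_irqrestore(&example_lock, flags);
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_notify,
	.priority = INT_MIN + 1,	/* run after more critical callbacks */
};

/* Registered with atomic_notifier_chain_register(&panic_notifier_list,
 * &example_panic_nb) and, for reboot, register_reboot_notifier(). */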
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 297fb399363c..620a917cd3a1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1255,7 +1255,7 @@ exit:
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
-int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta)
{
struct {
struct chsc_header request;
@@ -1266,7 +1266,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
unsigned int rsvd2[5];
struct chsc_header response;
unsigned int rsvd3[3];
- u64 clock_delta;
+ s64 clock_delta;
unsigned int rsvd4[2];
} *rr;
int rc;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index fdf16cb70881..5c13d2079d96 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -179,7 +179,7 @@ static int ap_qci_available(void)
* ap_apft_available(): Test if AP facilities test (APFT)
* facility is available.
*
- * Returns 1 if APFT is is available.
+ * Returns 1 if APFT is available.
*/
static int ap_apft_available(void)
{
@@ -693,6 +693,24 @@ void ap_send_online_uevent(struct ap_device *ap_dev, int online)
}
EXPORT_SYMBOL(ap_send_online_uevent);
+static void ap_send_mask_changed_uevent(unsigned long *newapm,
+ unsigned long *newaqm)
+{
+ char buf[100];
+ char *envp[] = { buf, NULL };
+
+ if (newapm)
+ snprintf(buf, sizeof(buf),
+ "APMASK=0x%016lx%016lx%016lx%016lx\n",
+ newapm[0], newapm[1], newapm[2], newapm[3]);
+ else
+ snprintf(buf, sizeof(buf),
+ "AQMASK=0x%016lx%016lx%016lx%016lx\n",
+ newaqm[0], newaqm[1], newaqm[2], newaqm[3]);
+
+ kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
+}
+
/*
* calc # of bound APQNs
*/
@@ -704,7 +722,7 @@ struct __ap_calc_ctrs {
static int __ap_calc_helper(struct device *dev, void *arg)
{
- struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *) arg;
+ struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;
if (is_queue_dev(dev)) {
pctrs->apqns++;
@@ -720,7 +738,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
struct __ap_calc_ctrs ctrs;
memset(&ctrs, 0, sizeof(ctrs));
- bus_for_each_dev(&ap_bus_type, NULL, (void *) &ctrs, __ap_calc_helper);
+ bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper);
*apqns = ctrs.apqns;
*bound = ctrs.bound;
@@ -781,7 +799,7 @@ EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
if (is_queue_dev(dev) &&
- AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
+ AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
device_unregister(dev);
return 0;
}
@@ -794,8 +812,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm)
- && test_bit_inv(queue, ap_perms.aqm);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
mutex_unlock(&ap_perms_mutex);
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
@@ -826,8 +844,8 @@ int ap_owned_by_def_drv(int card, int queue)
mutex_lock(&ap_perms_mutex);
- if (test_bit_inv(card, ap_perms.apm)
- && test_bit_inv(queue, ap_perms.aqm))
+ if (test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm))
rc = 1;
mutex_unlock(&ap_perms_mutex);
@@ -876,8 +894,8 @@ static int ap_device_probe(struct device *dev)
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm)
- && test_bit_inv(queue, ap_perms.aqm);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
mutex_unlock(&ap_perms_mutex);
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
@@ -898,8 +916,9 @@ static int ap_device_probe(struct device *dev)
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
- } else
+ } else {
ap_check_bindings_complete();
+ }
out:
if (rc)
@@ -980,8 +999,8 @@ void ap_bus_force_rescan(void)
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
-* A config change has happened, force an ap bus rescan.
-*/
+ * A config change has happened, force an ap bus rescan.
+ */
void ap_bus_cfg_chg(void)
{
AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
@@ -1105,7 +1124,7 @@ int ap_parse_mask_str(const char *str,
if (bits & 0x07)
return -EINVAL;
- size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
newmap = kmalloc(size, GFP_KERNEL);
if (!newmap)
return -ENOMEM;
@@ -1241,8 +1260,9 @@ static ssize_t poll_thread_store(struct bus_type *bus,
rc = ap_poll_thread_start();
if (rc)
count = rc;
- } else
+ } else {
ap_poll_thread_stop();
+ }
return count;
}
@@ -1355,7 +1375,7 @@ static int apmask_commit(unsigned long *newapm)
static ssize_t apmask_store(struct bus_type *bus, const char *buf,
size_t count)
{
- int rc;
+ int rc, changes = 0;
DECLARE_BITMAP(newapm, AP_DEVICES);
if (mutex_lock_interruptible(&ap_perms_mutex))
@@ -1365,14 +1385,19 @@ static ssize_t apmask_store(struct bus_type *bus, const char *buf,
if (rc)
goto done;
- rc = apmask_commit(newapm);
+ changes = memcmp(ap_perms.apm, newapm, APMASKSIZE);
+ if (changes)
+ rc = apmask_commit(newapm);
done:
mutex_unlock(&ap_perms_mutex);
if (rc)
return rc;
- ap_bus_revise_bindings();
+ if (changes) {
+ ap_bus_revise_bindings();
+ ap_send_mask_changed_uevent(newapm, NULL);
+ }
return count;
}
@@ -1443,7 +1468,7 @@ static int aqmask_commit(unsigned long *newaqm)
static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
size_t count)
{
- int rc;
+ int rc, changes = 0;
DECLARE_BITMAP(newaqm, AP_DOMAINS);
if (mutex_lock_interruptible(&ap_perms_mutex))
@@ -1453,14 +1478,19 @@ static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
if (rc)
goto done;
- rc = aqmask_commit(newaqm);
+ changes = memcmp(ap_perms.aqm, newaqm, APMASKSIZE);
+ if (changes)
+ rc = aqmask_commit(newaqm);
done:
mutex_unlock(&ap_perms_mutex);
if (rc)
return rc;
- ap_bus_revise_bindings();
+ if (changes) {
+ ap_bus_revise_bindings();
+ ap_send_mask_changed_uevent(NULL, newaqm);
+ }
return count;
}
@@ -1605,9 +1635,9 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
apinfo.mode = (func >> 26) & 0x07;
apinfo.cat = AP_DEVICE_TYPE_CEX8;
status = ap_qact(qid, 0, &apinfo);
- if (status.response_code == AP_RESPONSE_NORMAL
- && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
- && apinfo.cat <= AP_DEVICE_TYPE_CEX8)
+ if (status.response_code == AP_RESPONSE_NORMAL &&
+ apinfo.cat >= AP_DEVICE_TYPE_CEX2A &&
+ apinfo.cat <= AP_DEVICE_TYPE_CEX8)
comp_type = apinfo.cat;
}
if (!comp_type)
@@ -1627,7 +1657,7 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
*/
static int __match_card_device_with_id(struct device *dev, const void *data)
{
- return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *) data;
+ return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data;
}
/*
@@ -1636,7 +1666,7 @@ static int __match_card_device_with_id(struct device *dev, const void *data)
*/
static int __match_queue_device_with_qid(struct device *dev, const void *data)
{
- return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
+ return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
}
/*
@@ -1645,8 +1675,8 @@ static int __match_queue_device_with_qid(struct device *dev, const void *data)
*/
static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
{
- return is_queue_dev(dev)
- && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data;
+ return is_queue_dev(dev) &&
+ AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
}
/* Helper function for notify_config_changed */
@@ -1699,7 +1729,7 @@ static inline void notify_scan_complete(void)
static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
{
bus_for_each_dev(&ap_bus_type, NULL,
- (void *)(long) ac->id,
+ (void *)(long)ac->id,
__ap_queue_devices_with_id_unregister);
device_unregister(&ac->ap_dev.device);
}
@@ -1727,7 +1757,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
for (dom = 0; dom <= ap_max_domain_id; dom++) {
qid = AP_MKQID(ac->id, dom);
dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) qid,
+ (void *)(long)qid,
__match_queue_device_with_qid);
aq = dev ? to_ap_queue(dev) : NULL;
if (!ap_test_config_usage_domain(dom)) {
@@ -1873,7 +1903,7 @@ static inline void ap_scan_adapter(int ap)
/* Is there currently a card device for this adapter ? */
dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) ap,
+ (void *)(long)ap,
__match_card_device_with_id);
ac = dev ? to_ap_card(dev) : NULL;
@@ -2074,7 +2104,7 @@ static void ap_scan_bus(struct work_struct *unused)
if (ap_domain_index >= 0) {
struct device *dev =
bus_find_device(&ap_bus_type, NULL,
- (void *)(long) ap_domain_index,
+ (void *)(long)ap_domain_index,
__match_queue_device_with_queue_id);
if (dev)
put_device(dev);
@@ -2109,7 +2139,7 @@ static int __init ap_debug_init(void)
static void __init ap_perms_init(void)
{
- /* all resources useable if no kernel parameter string given */
+ /* all resources usable if no kernel parameter string given */
memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
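With the apmask/aqmask changes above, writing back an unchanged mask no longer forces a rebind, while an actual change now also emits a KOBJ_CHANGE uevent carrying the new APMASK/AQMASK value, so udev rules or management daemons can react to it. A rough userspace sketch of driving that interface follows; the sysfs path /sys/bus/ap/apmask and the relative "-5" syntax (remove adapter 5) are taken from the documented AP bus interface as an assumption, not from anything shown in this hunk.

#include <stdio.h>

int main(void)
{
	/* Path and mask syntax are assumptions, see note above. */
	FILE *f = fopen("/sys/bus/ap/apmask", "w");

	if (!f) {
		perror("/sys/bus/ap/apmask");
		return 1;
	}
	fprintf(f, "-5\n");		/* drop adapter 5 from the host mask */
	if (fclose(f) != 0) {		/* write errors surface on close */
		perror("apmask write");
		return 1;
	}
	return 0;
}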
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6a65885f5f43..0c40af157df2 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -317,6 +317,7 @@ struct ap_perms {
unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)];
};
+
extern struct ap_perms ap_perms;
extern struct mutex ap_perms_mutex;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 205045cd998d..c48b0db824e3 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -99,7 +99,7 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
struct ap_queue_status status;
- if (msg == NULL)
+ if (!msg)
return -EINVAL;
status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
switch (status.response_code) {
@@ -603,7 +603,7 @@ static ssize_t interrupt_show(struct device *dev,
static DEVICE_ATTR_RO(interrupt);
static ssize_t config_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc;
@@ -827,8 +827,9 @@ int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
aq->requestq_count++;
aq->total_request_count++;
atomic64_inc(&aq->card->total_request_count);
- } else
+ } else {
rc = -ENODEV;
+ }
/* Send/receive as many request from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 7f69ca695fc2..7329caa7d467 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -232,7 +232,7 @@ static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
int i, rc;
u16 card, dom;
u32 nr_apqns, *apqns = NULL;
- struct ep11keyblob *kb = (struct ep11keyblob *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
zcrypt_wait_api_operational();
@@ -267,12 +267,12 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain,
u16 *pkeysize, u32 *pattributes)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
+ struct secaeskeytoken *t = (struct secaeskeytoken *)seckey;
u16 cardnr, domain;
int rc;
/* check the secure key for valid AES secure key */
- rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0);
+ rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0);
if (rc)
goto out;
if (pattributes)
@@ -425,9 +425,9 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
t = (struct clearaeskeytoken *)key;
if (keylen != sizeof(*t) + t->len)
goto out;
- if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
- || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
- || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+ if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) ||
+ (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) ||
+ (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
memcpy(ckey.clrkey, t->clearkey, t->len);
else
goto out;
@@ -541,7 +541,6 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
DEBUG_DBG("%s rc=%d\n", __func__, rc);
return rc;
-
}
EXPORT_SYMBOL(pkey_keyblob2pkey);
@@ -588,9 +587,11 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
} else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_genseckey(card, dom, ksize, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
- } else /* TOKVER_CCA_VLSC */
+ } else {
+ /* TOKVER_CCA_VLSC */
rc = cca_gencipherkey(card, dom, ksize, kflags,
keybuf, keybufsize);
+ }
if (rc == 0)
break;
}
@@ -645,9 +646,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
rc = cca_clr2seckey(card, dom, ksize,
clrkey, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
- } else /* TOKVER_CCA_VLSC */
+ } else {
+ /* TOKVER_CCA_VLSC */
rc = cca_clr2cipherkey(card, dom, ksize, kflags,
clrkey, keybuf, keybufsize);
+ }
if (rc == 0)
break;
}
@@ -667,8 +670,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
- if (hdr->type == TOKTYPE_CCA_INTERNAL
- && hdr->version == TOKVER_CCA_AES) {
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
@@ -677,7 +680,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
if (ktype)
*ktype = PKEY_TYPE_CCA_DATA;
if (ksize)
- *ksize = (enum pkey_key_size) t->bitsize;
+ *ksize = (enum pkey_key_size)t->bitsize;
rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
@@ -697,8 +700,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL
- && hdr->version == TOKVER_CCA_VLSC) {
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
@@ -734,8 +737,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
- } else if (hdr->type == TOKTYPE_NON_CCA
- && hdr->version == TOKVER_EP11_AES) {
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES) {
struct ep11keyblob *kb = (struct ep11keyblob *)key;
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
@@ -757,8 +760,9 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
- } else
+ } else {
rc = -EINVAL;
+ }
out:
kfree(_apqns);
@@ -816,16 +820,17 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (hdr->type == TOKTYPE_CCA_INTERNAL
- && hdr->version == TOKVER_CCA_AES)
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
rc = cca_sec2protkey(card, dom, key, pkey->protkey,
&pkey->len, &pkey->type);
- else if (hdr->type == TOKTYPE_CCA_INTERNAL
- && hdr->version == TOKVER_CCA_VLSC)
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
&pkey->len, &pkey->type);
- else { /* EP11 AES secure key blob */
- struct ep11keyblob *kb = (struct ep11keyblob *) key;
+ } else {
+ /* EP11 AES secure key blob */
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
pkey->len = sizeof(pkey->protkey);
rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
@@ -851,10 +856,10 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
zcrypt_wait_api_operational();
- if (hdr->type == TOKTYPE_NON_CCA
- && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
- || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
- && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
int minhwtype = 0, api = 0;
struct ep11keyblob *kb = (struct ep11keyblob *)
(key + sizeof(struct ep11kblob_header));
@@ -869,11 +874,11 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
minhwtype, api, kb->wkvp);
if (rc)
goto out;
- } else if (hdr->type == TOKTYPE_NON_CCA
- && hdr->version == TOKVER_EP11_AES
- && is_ep11_keyblob(key)) {
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
int minhwtype = 0, api = 0;
- struct ep11keyblob *kb = (struct ep11keyblob *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
return -EINVAL;
@@ -931,8 +936,9 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
- } else
+ } else {
return -EINVAL;
+ }
if (apqns) {
if (*nr_apqns < _nr_apqns)
@@ -961,9 +967,9 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
int minhwtype = ZCRYPT_CEX3C;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = *((u64 *) cur_mkvp);
+ cur_mkvp = *((u64 *)cur_mkvp);
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = *((u64 *) alt_mkvp);
+ old_mkvp = *((u64 *)alt_mkvp);
if (ktype == PKEY_TYPE_CCA_CIPHER)
minhwtype = ZCRYPT_CEX6;
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
@@ -975,9 +981,9 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
u64 cur_mkvp = 0, old_mkvp = 0;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = *((u64 *) cur_mkvp);
+ cur_mkvp = *((u64 *)cur_mkvp);
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = *((u64 *) alt_mkvp);
+ old_mkvp = *((u64 *)alt_mkvp);
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, APKA_MK_SET,
cur_mkvp, old_mkvp, 1);
@@ -996,8 +1002,9 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
if (rc)
goto out;
- } else
+ } else {
return -EINVAL;
+ }
if (apqns) {
if (*nr_apqns < _nr_apqns)
@@ -1026,21 +1033,21 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
- if (hdr->type == TOKTYPE_NON_CCA
- && hdr->version == TOKVER_EP11_AES_WITH_HEADER
- && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 AES key blob with header */
if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA
- && hdr->version == TOKVER_EP11_ECC_WITH_HEADER
- && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 ECC key blob with header */
if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA
- && hdr->version == TOKVER_EP11_AES
- && is_ep11_keyblob(key)) {
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
/* EP11 AES key blob with header in session field */
if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
return -EINVAL;
@@ -1088,15 +1095,15 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (hdr->type == TOKTYPE_NON_CCA
- && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
- || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
- && is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
protkey, protkeylen, protkeytype);
- else if (hdr->type == TOKTYPE_NON_CCA
- && hdr->version == TOKVER_EP11_AES
- && is_ep11_keyblob(key))
+ else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key))
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
protkey, protkeylen, protkeytype);
else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
@@ -1144,7 +1151,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
switch (cmd) {
case PKEY_GENSECK: {
- struct pkey_genseck __user *ugs = (void __user *) arg;
+ struct pkey_genseck __user *ugs = (void __user *)arg;
struct pkey_genseck kgs;
if (copy_from_user(&kgs, ugs, sizeof(kgs)))
@@ -1159,7 +1166,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_CLR2SECK: {
- struct pkey_clr2seck __user *ucs = (void __user *) arg;
+ struct pkey_clr2seck __user *ucs = (void __user *)arg;
struct pkey_clr2seck kcs;
if (copy_from_user(&kcs, ucs, sizeof(kcs)))
@@ -1175,7 +1182,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_SEC2PROTK: {
- struct pkey_sec2protk __user *usp = (void __user *) arg;
+ struct pkey_sec2protk __user *usp = (void __user *)arg;
struct pkey_sec2protk ksp;
if (copy_from_user(&ksp, usp, sizeof(ksp)))
@@ -1191,7 +1198,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_CLR2PROTK: {
- struct pkey_clr2protk __user *ucp = (void __user *) arg;
+ struct pkey_clr2protk __user *ucp = (void __user *)arg;
struct pkey_clr2protk kcp;
if (copy_from_user(&kcp, ucp, sizeof(kcp)))
@@ -1207,7 +1214,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_FINDCARD: {
- struct pkey_findcard __user *ufc = (void __user *) arg;
+ struct pkey_findcard __user *ufc = (void __user *)arg;
struct pkey_findcard kfc;
if (copy_from_user(&kfc, ufc, sizeof(kfc)))
@@ -1222,7 +1229,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_SKEY2PKEY: {
- struct pkey_skey2pkey __user *usp = (void __user *) arg;
+ struct pkey_skey2pkey __user *usp = (void __user *)arg;
struct pkey_skey2pkey ksp;
if (copy_from_user(&ksp, usp, sizeof(ksp)))
@@ -1236,7 +1243,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_VERIFYKEY: {
- struct pkey_verifykey __user *uvk = (void __user *) arg;
+ struct pkey_verifykey __user *uvk = (void __user *)arg;
struct pkey_verifykey kvk;
if (copy_from_user(&kvk, uvk, sizeof(kvk)))
@@ -1251,7 +1258,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_GENPROTK: {
- struct pkey_genprotk __user *ugp = (void __user *) arg;
+ struct pkey_genprotk __user *ugp = (void __user *)arg;
struct pkey_genprotk kgp;
if (copy_from_user(&kgp, ugp, sizeof(kgp)))
@@ -1265,7 +1272,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_VERIFYPROTK: {
- struct pkey_verifyprotk __user *uvp = (void __user *) arg;
+ struct pkey_verifyprotk __user *uvp = (void __user *)arg;
struct pkey_verifyprotk kvp;
if (copy_from_user(&kvp, uvp, sizeof(kvp)))
@@ -1275,7 +1282,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_KBLOB2PROTK: {
- struct pkey_kblob2pkey __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey __user *utp = (void __user *)arg;
struct pkey_kblob2pkey ktp;
u8 *kkey;
@@ -1294,7 +1301,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_GENSECK2: {
- struct pkey_genseck2 __user *ugs = (void __user *) arg;
+ struct pkey_genseck2 __user *ugs = (void __user *)arg;
struct pkey_genseck2 kgs;
struct pkey_apqn *apqns;
size_t klen = KEYBLOBBUFSIZE;
@@ -1336,7 +1343,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_CLR2SECK2: {
- struct pkey_clr2seck2 __user *ucs = (void __user *) arg;
+ struct pkey_clr2seck2 __user *ucs = (void __user *)arg;
struct pkey_clr2seck2 kcs;
struct pkey_apqn *apqns;
size_t klen = KEYBLOBBUFSIZE;
@@ -1379,7 +1386,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_VERIFYKEY2: {
- struct pkey_verifykey2 __user *uvk = (void __user *) arg;
+ struct pkey_verifykey2 __user *uvk = (void __user *)arg;
struct pkey_verifykey2 kvk;
u8 *kkey;
@@ -1400,7 +1407,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_KBLOB2PROTK2: {
- struct pkey_kblob2pkey2 __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey2 __user *utp = (void __user *)arg;
struct pkey_kblob2pkey2 ktp;
struct pkey_apqn *apqns = NULL;
u8 *kkey;
@@ -1427,7 +1434,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_APQNS4K: {
- struct pkey_apqns4key __user *uak = (void __user *) arg;
+ struct pkey_apqns4key __user *uak = (void __user *)arg;
struct pkey_apqns4key kak;
struct pkey_apqn *apqns = NULL;
size_t nr_apqns, len;
@@ -1476,7 +1483,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_APQNS4KT: {
- struct pkey_apqns4keytype __user *uat = (void __user *) arg;
+ struct pkey_apqns4keytype __user *uat = (void __user *)arg;
struct pkey_apqns4keytype kat;
struct pkey_apqn *apqns = NULL;
size_t nr_apqns, len;
@@ -1518,7 +1525,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
break;
}
case PKEY_KBLOB2PROTK3: {
- struct pkey_kblob2pkey3 __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey3 __user *utp = (void __user *)arg;
struct pkey_kblob2pkey3 ktp;
struct pkey_apqn *apqns = NULL;
u32 protkeylen = PROTKEYBLOBBUFSIZE;
@@ -1708,7 +1715,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
loff_t off, size_t count)
{
int rc;
- struct pkey_seckey *seckey = (struct pkey_seckey *) buf;
+ struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
if (off != 0 || count < sizeof(struct secaeskeytoken))
return -EINVAL;
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 29ebd54f8919..4ac9c6521ec1 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -46,8 +46,6 @@ static struct ap_device_id ap_queue_ids[] = {
{ /* end of sibling */ },
};
-MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
-
static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
{
struct ap_matrix_mdev *matrix_mdev;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index aa6dc3c0c353..f94b43ce9a65 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -104,7 +104,7 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
struct zcrypt_ops *zops;
list_for_each_entry(zops, &zcrypt_ops_list, list)
- if ((zops->variant == variant) &&
+ if (zops->variant == variant &&
(!strncmp(zops->name, name, sizeof(zops->name))))
return zops;
return NULL;
@@ -438,8 +438,8 @@ static int zcdn_create(const char *name)
strncpy(nodename, name, sizeof(nodename));
else
snprintf(nodename, sizeof(nodename),
- ZCRYPT_NAME "_%d", (int) MINOR(devt));
- nodename[sizeof(nodename)-1] = '\0';
+ ZCRYPT_NAME "_%d", (int)MINOR(devt));
+ nodename[sizeof(nodename) - 1] = '\0';
if (dev_set_name(&zcdndev->device, nodename)) {
rc = -EINVAL;
goto unlockout;
@@ -519,7 +519,7 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
/*
* zcrypt_write(): Not allowed.
*
- * Write is is not allowed
+ * Write is not allowed
*/
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
@@ -549,7 +549,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
perms = &zcdndev->perms;
}
#endif
- filp->private_data = (void *) perms;
+ filp->private_data = (void *)perms;
atomic_inc(&zcrypt_open_count);
return stream_open(inode, filp);
@@ -713,7 +713,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for useable accelarator or CCA card */
+ /* Check for usable accelarator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
@@ -733,7 +733,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is useable and eligible */
+ /* check if device is usable and eligible */
if (!zq->online || !zq->ops->rsa_modexpo ||
!zq->queue->config || zq->queue->chkstop)
continue;
@@ -823,7 +823,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for useable accelarator or CCA card */
+ /* Check for usable accelarator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x18000000))
continue;
@@ -843,7 +843,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is useable and eligible */
+ /* check if device is usable and eligible */
if (!zq->online || !zq->ops->rsa_modexpo_crt ||
!zq->queue->config || zq->queue->chkstop)
continue;
@@ -893,7 +893,7 @@ out:
static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
struct zcrypt_track *tr,
- struct ica_xcRB *xcRB)
+ struct ica_xcRB *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -904,9 +904,9 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
- trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
+ trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
- xcRB->status = 0;
+ xcrb->status = 0;
ap_init_message(&ap_msg);
#ifdef CONFIG_ZCRYPT_DEBUG
@@ -915,11 +915,11 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
__func__, tr->fi.cmd);
- xcRB->agent_ID = 0x4646;
+ xcrb->agent_ID = 0x4646;
}
#endif
- rc = prep_cca_ap_msg(userspace, xcRB, &ap_msg, &func_code, &domain);
+ rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out;
@@ -948,13 +948,13 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for useable CCA card */
+ /* Check for usable CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x10000000))
continue;
/* Check for user selected CCA card */
- if (xcRB->user_defined != AUTOSELECT &&
- xcRB->user_defined != zc->card->id)
+ if (xcrb->user_defined != AUTOSELECT &&
+ xcrb->user_defined != zc->card->id)
continue;
/* check if request size exceeds card max msg size */
if (ap_msg.len > zc->card->maxmsgsize)
@@ -971,7 +971,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check for device useable and eligible */
+ /* check for device usable and eligible */
if (!zq->online || !zq->ops->send_cprb ||
!zq->queue->config || zq->queue->chkstop ||
(tdom != AUTOSEL_DOM &&
@@ -998,7 +998,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
if (!pref_zq) {
ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, xcRB->user_defined, *domain);
+ __func__, xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
@@ -1016,7 +1016,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
}
#endif
- rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg);
+ rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
@@ -1028,14 +1028,14 @@ out:
tr->last_rc = rc;
tr->last_qid = qid;
}
- trace_s390_zcrypt_rep(xcRB, func_code, rc,
+ trace_s390_zcrypt_rep(xcrb, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
-long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
- return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB);
+ return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
}
EXPORT_SYMBOL(zcrypt_send_cprb);
@@ -1089,7 +1089,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
ap_msg.fi.cmd = tr->fi.cmd;
#endif
- target_num = (unsigned short) xcrb->targets_num;
+ target_num = (unsigned short)xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
targets = NULL;
@@ -1103,9 +1103,9 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
goto out;
}
- uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
+ uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
if (z_copy_from_user(userspace, targets, uptr,
- target_num * sizeof(*targets))) {
+ target_num * sizeof(*targets))) {
func_code = 0;
rc = -EFAULT;
goto out_free;
@@ -1132,7 +1132,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for useable EP11 card */
+ /* Check for usable EP11 card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x04000000))
continue;
@@ -1155,7 +1155,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is useable and eligible */
+ /* check if device is usable and eligible */
if (!zq->online || !zq->ops->send_ep11_cprb ||
!zq->queue->config || zq->queue->chkstop ||
(targets &&
@@ -1184,11 +1184,11 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (!pref_zq) {
if (targets && target_num == 1) {
ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, (int) targets->ap_id,
- (int) targets->dom_id);
+ __func__, (int)targets->ap_id,
+ (int)targets->dom_id);
} else if (targets) {
ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
- __func__, (int) target_num);
+ __func__, (int)target_num);
} else {
ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
__func__);
@@ -1245,7 +1245,7 @@ static long zcrypt_rng(char *buffer)
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for useable CCA card */
+ /* Check for usable CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->functions & 0x10000000))
continue;
@@ -1254,7 +1254,7 @@ static long zcrypt_rng(char *buffer)
if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is useable and eligible */
+ /* check if device is usable and eligible */
if (!zq->online || !zq->ops->rng ||
!zq->queue->config || zq->queue->chkstop)
continue;
@@ -1270,7 +1270,7 @@ static long zcrypt_rng(char *buffer)
if (!pref_zq) {
ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ __func__);
rc = -ENODEV;
goto out;
}
@@ -1381,8 +1381,8 @@ static void zcrypt_status_mask(char status[], size_t max_adapters)
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
- || card >= max_adapters)
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
+ card >= max_adapters)
continue;
status[card] = zc->online ? zc->user_space_type : 0x0d;
}
@@ -1402,8 +1402,8 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
- || card >= max_adapters)
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
+ card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
qdepth[card] =
@@ -1429,13 +1429,13 @@ static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
- || card >= max_adapters)
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
+ card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
cnt = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
- reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
+ reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
}
}
local_bh_enable();
@@ -1493,7 +1493,7 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
int rc;
struct zcrypt_track tr;
struct ica_rsa_modexpo mex;
- struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+ struct ica_rsa_modexpo __user *umex = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&mex, umex, sizeof(mex)))
@@ -1538,7 +1538,7 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
int rc;
struct zcrypt_track tr;
struct ica_rsa_modexpo_crt crt;
- struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+ struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
if (copy_from_user(&crt, ucrt, sizeof(crt)))
@@ -1581,25 +1581,25 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
- struct ica_xcRB xcRB;
+ struct ica_xcRB xcrb;
struct zcrypt_track tr;
- struct ica_xcRB __user *uxcRB = (void __user *) arg;
+ struct ica_xcRB __user *uxcrb = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
- if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+ if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
#ifdef CONFIG_ZCRYPT_DEBUG
- if ((xcRB.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) {
+ if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- tr.fi.cmd = (u16)(xcRB.status >> 16);
+ tr.fi.cmd = (u16)(xcrb.status >> 16);
}
- xcRB.status = 0;
+ xcrb.status = 0;
#endif
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
#ifdef CONFIG_ZCRYPT_DEBUG
@@ -1610,7 +1610,7 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
@@ -1618,8 +1618,8 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
rc = -EIO;
if (rc)
ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
- rc, xcRB.status);
- if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+ rc, xcrb.status);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
}
@@ -1674,7 +1674,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
{
int rc;
struct ap_perms *perms =
- (struct ap_perms *) filp->private_data;
+ (struct ap_perms *)filp->private_data;
rc = zcrypt_check_ioctl(perms, cmd);
if (rc)
@@ -1698,7 +1698,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
- if (copy_to_user((char __user *) arg, device_status,
+ if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
kfree(device_status);
@@ -1708,7 +1708,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
char status[AP_DEVICES];
zcrypt_status_mask(status, AP_DEVICES);
- if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ if (copy_to_user((char __user *)arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
@@ -1716,7 +1716,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
char qdepth[AP_DEVICES];
zcrypt_qdepth_mask(qdepth, AP_DEVICES);
- if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
@@ -1727,21 +1727,21 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (!reqcnt)
return -ENOMEM;
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
- if (copy_to_user((int __user *) arg, reqcnt,
+ if (copy_to_user((int __user *)arg, reqcnt,
sizeof(u32) * AP_DEVICES))
rc = -EFAULT;
kfree(reqcnt);
return rc;
}
case Z90STAT_REQUESTQ_COUNT:
- return put_user(zcrypt_requestq_count(), (int __user *) arg);
+ return put_user(zcrypt_requestq_count(), (int __user *)arg);
case Z90STAT_PENDINGQ_COUNT:
- return put_user(zcrypt_pendingq_count(), (int __user *) arg);
+ return put_user(zcrypt_pendingq_count(), (int __user *)arg);
case Z90STAT_TOTALOPEN_COUNT:
return put_user(atomic_read(&zcrypt_open_count),
- (int __user *) arg);
+ (int __user *)arg);
case Z90STAT_DOMAIN_INDEX:
- return put_user(ap_domain_index, (int __user *) arg);
+ return put_user(ap_domain_index, (int __user *)arg);
/*
* Deprecated ioctls
*/
@@ -1755,7 +1755,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask(device_status);
- if (copy_to_user((char __user *) arg, device_status,
+ if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
kfree(device_status);
@@ -1766,7 +1766,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
char status[MAX_ZDEV_CARDIDS];
zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
- if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ if (copy_to_user((char __user *)arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
@@ -1775,7 +1775,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
char qdepth[MAX_ZDEV_CARDIDS];
zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
- if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
@@ -1784,7 +1784,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
u32 reqcnt[MAX_ZDEV_CARDIDS];
zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
- if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
return -EFAULT;
return 0;
}
@@ -1899,7 +1899,7 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
&ucrt32->outputdatalength);
}
-struct compat_ica_xcRB {
+struct compat_ica_xcrb {
unsigned short agent_ID;
unsigned int user_defined;
unsigned short request_ID;
@@ -1919,66 +1919,66 @@ struct compat_ica_xcRB {
unsigned int status;
} __packed;
-static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
+static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
- struct compat_ica_xcRB xcRB32;
+ struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
+ struct compat_ica_xcrb xcrb32;
struct zcrypt_track tr;
- struct ica_xcRB xcRB64;
+ struct ica_xcRB xcrb64;
long rc;
memset(&tr, 0, sizeof(tr));
- if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
+ if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
return -EFAULT;
- xcRB64.agent_ID = xcRB32.agent_ID;
- xcRB64.user_defined = xcRB32.user_defined;
- xcRB64.request_ID = xcRB32.request_ID;
- xcRB64.request_control_blk_length =
- xcRB32.request_control_blk_length;
- xcRB64.request_control_blk_addr =
- compat_ptr(xcRB32.request_control_blk_addr);
- xcRB64.request_data_length =
- xcRB32.request_data_length;
- xcRB64.request_data_address =
- compat_ptr(xcRB32.request_data_address);
- xcRB64.reply_control_blk_length =
- xcRB32.reply_control_blk_length;
- xcRB64.reply_control_blk_addr =
- compat_ptr(xcRB32.reply_control_blk_addr);
- xcRB64.reply_data_length = xcRB32.reply_data_length;
- xcRB64.reply_data_addr =
- compat_ptr(xcRB32.reply_data_addr);
- xcRB64.priority_window = xcRB32.priority_window;
- xcRB64.status = xcRB32.status;
+ xcrb64.agent_ID = xcrb32.agent_ID;
+ xcrb64.user_defined = xcrb32.user_defined;
+ xcrb64.request_ID = xcrb32.request_ID;
+ xcrb64.request_control_blk_length =
+ xcrb32.request_control_blk_length;
+ xcrb64.request_control_blk_addr =
+ compat_ptr(xcrb32.request_control_blk_addr);
+ xcrb64.request_data_length =
+ xcrb32.request_data_length;
+ xcrb64.request_data_address =
+ compat_ptr(xcrb32.request_data_address);
+ xcrb64.reply_control_blk_length =
+ xcrb32.reply_control_blk_length;
+ xcrb64.reply_control_blk_addr =
+ compat_ptr(xcrb32.reply_control_blk_addr);
+ xcrb64.reply_data_length = xcrb32.reply_data_length;
+ xcrb64.reply_data_addr =
+ compat_ptr(xcrb32.reply_data_addr);
+ xcrb64.priority_window = xcrb32.priority_window;
+ xcrb64.status = xcrb32.status;
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
if (rc == -EAGAIN)
tr.again_counter++;
} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
- xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
- xcRB32.reply_data_length = xcRB64.reply_data_length;
- xcRB32.status = xcRB64.status;
- if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
+ xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
+ xcrb32.reply_data_length = xcrb64.reply_data_length;
+ xcrb32.status = xcrb64.status;
+ if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
return -EFAULT;
return rc;
}
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
int rc;
struct ap_perms *perms =
- (struct ap_perms *) filp->private_data;
+ (struct ap_perms *)filp->private_data;
rc = zcrypt_check_ioctl(perms, cmd);
if (rc)
@@ -1989,7 +1989,7 @@ static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
if (cmd == ICARSACRT)
return trans_modexpo_crt32(perms, filp, cmd, arg);
if (cmd == ZSECSENDCPRB)
- return trans_xcRB32(perms, filp, cmd, arg);
+ return trans_xcrb32(perms, filp, cmd, arg);
return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
@@ -2033,10 +2033,10 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
* read method calls.
*/
if (zcrypt_rng_buffer_index == 0) {
- rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+ rc = zcrypt_rng((char *)zcrypt_rng_buffer);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+ rc = zcrypt_rng((char *)zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
zcrypt_rng_buffer_index = rc / sizeof(*data);
@@ -2057,7 +2057,7 @@ int zcrypt_rng_device_add(void)
mutex_lock(&zcrypt_rng_mutex);
if (zcrypt_rng_device_count == 0) {
- zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
+ zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
if (!zcrypt_rng_buffer) {
rc = -ENOMEM;
goto out;
@@ -2069,13 +2069,14 @@ int zcrypt_rng_device_add(void)
if (rc)
goto out_free;
zcrypt_rng_device_count = 1;
- } else
+ } else {
zcrypt_rng_device_count++;
+ }
mutex_unlock(&zcrypt_rng_mutex);
return 0;
out_free:
- free_page((unsigned long) zcrypt_rng_buffer);
+ free_page((unsigned long)zcrypt_rng_buffer);
out:
mutex_unlock(&zcrypt_rng_mutex);
return rc;
@@ -2087,7 +2088,7 @@ void zcrypt_rng_device_remove(void)
zcrypt_rng_device_count--;
if (zcrypt_rng_device_count == 0) {
hwrng_unregister(&zcrypt_rng_dev);
- free_page((unsigned long) zcrypt_rng_buffer);
+ free_page((unsigned long)zcrypt_rng_buffer);
}
mutex_unlock(&zcrypt_rng_mutex);
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 93e77e83ad14..f299deb8b8c7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -170,7 +170,7 @@ static inline unsigned long z_copy_from_user(bool userspace,
{
if (likely(userspace))
return copy_from_user(to, from, n);
- memcpy(to, (void __force *) from, n);
+ memcpy(to, (void __force *)from, n);
return 0;
}
@@ -181,7 +181,7 @@ static inline unsigned long z_copy_to_user(bool userspace,
{
if (likely(userspace))
return copy_to_user(to, from, n);
- memcpy((void __force *) to, from, n);
+ memcpy((void __force *)to, from, n);
return 0;
}
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index fcbd537530e8..6ca675042416 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -138,7 +138,7 @@ struct zcrypt_card *zcrypt_card_alloc(void)
{
struct zcrypt_card *zc;
- zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
+ zc = kzalloc(sizeof(*zc), GFP_KERNEL);
if (!zc)
return NULL;
INIT_LIST_HEAD(&zc->list);
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index f09bb850763b..6229ba9c56d9 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -11,7 +11,7 @@
#ifndef _ZCRYPT_CCA_KEY_H_
#define _ZCRYPT_CCA_KEY_H_
-struct T6_keyBlock_hdr {
+struct t6_keyblock_hdr {
unsigned short blen;
unsigned short ulen;
unsigned short flags;
@@ -63,7 +63,7 @@ struct cca_public_sec {
* complement of the residue modulo 8 of the sum of
* (p_len + q_len + dp_len + dq_len + u_len).
*/
-struct cca_pvt_ext_CRT_sec {
+struct cca_pvt_ext_crt_sec {
unsigned char section_identifier;
unsigned char version;
unsigned short section_length;
@@ -108,9 +108,9 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
.section_identifier = 0x04,
};
struct {
- struct T6_keyBlock_hdr t6_hdr;
- struct cca_token_hdr pubHdr;
- struct cca_public_sec pubSec;
+ struct t6_keyblock_hdr t6_hdr;
+ struct cca_token_hdr pubhdr;
+ struct cca_public_sec pubsec;
char exponent[0];
} __packed *key = p;
unsigned char *temp;
@@ -127,8 +127,8 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
memset(key, 0, sizeof(*key));
- key->pubHdr = static_pub_hdr;
- key->pubSec = static_pub_sec;
+ key->pubhdr = static_pub_hdr;
+ key->pubsec = static_pub_sec;
/* key parameter block */
temp = key->exponent;
@@ -146,16 +146,16 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
return -EFAULT;
- key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
- key->pubSec.modulus_byte_len = mex->inputdatalength;
- key->pubSec.exponent_len = mex->inputdatalength - i;
- key->pubSec.section_length = sizeof(key->pubSec) +
- 2*mex->inputdatalength - i;
- key->pubHdr.token_length =
- key->pubSec.section_length + sizeof(key->pubHdr);
- key->t6_hdr.ulen = key->pubHdr.token_length + 4;
- key->t6_hdr.blen = key->pubHdr.token_length + 6;
- return sizeof(*key) + 2*mex->inputdatalength - i;
+ key->pubsec.modulus_bit_len = 8 * mex->inputdatalength;
+ key->pubsec.modulus_byte_len = mex->inputdatalength;
+ key->pubsec.exponent_len = mex->inputdatalength - i;
+ key->pubsec.section_length = sizeof(key->pubsec) +
+ 2 * mex->inputdatalength - i;
+ key->pubhdr.token_length =
+ key->pubsec.section_length + sizeof(key->pubhdr);
+ key->t6_hdr.ulen = key->pubhdr.token_length + 4;
+ key->t6_hdr.blen = key->pubhdr.token_length + 6;
+ return sizeof(*key) + 2 * mex->inputdatalength - i;
}
/**
@@ -177,9 +177,9 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
};
static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
struct {
- struct T6_keyBlock_hdr t6_hdr;
+ struct t6_keyblock_hdr t6_hdr;
struct cca_token_hdr token;
- struct cca_pvt_ext_CRT_sec pvt;
+ struct cca_pvt_ext_crt_sec pvt;
char key_parts[0];
} __packed *key = p;
struct cca_public_sec *pub;
@@ -198,8 +198,8 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
short_len = (crt->inputdatalength + 1) / 2;
long_len = short_len + 8;
- pad_len = -(3*long_len + 2*short_len) & 7;
- key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
+ pad_len = -(3 * long_len + 2 * short_len) & 7;
+ key_len = 3 * long_len + 2 * short_len + pad_len + crt->inputdatalength;
size = sizeof(*key) + key_len + sizeof(*pub) + 3;
/* parameter block.key block */
@@ -223,15 +223,15 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
/* key parts */
if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
copy_from_user(key->key_parts + long_len,
- crt->nq_prime, short_len) ||
+ crt->nq_prime, short_len) ||
copy_from_user(key->key_parts + long_len + short_len,
- crt->bp_key, long_len) ||
- copy_from_user(key->key_parts + 2*long_len + short_len,
- crt->bq_key, short_len) ||
- copy_from_user(key->key_parts + 2*long_len + 2*short_len,
- crt->u_mult_inv, long_len))
+ crt->bp_key, long_len) ||
+ copy_from_user(key->key_parts + 2 * long_len + short_len,
+ crt->bq_key, short_len) ||
+ copy_from_user(key->key_parts + 2 * long_len + 2 * short_len,
+ crt->u_mult_inv, long_len))
return -EFAULT;
- memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
+ memset(key->key_parts + 3 * long_len + 2 * short_len + pad_len,
0xff, crt->inputdatalength);
pub = (struct cca_public_sec *)(key->key_parts + key_len);
*pub = static_cca_pub_sec;
@@ -241,7 +241,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
* section. So, an arbitrary public exponent of 0x010001 will be
* used.
*/
- memcpy((char *) (pub + 1), pk_exponent, 3);
+ memcpy((char *)(pub + 1), pk_exponent, 3);
return size;
}
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 6a3c2b460965..60ba20a133be 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -53,26 +53,26 @@ static DEFINE_SPINLOCK(cca_info_list_lock);
int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
const u8 *token, int keybitsize)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+ struct secaeskeytoken *t = (struct secaeskeytoken *)token;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (t->type != TOKTYPE_CCA_INTERNAL) {
if (dbg)
DBF("%s token check failed, type 0x%02x != 0x%02x\n",
- __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ __func__, (int)t->type, TOKTYPE_CCA_INTERNAL);
return -EINVAL;
}
if (t->version != TOKVER_CCA_AES) {
if (dbg)
DBF("%s token check failed, version 0x%02x != 0x%02x\n",
- __func__, (int) t->version, TOKVER_CCA_AES);
+ __func__, (int)t->version, TOKVER_CCA_AES);
return -EINVAL;
}
if (keybitsize > 0 && t->bitsize != keybitsize) {
if (dbg)
DBF("%s token check failed, bitsize %d != %d\n",
- __func__, (int) t->bitsize, keybitsize);
+ __func__, (int)t->bitsize, keybitsize);
return -EINVAL;
}
@@ -93,7 +93,7 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
const u8 *token, int keybitsize,
int checkcpacfexport)
{
- struct cipherkeytoken *t = (struct cipherkeytoken *) token;
+ struct cipherkeytoken *t = (struct cipherkeytoken *)token;
bool keybitsizeok = true;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
@@ -101,37 +101,37 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
if (t->type != TOKTYPE_CCA_INTERNAL) {
if (dbg)
DBF("%s token check failed, type 0x%02x != 0x%02x\n",
- __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ __func__, (int)t->type, TOKTYPE_CCA_INTERNAL);
return -EINVAL;
}
if (t->version != TOKVER_CCA_VLSC) {
if (dbg)
DBF("%s token check failed, version 0x%02x != 0x%02x\n",
- __func__, (int) t->version, TOKVER_CCA_VLSC);
+ __func__, (int)t->version, TOKVER_CCA_VLSC);
return -EINVAL;
}
if (t->algtype != 0x02) {
if (dbg)
DBF("%s token check failed, algtype 0x%02x != 0x02\n",
- __func__, (int) t->algtype);
+ __func__, (int)t->algtype);
return -EINVAL;
}
if (t->keytype != 0x0001) {
if (dbg)
DBF("%s token check failed, keytype 0x%04x != 0x0001\n",
- __func__, (int) t->keytype);
+ __func__, (int)t->keytype);
return -EINVAL;
}
if (t->plfver != 0x00 && t->plfver != 0x01) {
if (dbg)
DBF("%s token check failed, unknown plfver 0x%02x\n",
- __func__, (int) t->plfver);
+ __func__, (int)t->plfver);
return -EINVAL;
}
if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
if (dbg)
DBF("%s token check failed, unknown wpllen %d\n",
- __func__, (int) t->wpllen);
+ __func__, (int)t->wpllen);
return -EINVAL;
}
if (keybitsize > 0) {
@@ -180,26 +180,26 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
const u8 *token, size_t keysize,
int checkcpacfexport)
{
- struct eccprivkeytoken *t = (struct eccprivkeytoken *) token;
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *)token;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (t->type != TOKTYPE_CCA_INTERNAL_PKA) {
if (dbg)
DBF("%s token check failed, type 0x%02x != 0x%02x\n",
- __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA);
+ __func__, (int)t->type, TOKTYPE_CCA_INTERNAL_PKA);
return -EINVAL;
}
if (t->len > keysize) {
if (dbg)
DBF("%s token check failed, len %d > keysize %zu\n",
- __func__, (int) t->len, keysize);
+ __func__, (int)t->len, keysize);
return -EINVAL;
}
if (t->secid != 0x20) {
if (dbg)
DBF("%s token check failed, secid 0x%02x != 0x20\n",
- __func__, (int) t->secid);
+ __func__, (int)t->secid);
return -EINVAL;
}
if (checkcpacfexport && !(t->kutc & 0x01)) {
@@ -222,9 +222,9 @@ EXPORT_SYMBOL(cca_check_sececckeytoken);
* on failure.
*/
static int alloc_and_prep_cprbmem(size_t paramblen,
- u8 **pcprbmem,
- struct CPRBX **preqCPRB,
- struct CPRBX **prepCPRB)
+ u8 **p_cprb_mem,
+ struct CPRBX **p_req_cprb,
+ struct CPRBX **p_rep_cprb)
{
u8 *cprbmem;
size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
@@ -238,8 +238,8 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
if (!cprbmem)
return -ENOMEM;
- preqcblk = (struct CPRBX *) cprbmem;
- prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
+ preqcblk = (struct CPRBX *)cprbmem;
+ prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen);
/* fill request cprb struct */
preqcblk->cprb_len = sizeof(struct CPRBX);
@@ -248,14 +248,14 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
preqcblk->rpl_msgbl = cprbplusparamblen;
if (paramblen) {
preqcblk->req_parmb =
- ((u8 __user *) preqcblk) + sizeof(struct CPRBX);
+ ((u8 __user *)preqcblk) + sizeof(struct CPRBX);
preqcblk->rpl_parmb =
- ((u8 __user *) prepcblk) + sizeof(struct CPRBX);
+ ((u8 __user *)prepcblk) + sizeof(struct CPRBX);
}
- *pcprbmem = cprbmem;
- *preqCPRB = preqcblk;
- *prepCPRB = prepcblk;
+ *p_cprb_mem = cprbmem;
+ *p_req_cprb = preqcblk;
+ *p_rep_cprb = prepcblk;
return 0;
}
@@ -286,9 +286,9 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb,
pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
pxcrb->request_control_blk_length =
preqcblk->cprb_len + preqcblk->req_parml;
- pxcrb->request_control_blk_addr = (void __user *) preqcblk;
+ pxcrb->request_control_blk_addr = (void __user *)preqcblk;
pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
- pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
+ pxcrb->reply_control_blk_addr = (void __user *)prepcblk;
}
/*
@@ -345,7 +345,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with KG request */
- preqparm = (struct kgreqparm __force *) preqcblk->req_parmb;
+ preqparm = (struct kgreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "KG", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
@@ -387,7 +387,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -395,16 +395,16 @@ int cca_genseckey(u16 cardnr, u16 domain,
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct kgrepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct kgrepparm *)ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
@@ -419,7 +419,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
/* check secure key token */
rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
- prepparm->lv3.keyblock.tok, 8*keysize);
+ prepparm->lv3.keyblock.tok, 8 * keysize);
if (rc) {
rc = -EIO;
goto out;
@@ -486,7 +486,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
preqcblk->domain = domain;
/* fill request cprb param block with CM request */
- preqparm = (struct cmreqparm __force *) preqcblk->req_parmb;
+ preqparm = (struct cmreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "CM", 2);
memcpy(preqparm->rule_array, "AES ", 8);
preqparm->rule_array_len =
@@ -512,7 +512,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
}
preqparm->lv1.len = sizeof(struct lv1) + keysize;
memcpy(preqparm->lv1.clrkey, clrkey, keysize);
- plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
+ plv2 = (struct lv2 *)(((u8 *)&preqparm->lv2) + keysize);
plv2->len = sizeof(struct lv2);
plv2->keyid.len = sizeof(struct keyid);
plv2->keyid.attr = 0x30;
@@ -525,7 +525,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -533,16 +533,16 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct cmrepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct cmrepparm *)ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
@@ -557,7 +557,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
/* check secure key token */
rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
- prepparm->lv3.keyblock.tok, 8*keysize);
+ prepparm->lv3.keyblock.tok, 8 * keysize);
if (rc) {
rc = -EIO;
goto out;
@@ -632,7 +632,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with USK request */
- preqparm = (struct uskreqparm __force *) preqcblk->req_parmb;
+ preqparm = (struct uskreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "US", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
@@ -652,7 +652,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -660,8 +660,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -671,37 +671,37 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
if (prepcblk->ccp_rscode != 0) {
DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct uskrepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct uskrepparm *)ptr;
/* check the returned keyblock */
if (prepparm->lv3.ckb.version != 0x01 &&
prepparm->lv3.ckb.version != 0x02) {
DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int) prepparm->lv3.ckb.version);
+ __func__, (int)prepparm->lv3.ckb.version);
rc = -EIO;
goto out;
}
	/* copy the translated protected key */
switch (prepparm->lv3.ckb.len) {
- case 16+32:
+ case 16 + 32:
/* AES 128 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_128;
break;
- case 24+32:
+ case 24 + 32:
/* AES 192 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_192;
break;
- case 32+32:
+ case 32 + 32:
/* AES 256 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_256;
@@ -751,7 +751,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
struct gkreqparm {
u8 subfunc_code[2];
u16 rule_array_len;
- char rule_array[2*8];
+ char rule_array[2 * 8];
struct {
u16 len;
u8 key_type_1[8];
@@ -827,10 +827,10 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
preqcblk->req_parml = sizeof(struct gkreqparm);
/* prepare request param block with GK request */
- preqparm = (struct gkreqparm __force *) preqcblk->req_parmb;
+ preqparm = (struct gkreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "GK", 2);
preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
- memcpy(preqparm->rule_array, "AES OP ", 2*8);
+ memcpy(preqparm->rule_array, "AES OP ", 2 * 8);
/* prepare vud block */
preqparm->vud.len = sizeof(preqparm->vud);
@@ -869,9 +869,9 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
/* patch the skeleton key token export flags inside the kb block */
if (keygenflags) {
- t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1;
- t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
- t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ t = (struct cipherkeytoken *)preqparm->kb.tlv3.gen_key_id_1;
+ t->kmf1 |= (u16)(keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16)~(keygenflags & 0x000000FF);
}
/* prepare xcrb struct */
@@ -882,7 +882,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -891,16 +891,16 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
DEBUG_ERR(
"%s cipher key generate failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct gkrepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct gkrepparm *)ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
@@ -921,7 +921,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
}
/* copy the generated vlsc key token */
- t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key;
+ t = (struct cipherkeytoken *)prepparm->kb.tlv1.gen_key;
if (keybuf) {
if (*keybufsize >= t->len)
memcpy(keybuf, t, t->len);
@@ -1006,7 +1006,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
preqcblk->req_parml = 0;
/* prepare request param block with IP request */
- preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb;
+ preq_ra_block = (struct rule_array_block __force *)preqcblk->req_parmb;
memcpy(preq_ra_block->subfunc_code, "IP", 2);
preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
memcpy(preq_ra_block->rule_array, rule_array_1, 8);
@@ -1050,7 +1050,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -1059,16 +1059,16 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
DEBUG_ERR(
"%s CSNBKPI2 failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct iprepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct iprepparm *)ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
@@ -1082,7 +1082,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
/* do not check the key here, it may be incomplete */
/* copy the vlsc key token back */
- t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token;
+ t = (struct cipherkeytoken *)prepparm->kb.tlv1.key_token;
memcpy(key_token, t, t->len);
*key_token_size = t->len;
@@ -1117,9 +1117,9 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
/* patch the skeleton key token export flags */
if (keygenflags) {
- t = (struct cipherkeytoken *) token;
- t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
- t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ t = (struct cipherkeytoken *)token;
+ t->kmf1 |= (u16)(keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16)~(keygenflags & 0x000000FF);
}
/*
@@ -1241,7 +1241,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
preqcblk->domain = domain;
/* fill request cprb param block with AU request */
- preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
+ preqparm = (struct aureqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "AU", 2);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len)
@@ -1267,7 +1267,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -1276,8 +1276,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
DEBUG_ERR(
"%s unwrap secure key failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -1288,44 +1288,44 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
DEBUG_WARN(
"%s unwrap secure key warning, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct aurepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct aurepparm *)ptr;
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x01 &&
prepparm->vud.ckb.version != 0x02) {
DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int) prepparm->vud.ckb.version);
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x02) {
DEBUG_ERR(
"%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
- __func__, (int) prepparm->vud.ckb.algo);
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
switch (prepparm->vud.ckb.keylen) {
- case 16+32:
+ case 16 + 32:
/* AES 128 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_128;
break;
- case 24+32:
+ case 24 + 32:
/* AES 192 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_192;
break;
- case 32+32:
+ case 32 + 32:
/* AES 256 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_256;
@@ -1410,7 +1410,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
preqcblk->domain = domain;
/* fill request cprb param block with AU request */
- preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
+ preqparm = (struct aureqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "AU", 2);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len)
@@ -1436,7 +1436,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -1445,8 +1445,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
DEBUG_ERR(
"%s unwrap secure key failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -1457,26 +1457,26 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
DEBUG_WARN(
"%s unwrap secure key warning, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct aurepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct aurepparm *)ptr;
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x02) {
DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
- __func__, (int) prepparm->vud.ckb.version);
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x81) {
DEBUG_ERR(
"%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
- __func__, (int) prepparm->vud.ckb.algo);
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
@@ -1537,7 +1537,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with FQ request */
- preqparm = (struct fqreqparm __force *) preqcblk->req_parmb;
+ preqparm = (struct fqreqparm __force *)preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
@@ -1553,7 +1553,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -1561,20 +1561,20 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
if (prepcblk->ccp_rtcode != 0) {
DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
__func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
/* process response cprb param block */
- ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepcblk->rpl_parmb = (u8 __user *) ptr;
- prepparm = (struct fqrepparm *) ptr;
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct fqrepparm *)ptr;
ptr = prepparm->lvdata;
/* check and possibly copy reply rule array */
- len = *((u16 *) ptr);
+ len = *((u16 *)ptr);
if (len > sizeof(u16)) {
ptr += sizeof(u16);
len -= sizeof(u16);
@@ -1585,7 +1585,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
ptr += len;
}
	/* check and possibly copy reply var array */
- len = *((u16 *) ptr);
+ len = *((u16 *)ptr);
if (len > sizeof(u16)) {
ptr += sizeof(u16);
len -= sizeof(u16);
@@ -1696,21 +1696,30 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
ci->hwtype = devstat.hwtype;
/* prep page for rule array and var array use */
- pg = (u8 *) __get_free_page(GFP_KERNEL);
+ pg = (u8 *)__get_free_page(GFP_KERNEL);
if (!pg)
return -ENOMEM;
rarray = pg;
- varray = pg + PAGE_SIZE/2;
- rlen = vlen = PAGE_SIZE/2;
+ varray = pg + PAGE_SIZE / 2;
+ rlen = vlen = PAGE_SIZE / 2;
/* QF for this card/domain */
rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
rarray, &rlen, varray, &vlen);
- if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
+ if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) {
memcpy(ci->serial, rarray, 8);
- ci->new_aes_mk_state = (char) rarray[7*8];
- ci->cur_aes_mk_state = (char) rarray[8*8];
- ci->old_aes_mk_state = (char) rarray[9*8];
+ ci->new_asym_mk_state = (char)rarray[4 * 8];
+ ci->cur_asym_mk_state = (char)rarray[5 * 8];
+ ci->old_asym_mk_state = (char)rarray[6 * 8];
+ if (ci->old_asym_mk_state == '2')
+ memcpy(ci->old_asym_mkvp, varray + 64, 16);
+ if (ci->cur_asym_mk_state == '2')
+ memcpy(ci->cur_asym_mkvp, varray + 84, 16);
+ if (ci->new_asym_mk_state == '3')
+ memcpy(ci->new_asym_mkvp, varray + 104, 16);
+ ci->new_aes_mk_state = (char)rarray[7 * 8];
+ ci->cur_aes_mk_state = (char)rarray[8 * 8];
+ ci->old_aes_mk_state = (char)rarray[9 * 8];
if (ci->old_aes_mk_state == '2')
memcpy(&ci->old_aes_mkvp, varray + 172, 8);
if (ci->cur_aes_mk_state == '2')
@@ -1721,13 +1730,13 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
}
if (!found)
goto out;
- rlen = vlen = PAGE_SIZE/2;
+ rlen = vlen = PAGE_SIZE / 2;
rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
rarray, &rlen, varray, &vlen);
- if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
- ci->new_apka_mk_state = (char) rarray[10*8];
- ci->cur_apka_mk_state = (char) rarray[11*8];
- ci->old_apka_mk_state = (char) rarray[12*8];
+ if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) {
+ ci->new_apka_mk_state = (char)rarray[10 * 8];
+ ci->cur_apka_mk_state = (char)rarray[11 * 8];
+ ci->old_apka_mk_state = (char)rarray[12 * 8];
if (ci->old_apka_mk_state == '2')
memcpy(&ci->old_apka_mkvp, varray + 208, 8);
if (ci->cur_apka_mk_state == '2')
@@ -1738,7 +1747,7 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
}
out:
- free_page((unsigned long) pg);
+ free_page((unsigned long)pg);
return found == 2 ? 0 : -ENOENT;
}
@@ -1846,8 +1855,9 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
if (pdomain)
*pdomain = dom;
rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1);
- } else
+ } else {
rc = -ENODEV;
+ }
kvfree(device_status);
return rc;
@@ -1861,7 +1871,7 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
{
u64 mkvp;
int minhwtype = 0;
- const struct keytoken_header *hdr = (struct keytoken_header *) key;
+ const struct keytoken_header *hdr = (struct keytoken_header *)key;
if (hdr->type != TOKTYPE_CCA_INTERNAL)
return -EINVAL;
@@ -1954,7 +1964,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
}
		/* apqn passed all filtering criteria, add to the array */
if (_nr_apqns < 256)
- _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
}
/* nothing found ? */
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 3513cd8ab9bc..78bf5631848e 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -251,12 +251,18 @@ struct cca_info {
char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */
char cur_apka_mk_state; /* '1' invalid, '2' valid */
char old_apka_mk_state; /* '1' invalid, '2' valid */
+ char new_asym_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_asym_mk_state; /* '1' invalid, '2' valid */
+ char old_asym_mk_state; /* '1' invalid, '2' valid */
u64 new_aes_mkvp; /* truncated sha256 of new aes master key */
u64 cur_aes_mkvp; /* truncated sha256 of current aes master key */
u64 old_aes_mkvp; /* truncated sha256 of old aes master key */
u64 new_apka_mkvp; /* truncated sha256 of new apka master key */
u64 cur_apka_mkvp; /* truncated sha256 of current apka mk */
u64 old_apka_mkvp; /* truncated sha256 of old apka mk */
+ u8 new_asym_mkvp[16]; /* verify pattern of new asym master key */
+ u8 cur_asym_mkvp[16]; /* verify pattern of current asym master key */
+ u8 old_asym_mkvp[16]; /* verify pattern of old asym master key */
char serial[9]; /* serial number (8 ascii numbers + 0x00) */
};
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 2bd49950ba81..83f692c9c197 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -34,10 +34,11 @@
#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
* (max outputdatalength) +
- * type80_hdr*/
+ * type80_hdr
+ */
#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
-#define CEX2A_CLEANUP_TIME (15*HZ)
+#define CEX2A_CLEANUP_TIME (15 * HZ)
#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
MODULE_AUTHOR("IBM Corporation");
@@ -117,9 +118,8 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
zc->online = 1;
rc = zcrypt_card_register(zc);
- if (rc) {
+ if (rc)
zcrypt_card_free(zc);
- }
return rc;
}
@@ -176,9 +176,8 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
aq->request_timeout = CEX2A_CLEANUP_TIME;
dev_set_drvdata(&ap_dev->device, zq);
rc = zcrypt_queue_register(zq);
- if (rc) {
+ if (rc)
zcrypt_queue_free(zq);
- }
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 6360fdd06160..cb7849defce3 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -31,8 +31,8 @@
#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
#define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */
#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024)
-#define CEX2C_CLEANUP_TIME (15*HZ)
+#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12 * 1024)
+#define CEX2C_CLEANUP_TIME (15 * HZ)
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \
@@ -200,11 +200,11 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
int rc, i;
ap_init_message(&ap_msg);
- ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
+ ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL);
if (!ap_msg.msg)
return -ENOMEM;
- rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+ rng_type6cprb_msgx(&ap_msg, 4, &domain);
msg = ap_msg.msg;
msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
@@ -233,7 +233,7 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
else
rc = 0;
out_free:
- free_page((unsigned long) ap_msg.msg);
+ free_page((unsigned long)ap_msg.msg);
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index fe5664c7589e..b03916b7538b 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -33,7 +33,7 @@
* But the maximum time limit managed by the stomper code is set to 60sec.
* Hence we have to wait at least that time period.
*/
-#define CEX4_CLEANUP_TIME (900*HZ)
+#define CEX4_CLEANUP_TIME (900 * HZ)
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX[45678] Cryptographic Card device driver, " \
@@ -123,11 +123,12 @@ static ssize_t cca_mkvps_show(struct device *dev,
&ci, zq->online);
if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
- n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_aes_mk_state - '1'],
- ci.new_aes_mkvp);
+ n += scnprintf(buf + n, PAGE_SIZE,
+ "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
else
- n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+ n += scnprintf(buf + n, PAGE_SIZE, "AES NEW: - -\n");
if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
n += scnprintf(buf + n, PAGE_SIZE - n,
@@ -169,6 +170,33 @@ static ssize_t cca_mkvps_show(struct device *dev,
else
n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+ if (ci.new_asym_mk_state >= '1' && ci.new_asym_mk_state <= '3')
+ n += scnprintf(buf + n, PAGE_SIZE,
+ "ASYM NEW: %s 0x%016llx%016llx\n",
+ new_state[ci.new_asym_mk_state - '1'],
+ *((u64 *)(ci.new_asym_mkvp)),
+ *((u64 *)(ci.new_asym_mkvp + sizeof(u64))));
+ else
+ n += scnprintf(buf + n, PAGE_SIZE, "ASYM NEW: - -\n");
+
+ if (ci.cur_asym_mk_state >= '1' && ci.cur_asym_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "ASYM CUR: %s 0x%016llx%016llx\n",
+ cao_state[ci.cur_asym_mk_state - '1'],
+ *((u64 *)(ci.cur_asym_mkvp)),
+ *((u64 *)(ci.cur_asym_mkvp + sizeof(u64))));
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM CUR: - -\n");
+
+ if (ci.old_asym_mk_state >= '1' && ci.old_asym_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "ASYM OLD: %s 0x%016llx%016llx\n",
+ cao_state[ci.old_asym_mk_state - '1'],
+ *((u64 *)(ci.old_asym_mkvp)),
+ *((u64 *)(ci.old_asym_mkvp + sizeof(u64))));
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM OLD: - -\n");
+
return n;
}
@@ -336,8 +364,9 @@ static ssize_t ep11_mkvps_show(struct device *dev,
bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
n += 2 * sizeof(di.cur_wkvp);
n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
- } else
+ } else {
n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+ }
if (di.new_wk_state == '0') {
n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
@@ -348,8 +377,9 @@ static ssize_t ep11_mkvps_show(struct device *dev,
bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
n += 2 * sizeof(di.new_wkvp);
n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
- } else
+ } else {
n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+ }
return n;
}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 98d33f932b0b..b1c29017be5b 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -119,8 +119,8 @@ static void __exit card_cache_free(void)
int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
const u8 *key, size_t keylen, int checkcpacfexp)
{
- struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
- struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
@@ -133,38 +133,38 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
if (hdr->type != TOKTYPE_NON_CCA) {
if (dbg)
DBF("%s key check failed, type 0x%02x != 0x%02x\n",
- __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+ __func__, (int)hdr->type, TOKTYPE_NON_CCA);
return -EINVAL;
}
if (hdr->hver != 0x00) {
if (dbg)
DBF("%s key check failed, header version 0x%02x != 0x00\n",
- __func__, (int) hdr->hver);
+ __func__, (int)hdr->hver);
return -EINVAL;
}
if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) {
if (dbg)
DBF("%s key check failed, version 0x%02x != 0x%02x\n",
- __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER);
+ __func__, (int)hdr->version, TOKVER_EP11_AES_WITH_HEADER);
return -EINVAL;
}
if (hdr->len > keylen) {
if (dbg)
DBF("%s key check failed, header len %d keylen %zu mismatch\n",
- __func__, (int) hdr->len, keylen);
+ __func__, (int)hdr->len, keylen);
return -EINVAL;
}
if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
if (dbg)
DBF("%s key check failed, header len %d < %zu\n",
- __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+ __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
if (kb->version != EP11_STRUCT_MAGIC) {
if (dbg)
DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
- __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ __func__, (int)kb->version, EP11_STRUCT_MAGIC);
return -EINVAL;
}
if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
@@ -186,8 +186,8 @@ EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
const u8 *key, size_t keylen, int checkcpacfexp)
{
- struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
- struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
@@ -200,38 +200,38 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
if (hdr->type != TOKTYPE_NON_CCA) {
if (dbg)
DBF("%s key check failed, type 0x%02x != 0x%02x\n",
- __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+ __func__, (int)hdr->type, TOKTYPE_NON_CCA);
return -EINVAL;
}
if (hdr->hver != 0x00) {
if (dbg)
DBF("%s key check failed, header version 0x%02x != 0x00\n",
- __func__, (int) hdr->hver);
+ __func__, (int)hdr->hver);
return -EINVAL;
}
if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) {
if (dbg)
DBF("%s key check failed, version 0x%02x != 0x%02x\n",
- __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
+ __func__, (int)hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
return -EINVAL;
}
if (hdr->len > keylen) {
if (dbg)
DBF("%s key check failed, header len %d keylen %zu mismatch\n",
- __func__, (int) hdr->len, keylen);
+ __func__, (int)hdr->len, keylen);
return -EINVAL;
}
if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
if (dbg)
DBF("%s key check failed, header len %d < %zu\n",
- __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+ __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
if (kb->version != EP11_STRUCT_MAGIC) {
if (dbg)
DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
- __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ __func__, (int)kb->version, EP11_STRUCT_MAGIC);
return -EINVAL;
}
if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
@@ -254,7 +254,7 @@ EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
const u8 *key, size_t keylen, int checkcpacfexp)
{
- struct ep11keyblob *kb = (struct ep11keyblob *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
@@ -267,32 +267,32 @@ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
if (kb->head.type != TOKTYPE_NON_CCA) {
if (dbg)
DBF("%s key check failed, type 0x%02x != 0x%02x\n",
- __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+ __func__, (int)kb->head.type, TOKTYPE_NON_CCA);
return -EINVAL;
}
if (kb->head.version != TOKVER_EP11_AES) {
if (dbg)
DBF("%s key check failed, version 0x%02x != 0x%02x\n",
- __func__, (int) kb->head.version, TOKVER_EP11_AES);
+ __func__, (int)kb->head.version, TOKVER_EP11_AES);
return -EINVAL;
}
if (kb->head.len > keylen) {
if (dbg)
DBF("%s key check failed, header len %d keylen %zu mismatch\n",
- __func__, (int) kb->head.len, keylen);
+ __func__, (int)kb->head.len, keylen);
return -EINVAL;
}
if (kb->head.len < sizeof(*kb)) {
if (dbg)
DBF("%s key check failed, header len %d < %zu\n",
- __func__, (int) kb->head.len, sizeof(*kb));
+ __func__, (int)kb->head.len, sizeof(*kb));
return -EINVAL;
}
if (kb->version != EP11_STRUCT_MAGIC) {
if (dbg)
DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
- __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ __func__, (int)kb->version, EP11_STRUCT_MAGIC);
return -EINVAL;
}
if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
@@ -347,11 +347,11 @@ static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
}
if (valuelen > 127) {
ptr[1] = 0x81;
- ptr[2] = (u8) valuelen;
+ ptr[2] = (u8)valuelen;
memcpy(ptr + 3, pvalue, valuelen);
return 3 + valuelen;
}
- ptr[1] = (u8) valuelen;
+ ptr[1] = (u8)valuelen;
memcpy(ptr + 2, pvalue, valuelen);
return 2 + valuelen;
}
@@ -389,11 +389,11 @@ static inline void prep_urb(struct ep11_urb *u,
struct ep11_cprb *req, size_t req_len,
struct ep11_cprb *rep, size_t rep_len)
{
- u->targets = (u8 __user *) t;
+ u->targets = (u8 __user *)t;
u->targets_num = nt;
- u->req = (u8 __user *) req;
+ u->req = (u8 __user *)req;
u->req_len = req_len;
- u->resp = (u8 __user *) rep;
+ u->resp = (u8 __user *)rep;
u->resp_len = rep_len;
}
@@ -462,7 +462,6 @@ static int check_reply_pl(const u8 *pl, const char *func)
return 0;
}
-
/*
* Helper function which does an ep11 query with given query type.
*/
@@ -496,7 +495,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
req = alloc_cprb(sizeof(struct ep11_info_req_pl));
if (!req)
goto out;
- req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+ req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req));
prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
req_pl->query_type_tag = 0x04;
req_pl->query_type_len = sizeof(u32);
@@ -508,10 +507,10 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
if (!rep)
goto out;
- rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+ rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = cardnr;
@@ -524,7 +523,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int) cardnr, (int) domain, rc);
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -543,7 +542,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
goto out;
}
- memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+ memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
out:
kfree(req);
@@ -592,7 +591,7 @@ int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
return -ENOMEM;
rc = ep11_query_info(card, AUTOSEL_DOM,
0x01 /* module info query */,
- sizeof(*pmqi), (u8 *) pmqi);
+ sizeof(*pmqi), (u8 *)pmqi);
if (rc) {
if (rc == -ENODEV)
card_cache_scrub(card);
@@ -632,7 +631,7 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
return -ENOMEM;
rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
- sizeof(*p_dom_info), (u8 *) p_dom_info);
+ sizeof(*p_dom_info), (u8 *)p_dom_info);
if (rc)
goto out;
@@ -644,8 +643,8 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
info->cur_wk_state = '1';
memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
}
- if (p_dom_info->dom_flags & 0x04 /* new wk present */
- || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+ if (p_dom_info->dom_flags & 0x04 || /* new wk present */
+ p_dom_info->dom_flags & 0x08 /* new wk committed */) {
info->new_wk_state =
p_dom_info->dom_flags & 0x08 ? '2' : '1';
memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
@@ -722,7 +721,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
req = alloc_cprb(sizeof(struct keygen_req_pl));
if (!req)
goto out;
- req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+ req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req));
api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
req_pl->var_tag = 0x04;
@@ -746,10 +745,10 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
rep = alloc_cprb(sizeof(struct keygen_rep_pl));
if (!rep)
goto out;
- rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+ rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
@@ -762,7 +761,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int) card, (int) domain, rc);
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -784,7 +783,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* copy key blob and set header values */
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
*keybufsize = rep_pl->data_len;
- kb = (struct ep11keyblob *) keybuf;
+ kb = (struct ep11keyblob *)keybuf;
kb->head.type = TOKTYPE_NON_CCA;
kb->head.len = rep_pl->data_len;
kb->head.version = TOKVER_EP11_AES;
@@ -844,7 +843,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
req = alloc_cprb(req_pl_size);
if (!req)
goto out;
- req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+ req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req));
prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
req_pl->var_tag = 0x04;
req_pl->var_len = sizeof(u32);
@@ -852,7 +851,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
req_pl->mech_tag = 0x04;
req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
- p = ((u8 *) req_pl) + sizeof(*req_pl);
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
if (iv) {
memcpy(p, iv, 16);
p += 16;
@@ -866,10 +865,10 @@ static int ep11_cryptsingle(u16 card, u16 domain,
rep = alloc_cprb(rep_pl_size);
if (!rep)
goto out;
- rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+ rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
@@ -882,7 +881,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int) card, (int) domain, rc);
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -894,13 +893,13 @@ static int ep11_cryptsingle(u16 card, u16 domain,
rc = -EIO;
goto out;
}
- p = ((u8 *) rep_pl) + sizeof(*rep_pl);
- if (rep_pl->data_lenfmt <= 127)
+ p = ((u8 *)rep_pl) + sizeof(*rep_pl);
+ if (rep_pl->data_lenfmt <= 127) {
n = rep_pl->data_lenfmt;
- else if (rep_pl->data_lenfmt == 0x81)
+ } else if (rep_pl->data_lenfmt == 0x81) {
n = *p++;
- else if (rep_pl->data_lenfmt == 0x82) {
- n = *((u16 *) p);
+ } else if (rep_pl->data_lenfmt == 0x82) {
+ n = *((u16 *)p);
p += 2;
} else {
DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
@@ -978,7 +977,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
req = alloc_cprb(req_pl_size);
if (!req)
goto out;
- req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+ req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req));
api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
req_pl->attr_tag = 0x04;
@@ -994,7 +993,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
req_pl->mech_tag = 0x04;
req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
- p = ((u8 *) req_pl) + sizeof(*req_pl);
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
if (iv) {
memcpy(p, iv, 16);
p += 16;
@@ -1014,10 +1013,10 @@ static int ep11_unwrapkey(u16 card, u16 domain,
rep = alloc_cprb(sizeof(struct uw_rep_pl));
if (!rep)
goto out;
- rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+ rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
@@ -1030,7 +1029,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int) card, (int) domain, rc);
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1052,7 +1051,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
/* copy key blob and set header values */
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
*keybufsize = rep_pl->data_len;
- kb = (struct ep11keyblob *) keybuf;
+ kb = (struct ep11keyblob *)keybuf;
kb->head.type = TOKTYPE_NON_CCA;
kb->head.len = rep_pl->data_len;
kb->head.version = TOKVER_EP11_AES;
@@ -1105,7 +1104,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
u8 *p;
/* maybe the session field holds a header with key info */
- kb = (struct ep11keyblob *) key;
+ kb = (struct ep11keyblob *)key;
if (kb->head.type == TOKTYPE_NON_CCA &&
kb->head.version == TOKVER_EP11_AES) {
has_header = true;
@@ -1120,7 +1119,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
goto out;
if (!mech || mech == 0x80060001)
req->flags |= 0x20; /* CPACF_WRAP needs special bit */
- req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+ req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req));
api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
req_pl->var_tag = 0x04;
@@ -1129,7 +1128,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
req_pl->mech_tag = 0x04;
req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
- p = ((u8 *) req_pl) + sizeof(*req_pl);
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
if (iv) {
memcpy(p, iv, 16);
p += 16;
@@ -1152,10 +1151,10 @@ static int ep11_wrapkey(u16 card, u16 domain,
rep = alloc_cprb(sizeof(struct wk_rep_pl));
if (!rep)
goto out;
- rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+ rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ urb = kmalloc(sizeof(*urb), GFP_KERNEL);
if (!urb)
goto out;
target.ap_id = card;
@@ -1168,7 +1167,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int) card, (int) domain, rc);
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1206,9 +1205,9 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
u8 encbuf[64], *kek = NULL;
size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
- if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+ if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
clrkeylen = keybitsize / 8;
- else {
+ } else {
DEBUG_ERR(
"%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
@@ -1233,7 +1232,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
__func__, rc);
goto out;
}
- kb = (struct ep11keyblob *) kek;
+ kb = (struct ep11keyblob *)kek;
memset(&kb->head, 0, sizeof(kb->head));
/* Step 2: encrypt clear key value with the kek key */
@@ -1282,17 +1281,17 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
struct ep11kblob_header *hdr;
/* key with or without header ? */
- hdr = (struct ep11kblob_header *) keyblob;
- if (hdr->type == TOKTYPE_NON_CCA
- && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
- || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
- && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
+ hdr = (struct ep11kblob_header *)keyblob;
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+ is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
/* EP11 AES or ECC key with header */
key = keyblob + sizeof(struct ep11kblob_header);
keylen = hdr->len - sizeof(struct ep11kblob_header);
- } else if (hdr->type == TOKTYPE_NON_CCA
- && hdr->version == TOKVER_EP11_AES
- && is_ep11_keyblob(keyblob)) {
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(keyblob)) {
/* EP11 AES key (old style) */
key = keyblob;
keylen = hdr->len;
@@ -1300,8 +1299,9 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
/* raw EP11 key blob */
key = keyblob;
keylen = keybloblen;
- } else
+ } else {
return -EINVAL;
+ }
/* alloc temp working buffer */
wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
@@ -1318,12 +1318,12 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
__func__, rc);
goto out;
}
- wki = (struct wk_info *) wkbuf;
+ wki = (struct wk_info *)wkbuf;
/* check struct version and pkey type */
if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
- __func__, (int) wki->version, (int) wki->pkeytype);
+ __func__, (int)wki->version, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
@@ -1332,24 +1332,24 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
switch (wki->pkeytype) {
case 1: /* AES */
switch (wki->pkeysize) {
- case 16+32:
+ case 16 + 32:
/* AES 128 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_128;
break;
- case 24+32:
+ case 24 + 32:
/* AES 192 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_192;
break;
- case 32+32:
+ case 32 + 32:
/* AES 256 protected key */
if (protkeytype)
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
- __func__, (int) wki->pkeysize);
+ __func__, (int)wki->pkeysize);
rc = -EIO;
goto out;
}
@@ -1363,7 +1363,7 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
case 2: /* TDES */
default:
DEBUG_ERR("%s unknown/unsupported key type %d\n",
- __func__, (int) wki->pkeytype);
+ __func__, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
@@ -1445,7 +1445,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
}
/* apqn passed all filtering criterons, add to the array */
if (_nr_apqns < 256)
- _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
}
/* nothing found ? */
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
index 1e02b197c003..07445041869f 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.h
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -50,7 +50,7 @@ struct ep11keyblob {
/* check ep11 key magic to find out if this is an ep11 key blob */
static inline bool is_ep11_keyblob(const u8 *key)
{
- struct ep11keyblob *kb = (struct ep11keyblob *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
return (kb->version == EP11_STRUCT_MAGIC);
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 8b0ce600b749..d36177e65a3d 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -121,10 +121,11 @@ static inline int convert_error(struct zcrypt_queue *zq,
ZCRYPT_DBF_WARN(
"%s dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
__func__, card, queue, ehdr->reply_code, apfs);
- } else
+ } else {
ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
__func__, card, queue,
ehdr->reply_code);
+ }
return -EAGAIN;
default:
/* Assume request is valid and a retry will be worth it */
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 259145aa393f..7d245645fdd5 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -158,7 +158,6 @@ struct type80_hdr {
int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
{
-
if (!mex->inputdatalength)
return -EINVAL;
@@ -174,7 +173,6 @@ int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
{
-
if (!crt->inputdatalength)
return -EINVAL;
@@ -239,8 +237,9 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
inp = meb3->message + sizeof(meb3->message) - mod_len;
- } else
+ } else {
return -EINVAL;
+ }
if (copy_from_user(mod, mex->n_modulus, mod_len) ||
copy_from_user(exp, mex->b_key, mod_len) ||
@@ -323,8 +322,9 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
dq = crb3->dq + sizeof(crb3->dq) - short_len;
u = crb3->u + sizeof(crb3->u) - short_len;
inp = crb3->message + sizeof(crb3->message) - mod_len;
- } else
+ } else {
return -EINVAL;
+ }
/*
* correct the offset of p, bp and mult_inv according zcrypt.h
@@ -392,7 +392,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
- unsigned char rtype = ((unsigned char *) reply->msg)[1];
+ unsigned char rtype = ((unsigned char *)reply->msg)[1];
switch (rtype) {
case TYPE82_RSP_CODE:
@@ -406,11 +406,11 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) rtype);
+ (int)rtype);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), (int) rtype);
+ AP_QID_QUEUE(zq->queue->qid), (int)rtype);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
@@ -447,10 +447,11 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq,
memcpy(msg->msg, reply->msg, len);
msg->len = len;
}
- } else
+ } else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ }
out:
- complete((struct completion *) msg->private);
+ complete((struct completion *)msg->private);
}
static atomic_t zcrypt_step = ATOMIC_INIT(0);
@@ -475,7 +476,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_cex2a_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
@@ -492,9 +493,11 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
rc = convert_response_cex2a(zq, ap_msg,
mex->outputdata,
mex->outputdatalength);
- } else
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
+
out:
ap_msg->private = NULL;
if (rc)
@@ -524,7 +527,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_cex2a_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
@@ -541,9 +544,11 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
rc = convert_response_cex2a(zq, ap_msg,
crt->outputdata,
crt->outputdatalength);
- } else
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
+
out:
ap_msg->private = NULL;
if (rc)
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 57d885158cf0..8fb34b8eeb18 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2022
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -29,12 +29,13 @@
#define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
-#define CEIL4(x) ((((x)+3)/4)*4)
+#define CEIL4(x) ((((x) + 3) / 4) * 4)
struct response_type {
struct completion work;
int type;
};
+
#define CEXXC_RESPONSE_TYPE_ICA 0
#define CEXXC_RESPONSE_TYPE_XCRB 1
#define CEXXC_RESPONSE_TYPE_EP11 2
@@ -44,63 +45,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-/*
- * CPRB
- * Note that all shorts, ints and longs are little-endian.
- * All pointer fields are 32-bits long, and mean nothing
- *
- * A request CPRB is followed by a request_parameter_block.
- *
- * The request (or reply) parameter block is organized thus:
- * function code
- * VUD block
- * key block
- */
-struct CPRB {
- unsigned short cprb_len; /* CPRB length */
- unsigned char cprb_ver_id; /* CPRB version id. */
- unsigned char pad_000; /* Alignment pad byte. */
- unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
- unsigned char srpi_verb; /* SRPI verb type */
- unsigned char flags; /* flags */
- unsigned char func_id[2]; /* function id */
- unsigned char checkpoint_flag; /* */
- unsigned char resv2; /* reserved */
- unsigned short req_parml; /* request parameter buffer */
- /* length 16-bit little endian */
- unsigned char req_parmp[4]; /* request parameter buffer *
- * pointer (means nothing: the *
- * parameter buffer follows *
- * the CPRB). */
- unsigned char req_datal[4]; /* request data buffer */
- /* length ULELONG */
- unsigned char req_datap[4]; /* request data buffer */
- /* pointer */
- unsigned short rpl_parml; /* reply parameter buffer */
- /* length 16-bit little endian */
- unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
- unsigned char rpl_parmp[4]; /* reply parameter buffer *
- * pointer (means nothing: the *
- * parameter buffer follows *
- * the CPRB). */
- unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
- unsigned char rpl_datap[4]; /* reply data buffer */
- /* pointer */
- unsigned short ccp_rscode; /* server reason code ULESHORT */
- unsigned short ccp_rtcode; /* server return code ULESHORT */
- unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
- unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
- unsigned char repd_datal[4]; /* replied data length ULELONG */
- unsigned char req_pc[2]; /* PC identifier */
- unsigned char res_origin[8]; /* resource origin */
- unsigned char mac_value[8]; /* Mac Value */
- unsigned char logon_id[8]; /* Logon Identifier */
- unsigned char usage_domain[2]; /* cdx */
- unsigned char resv3[18]; /* reserved for requestor */
- unsigned short svr_namel; /* server name length ULESHORT */
- unsigned char svr_name[8]; /* server name */
-} __packed;
-
struct function_and_rules_block {
unsigned char function_code[2];
unsigned short ulen;
@@ -235,7 +179,6 @@ int speed_idx_ep11(int req_type)
}
}
-
/*
* Convert a ICAMEX message to a type6 MEX message.
*
@@ -245,7 +188,7 @@ int speed_idx_ep11(int req_type)
*
* Returns 0 on success or negative errno value.
*/
-static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
+static int icamex_msg_to_type6mex_msgx(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
@@ -283,19 +226,19 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
return -EFAULT;
/* Set up key which is located after the variable length text. */
- size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength);
+ size = zcrypt_type6_mex_key_en(mex, msg->text + mex->inputdatalength);
if (size < 0)
return size;
size += sizeof(*msg) + mex->inputdatalength;
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
- msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+ msg->hdr.tocardlen1 = size - sizeof(msg->hdr);
+ msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
- msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+ msg->cprbx.rpl_msgbl = msg->hdr.fromcardlen1;
msg->fr = static_pke_fnr;
@@ -314,7 +257,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
*
* Returns 0 on success or negative errno value.
*/
-static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
+static int icacrt_msg_to_type6crt_msgx(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
@@ -360,8 +303,8 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
- msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+ msg->hdr.tocardlen1 = size - sizeof(msg->hdr);
+ msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
@@ -388,8 +331,8 @@ struct type86_fmt2_msg {
struct type86_fmt2_ext fmt2;
} __packed;
-static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
- struct ica_xcRB *xcRB,
+static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
+ struct ica_xcRB *xcrb,
unsigned int *fcode,
unsigned short **dom)
{
@@ -402,19 +345,19 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
struct CPRBX cprbx;
} __packed * msg = ap_msg->msg;
- int rcblen = CEIL4(xcRB->request_control_blk_length);
+ int rcblen = CEIL4(xcrb->request_control_blk_length);
int req_sumlen, resp_sumlen;
char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
char *function_code;
- if (CEIL4(xcRB->request_control_blk_length) <
- xcRB->request_control_blk_length)
+ if (CEIL4(xcrb->request_control_blk_length) <
+ xcrb->request_control_blk_length)
return -EINVAL; /* overflow after alignment*/
/* length checks */
ap_msg->len = sizeof(struct type6_hdr) +
- CEIL4(xcRB->request_control_blk_length) +
- xcRB->request_data_length;
+ CEIL4(xcrb->request_control_blk_length) +
+ xcrb->request_data_length;
if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
@@ -422,48 +365,49 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
* Overflow check
* sum must be greater (or equal) than the largest operand
*/
- req_sumlen = CEIL4(xcRB->request_control_blk_length) +
- xcRB->request_data_length;
- if ((CEIL4(xcRB->request_control_blk_length) <=
- xcRB->request_data_length) ?
- (req_sumlen < xcRB->request_data_length) :
- (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+ req_sumlen = CEIL4(xcrb->request_control_blk_length) +
+ xcrb->request_data_length;
+ if ((CEIL4(xcrb->request_control_blk_length) <=
+ xcrb->request_data_length) ?
+ req_sumlen < xcrb->request_data_length :
+ req_sumlen < CEIL4(xcrb->request_control_blk_length)) {
return -EINVAL;
}
- if (CEIL4(xcRB->reply_control_blk_length) <
- xcRB->reply_control_blk_length)
+ if (CEIL4(xcrb->reply_control_blk_length) <
+ xcrb->reply_control_blk_length)
return -EINVAL; /* overflow after alignment*/
/*
* Overflow check
* sum must be greater (or equal) than the largest operand
*/
- resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
- xcRB->reply_data_length;
- if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
- (resp_sumlen < xcRB->reply_data_length) :
- (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+ resp_sumlen = CEIL4(xcrb->reply_control_blk_length) +
+ xcrb->reply_data_length;
+ if ((CEIL4(xcrb->reply_control_blk_length) <=
+ xcrb->reply_data_length) ?
+ resp_sumlen < xcrb->reply_data_length :
+ resp_sumlen < CEIL4(xcrb->reply_control_blk_length)) {
return -EINVAL;
}
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
- memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
- msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
- if (xcRB->request_data_length) {
+ memcpy(msg->hdr.agent_id, &xcrb->agent_ID, sizeof(xcrb->agent_ID));
+ msg->hdr.tocardlen1 = xcrb->request_control_blk_length;
+ if (xcrb->request_data_length) {
msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
- msg->hdr.ToCardLen2 = xcRB->request_data_length;
+ msg->hdr.tocardlen2 = xcrb->request_data_length;
}
- msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
- msg->hdr.FromCardLen2 = xcRB->reply_data_length;
+ msg->hdr.fromcardlen1 = xcrb->reply_control_blk_length;
+ msg->hdr.fromcardlen2 = xcrb->reply_data_length;
/* prepare CPRB */
- if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr,
- xcRB->request_control_blk_length))
+ if (z_copy_from_user(userspace, &msg->cprbx, xcrb->request_control_blk_addr,
+ xcrb->request_control_blk_length))
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
- xcRB->request_control_blk_length)
+ xcrb->request_control_blk_length)
return -EINVAL;
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code,
@@ -473,8 +417,8 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
*dom = (unsigned short *)&msg->cprbx.domain;
/* check subfunction, US and AU need special flag with NQAP */
- if (memcmp(function_code, "US", 2) == 0
- || memcmp(function_code, "AU", 2) == 0)
+ if (memcmp(function_code, "US", 2) == 0 ||
+ memcmp(function_code, "AU", 2) == 0)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
#ifdef CONFIG_ZCRYPT_DEBUG
@@ -500,16 +444,16 @@ static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
}
/* copy data block */
- if (xcRB->request_data_length &&
- z_copy_from_user(userspace, req_data, xcRB->request_data_address,
- xcRB->request_data_length))
+ if (xcrb->request_data_length &&
+ z_copy_from_user(userspace, req_data, xcrb->request_data_address,
+ xcrb->request_data_length))
return -EFAULT;
return 0;
}
static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg,
- struct ep11_urb *xcRB,
+ struct ep11_urb *xcrb,
unsigned int *fcode,
unsigned int *domain)
{
@@ -539,25 +483,25 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
unsigned int dom_val; /* domain id */
} __packed * payload_hdr = NULL;
- if (CEIL4(xcRB->req_len) < xcRB->req_len)
+ if (CEIL4(xcrb->req_len) < xcrb->req_len)
return -EINVAL; /* overflow after alignment*/
/* length checks */
- ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcRB->req_len);
+ ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcrb->req_len);
if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
- if (CEIL4(xcRB->resp_len) < xcRB->resp_len)
+ if (CEIL4(xcrb->resp_len) < xcrb->resp_len)
return -EINVAL; /* overflow after alignment*/
/* prepare type6 header */
msg->hdr = static_type6_ep11_hdr;
- msg->hdr.ToCardLen1 = xcRB->req_len;
- msg->hdr.FromCardLen1 = xcRB->resp_len;
+ msg->hdr.tocardlen1 = xcrb->req_len;
+ msg->hdr.fromcardlen1 = xcrb->resp_len;
/* Import CPRB data from the ioctl input parameter */
- if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len),
- (char __force __user *)xcRB->req, xcRB->req_len)) {
+ if (z_copy_from_user(userspace, &msg->cprbx.cprb_len,
+ (char __force __user *)xcrb->req, xcrb->req_len)) {
return -EFAULT;
}
@@ -575,7 +519,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
} else {
lfmt = 1; /* length format #1 */
}
- payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
/* enable special processing based on the cprbs flags special bit */
@@ -624,9 +568,9 @@ struct type86_ep11_reply {
} __packed;
static int convert_type86_ica(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
{
static unsigned char static_pad[] = {
0x00, 0x02,
@@ -679,18 +623,18 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
ZCRYPT_DBF_WARN("%s dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
+ (int)service_rc, (int)service_rs);
return -EINVAL;
}
zq->online = 0;
pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
+ (int)service_rc, (int)service_rs);
ZCRYPT_DBF_ERR("%s dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
+ (int)service_rc, (int)service_rs);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
@@ -729,42 +673,42 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
*
* @zq: crypto device pointer
* @reply: reply AP message.
- * @xcRB: pointer to XCRB
+ * @xcrb: pointer to XCRB
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
- struct ica_xcRB *xcRB)
+ struct ica_xcRB *xcrb)
{
struct type86_fmt2_msg *msg = reply->msg;
char *data = reply->msg;
/* Copy CPRB to user */
- if (xcRB->reply_control_blk_length < msg->fmt2.count1) {
+ if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
- __func__, xcRB->reply_control_blk_length,
+ __func__, xcrb->reply_control_blk_length,
msg->fmt2.count1);
return -EMSGSIZE;
}
- if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr,
+ if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
- xcRB->reply_control_blk_length = msg->fmt2.count1;
+ xcrb->reply_control_blk_length = msg->fmt2.count1;
/* Copy data buffer to user */
if (msg->fmt2.count2) {
- if (xcRB->reply_data_length < msg->fmt2.count2) {
+ if (xcrb->reply_data_length < msg->fmt2.count2) {
ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n",
- __func__, xcRB->reply_data_length,
+ __func__, xcrb->reply_data_length,
msg->fmt2.count2);
return -EMSGSIZE;
}
- if (z_copy_to_user(userspace, xcRB->reply_data_addr,
+ if (z_copy_to_user(userspace, xcrb->reply_data_addr,
data + msg->fmt2.offset2, msg->fmt2.count2))
return -EFAULT;
}
- xcRB->reply_data_length = msg->fmt2.count2;
+ xcrb->reply_data_length = msg->fmt2.count2;
return 0;
}
@@ -774,35 +718,35 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
*
* @zq: crypto device pointer
* @reply: reply AP message.
- * @xcRB: pointer to EP11 user request block
+ * @xcrb: pointer to EP11 user request block
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
- struct ep11_urb *xcRB)
+ struct ep11_urb *xcrb)
{
struct type86_fmt2_msg *msg = reply->msg;
char *data = reply->msg;
- if (xcRB->resp_len < msg->fmt2.count1) {
+ if (xcrb->resp_len < msg->fmt2.count1) {
ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n",
- __func__, (unsigned int)xcRB->resp_len,
+ __func__, (unsigned int)xcrb->resp_len,
msg->fmt2.count1);
return -EMSGSIZE;
}
/* Copy response CPRB to user */
- if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp,
+ if (z_copy_to_user(userspace, (char __force __user *)xcrb->resp,
data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
- xcRB->resp_len = msg->fmt2.count1;
+ xcrb->resp_len = msg->fmt2.count1;
return 0;
}
static int convert_type86_rng(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char *buffer)
+ struct ap_message *reply,
+ char *buffer)
{
struct {
struct type86_hdr hdr;
@@ -818,9 +762,9 @@ static int convert_type86_rng(struct zcrypt_queue *zq,
}
static int convert_response_ica(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
{
struct type86x_reply *msg = reply->msg;
@@ -830,13 +774,14 @@ static int convert_response_ica(struct zcrypt_queue *zq,
return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->cprbx.ccp_rtcode &&
- (msg->cprbx.ccp_rscode == 0x14f) &&
- (outputdatalength > 256)) {
+ msg->cprbx.ccp_rscode == 0x14f &&
+ outputdatalength > 256) {
if (zq->zcard->max_exp_bit_length <= 17) {
zq->zcard->max_exp_bit_length = 17;
return -EAGAIN;
- } else
+ } else {
return -EINVAL;
+ }
}
if (msg->hdr.reply_code)
return convert_error(zq, reply);
@@ -850,11 +795,11 @@ static int convert_response_ica(struct zcrypt_queue *zq,
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
+ (int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
@@ -862,41 +807,41 @@ static int convert_response_ica(struct zcrypt_queue *zq,
static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
- struct ica_xcRB *xcRB)
+ struct ica_xcRB *xcrb)
{
struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
- xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ xcrb->status = 0x0008044DL; /* HDD_InvalidParm */
return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) {
- memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
+ memcpy(&xcrb->status, msg->fmt2.apfs, sizeof(u32));
return convert_error(zq, reply);
}
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_xcrb(userspace, zq, reply, xcRB);
+ return convert_type86_xcrb(userspace, zq, reply, xcrb);
fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
- xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ xcrb->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
+ (int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
- struct ap_message *reply, struct ep11_urb *xcRB)
+ struct ap_message *reply, struct ep11_urb *xcrb)
{
struct type86_ep11_reply *msg = reply->msg;
@@ -908,26 +853,26 @@ static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
if (msg->hdr.reply_code)
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
- return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB);
+ return convert_type86_ep11_xcrb(userspace, zq, reply, xcrb);
fallthrough; /* wrong cprb version is an unknown resp */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
+ (int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
}
static int convert_response_rng(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char *data)
+ struct ap_message *reply,
+ char *data)
{
struct type86x_reply *msg = reply->msg;
@@ -946,11 +891,11 @@ static int convert_response_rng(struct zcrypt_queue *zq,
pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
+ (int)msg->hdr.type);
ZCRYPT_DBF_ERR(
"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
__func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
@@ -965,15 +910,15 @@ static int convert_response_rng(struct zcrypt_queue *zq,
* @reply: pointer to the AP reply message
*/
static void zcrypt_msgtype6_receive(struct ap_queue *aq,
- struct ap_message *msg,
- struct ap_message *reply)
+ struct ap_message *msg,
+ struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct response_type *resp_type =
- (struct response_type *) msg->private;
+ (struct response_type *)msg->private;
struct type86x_reply *t86r;
int len;
@@ -982,7 +927,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
goto out; /* ap_msg->rc indicates the error */
t86r = reply->msg;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
- t86r->cprbx.cprb_ver_id == 0x02) {
+ t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_ICA:
len = sizeof(struct type86x_reply) + t86r->length - 2;
@@ -1005,10 +950,11 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
default:
memcpy(msg->msg, &error_reply, sizeof(error_reply));
}
- } else
+ } else {
memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ }
out:
- complete(&(resp_type->work));
+ complete(&resp_type->work);
}
/*
@@ -1055,7 +1001,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
memcpy(msg->msg, reply->msg, sizeof(error_reply));
}
out:
- complete(&(resp_type->work));
+ complete(&resp_type->work);
}
static atomic_t zcrypt_step = ATOMIC_INIT(0);
@@ -1076,15 +1022,15 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
};
int rc;
- ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+ ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &resp_type;
- rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex);
+ rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex);
if (rc)
goto out_free;
init_completion(&resp_type.work);
@@ -1098,11 +1044,13 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
rc = convert_response_ica(zq, ap_msg,
mex->outputdata,
mex->outputdatalength);
- } else
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
+
out_free:
- free_page((unsigned long) ap_msg->msg);
+ free_page((unsigned long)ap_msg->msg);
ap_msg->private = NULL;
ap_msg->msg = NULL;
return rc;
@@ -1124,15 +1072,15 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
};
int rc;
- ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+ ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->bufsize = PAGE_SIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &resp_type;
- rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt);
+ rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt);
if (rc)
goto out_free;
init_completion(&resp_type.work);
@@ -1150,8 +1098,9 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
}
+
out_free:
- free_page((unsigned long) ap_msg->msg);
+ free_page((unsigned long)ap_msg->msg);
ap_msg->private = NULL;
ap_msg->msg = NULL;
return rc;
@@ -1166,7 +1115,7 @@ out_free:
* by the caller with ap_init_message(). Also the caller has to
* make sure ap_release_message() is always called even on failure.
*/
-int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB,
+int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned short **dom)
{
@@ -1179,12 +1128,12 @@ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
- return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
+ return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom);
}
/*
@@ -1192,10 +1141,10 @@ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB,
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxC device to the request distributor
- * @xcRB: pointer to the send_cprb request buffer
+ * @xcrb: pointer to the send_cprb request buffer
*/
static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
- struct ica_xcRB *xcRB,
+ struct ica_xcRB *xcrb,
struct ap_message *ap_msg)
{
int rc;
@@ -1210,11 +1159,11 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
* Set the queue's reply buffer length minus 128 byte padding
* as reply limit for the card firmware.
*/
- msg->hdr.FromCardLen1 = min_t(unsigned int, msg->hdr.FromCardLen1,
+ msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1,
zq->reply.bufsize - 128);
- if (msg->hdr.FromCardLen2)
- msg->hdr.FromCardLen2 =
- zq->reply.bufsize - msg->hdr.FromCardLen1 - 128;
+ if (msg->hdr.fromcardlen2)
+ msg->hdr.fromcardlen2 =
+ zq->reply.bufsize - msg->hdr.fromcardlen1 - 128;
init_completion(&rtype->work);
rc = ap_queue_message(zq->queue, ap_msg);
@@ -1224,10 +1173,12 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB);
- } else
+ rc = convert_response_xcrb(userspace, zq, ap_msg, xcrb);
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
+
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
@@ -1258,7 +1209,7 @@ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
@@ -1272,7 +1223,7 @@ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
* device to handle a send_ep11_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEX4P device to the request distributor
- * @xcRB: pointer to the ep11 user request block
+ * @xcrb: pointer to the ep11 user request block
*/
static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
struct ep11_urb *xcrb,
@@ -1322,7 +1273,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
} else {
lfmt = 1; /* length format #1 */
}
- payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt);
payload_hdr->dom_val = (unsigned int)
AP_QID_QUEUE(zq->queue->qid);
}
@@ -1331,7 +1282,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
* Set the queue's reply buffer length minus the two prepend headers
* as reply limit for the card firmware.
*/
- msg->hdr.FromCardLen1 = zq->reply.bufsize -
+ msg->hdr.fromcardlen1 = zq->reply.bufsize -
sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext);
init_completion(&rtype->work);
@@ -1343,9 +1294,11 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
- } else
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
+
out:
if (rc)
ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
@@ -1366,13 +1319,13 @@ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code,
if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
- rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+ rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
*func_code = HWRNG;
return 0;
@@ -1411,9 +1364,10 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_rng(zq, ap_msg, buffer);
- } else
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
out:
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 9da4f4175c44..6f5ced8d6cda 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -45,14 +45,14 @@ struct type6_hdr {
unsigned char reserved5[2]; /* 0x0000 */
unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
unsigned char reserved6[2]; /* 0x0000 */
- unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
- unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
- unsigned int ToCardLen3; /* 0x00000000 */
- unsigned int ToCardLen4; /* 0x00000000 */
- unsigned int FromCardLen1; /* response buffer length */
- unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
- unsigned int FromCardLen3; /* 0x00000000 */
- unsigned int FromCardLen4; /* 0x00000000 */
+ unsigned int tocardlen1; /* (request CPRB len + 3) & -4 */
+ unsigned int tocardlen2; /* db len 0x00000000 for PKD */
+ unsigned int tocardlen3; /* 0x00000000 */
+ unsigned int tocardlen4; /* 0x00000000 */
+ unsigned int fromcardlen1; /* response buffer length */
+ unsigned int fromcardlen2; /* db len 0x00000000 for PKD */
+ unsigned int fromcardlen3; /* 0x00000000 */
+ unsigned int fromcardlen4; /* 0x00000000 */
} __packed;
/**
@@ -116,7 +116,7 @@ int speed_idx_ep11(int);
* @ap_dev: AP device pointer
* @ap_msg: pointer to AP message
*/
-static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+static inline void rng_type6cprb_msgx(struct ap_message *ap_msg,
unsigned int random_number_length,
unsigned int *domain)
{
@@ -134,8 +134,8 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
.offset1 = 0x00000058,
.agent_id = {'C', 'A'},
.function_code = {'R', 'L'},
- .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
- .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
+ .tocardlen1 = sizeof(*msg) - sizeof(msg->hdr),
+ .fromcardlen1 = sizeof(*msg) - sizeof(msg->hdr),
};
static struct CPRBX local_cprbx = {
.cprb_len = 0x00dc,
@@ -147,9 +147,9 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
};
msg->hdr = static_type6_hdrX;
- msg->hdr.FromCardLen2 = random_number_length,
+ msg->hdr.fromcardlen2 = random_number_length;
msg->cprbx = local_cprbx;
- msg->cprbx.rpl_datal = random_number_length,
+ msg->cprbx.rpl_datal = random_number_length;
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 1552a850a52e..cdc5a4b2c019 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -114,7 +114,7 @@ struct zcrypt_queue *zcrypt_queue_alloc(size_t reply_buf_size)
{
struct zcrypt_queue *zq;
- zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+ zq = kzalloc(sizeof(*zq), GFP_KERNEL);
if (!zq)
return NULL;
zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL);
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index ab78bd4c2eb0..c92ac75d6556 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -63,7 +63,7 @@
ARCH_EXIT_TO_USER_MODE_WORK)
/**
- * arch_check_user_regs - Architecture specific sanity check for user mode regs
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
* @regs: Pointer to currents pt_regs
*
* Defaults to an empty implementation. Can be replaced by architecture
@@ -73,10 +73,10 @@
* section. Use __always_inline so the compiler cannot push it out of line
* and make it instrumentable.
*/
-static __always_inline void arch_check_user_regs(struct pt_regs *regs);
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
-#ifndef arch_check_user_regs
-static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
+#ifndef arch_enter_from_user_mode
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
/**
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 93c3b86e781c..9e63923c5a0f 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -17,7 +17,7 @@
/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
- arch_check_user_regs(regs);
+ arch_enter_from_user_mode(regs);
lockdep_hardirqs_off(CALLER_ADDR0);
CT_WARN_ON(ct_state() != CONTEXT_USER);
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index 7c20252a90c6..250925aab101 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -24,9 +24,8 @@ icc)
echo 16.0.3
;;
llvm)
- # https://lore.kernel.org/r/YMtib5hKVyNknZt3@osiris/
if [ "$SRCARCH" = s390 ]; then
- echo 13.0.0
+ echo 14.0.0
else
echo 11.0.0
fi