Diffstat (limited to 'arch/s390'): 73 files changed, 1918 insertions(+), 2070 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 43a81d0ad507..d4051e88e625 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -170,6 +170,7 @@ config S390 select HAVE_PERF_EVENTS select HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING @@ -246,8 +247,8 @@ choice config MARCH_Z900 bool "IBM zSeries model z800 and z900" - depends on !CC_IS_CLANG select HAVE_MARCH_Z900_FEATURES + depends on $(cc-option,-march=z900) help Select this to enable optimizations for model z800/z900 (2064 and 2066 series). This will enable some optimizations that are not @@ -255,8 +256,8 @@ config MARCH_Z900 config MARCH_Z990 bool "IBM zSeries model z890 and z990" - depends on !CC_IS_CLANG select HAVE_MARCH_Z990_FEATURES + depends on $(cc-option,-march=z990) help Select this to enable optimizations for model z890/z990 (2084 and 2086 series). The kernel will be slightly faster but will not work @@ -264,8 +265,8 @@ config MARCH_Z990 config MARCH_Z9_109 bool "IBM System z9" - depends on !CC_IS_CLANG select HAVE_MARCH_Z9_109_FEATURES + depends on $(cc-option,-march=z9-109) help Select this to enable optimizations for IBM System z9 (2094 and 2096 series). The kernel will be slightly faster but will not work @@ -274,6 +275,7 @@ config MARCH_Z9_109 config MARCH_Z10 bool "IBM System z10" select HAVE_MARCH_Z10_FEATURES + depends on $(cc-option,-march=z10) help Select this to enable optimizations for IBM System z10 (2097 and 2098 series). The kernel will be slightly faster but will not work @@ -282,6 +284,7 @@ config MARCH_Z10 config MARCH_Z196 bool "IBM zEnterprise 114 and 196" select HAVE_MARCH_Z196_FEATURES + depends on $(cc-option,-march=z196) help Select this to enable optimizations for IBM zEnterprise 114 and 196 (2818 and 2817 series). The kernel will be slightly faster but will @@ -290,6 +293,7 @@ config MARCH_Z196 config MARCH_ZEC12 bool "IBM zBC12 and zEC12" select HAVE_MARCH_ZEC12_FEATURES + depends on $(cc-option,-march=zEC12) help Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and 2827 series). The kernel will be slightly faster but will not work on @@ -298,6 +302,7 @@ config MARCH_ZEC12 config MARCH_Z13 bool "IBM z13s and z13" select HAVE_MARCH_Z13_FEATURES + depends on $(cc-option,-march=z13) help Select this to enable optimizations for IBM z13s and z13 (2965 and 2964 series). The kernel will be slightly faster but will not work on @@ -306,6 +311,7 @@ config MARCH_Z13 config MARCH_Z14 bool "IBM z14 ZR1 and z14" select HAVE_MARCH_Z14_FEATURES + depends on $(cc-option,-march=z14) help Select this to enable optimizations for IBM z14 ZR1 and z14 (3907 and 3906 series). The kernel will be slightly faster but will not @@ -314,6 +320,7 @@ config MARCH_Z14 config MARCH_Z15 bool "IBM z15" select HAVE_MARCH_Z15_FEATURES + depends on $(cc-option,-march=z15) help Select this to enable optimizations for IBM z15 (8562 and 8561 series). 
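The MARCH_* hunks here drop the blanket "depends on !CC_IS_CLANG" and instead probe the compiler for each code-generation level with kconfig's built-in $(cc-option,...) macro, so a level is only offered when the toolchain actually accepts the corresponding -march flag (the TUNE_* hunks further below get the same treatment with -mtune). A minimal sketch of the idiom, using an illustrative symbol name that is not taken from the patch:

    config MARCH_EXAMPLE
    	bool "Example CPU level"
    	depends on $(cc-option,-march=z13)
    	help
    	  Only visible when $(CC) accepts -march=z13; with an older
    	  compiler (or a clang that lacks this level) the option
    	  disappears instead of breaking the build.
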
The kernel will be slightly faster but will not @@ -367,33 +374,39 @@ config TUNE_DEFAULT config TUNE_Z900 bool "IBM zSeries model z800 and z900" - depends on !CC_IS_CLANG + depends on $(cc-option,-mtune=z900) config TUNE_Z990 bool "IBM zSeries model z890 and z990" - depends on !CC_IS_CLANG + depends on $(cc-option,-mtune=z990) config TUNE_Z9_109 bool "IBM System z9" - depends on !CC_IS_CLANG + depends on $(cc-option,-mtune=z9-109) config TUNE_Z10 bool "IBM System z10" + depends on $(cc-option,-mtune=z10) config TUNE_Z196 bool "IBM zEnterprise 114 and 196" + depends on $(cc-option,-mtune=z196) config TUNE_ZEC12 bool "IBM zBC12 and zEC12" + depends on $(cc-option,-mtune=zEC12) config TUNE_Z13 - bool "IBM z13" + bool "IBM z13s and z13" + depends on $(cc-option,-mtune=z13) config TUNE_Z14 - bool "IBM z14" + bool "IBM z14 ZR1 and z14" + depends on $(cc-option,-mtune=z14) config TUNE_Z15 bool "IBM z15" + depends on $(cc-option,-mtune=z15) endchoice @@ -414,9 +427,6 @@ config COMPAT (and some other stuff like libraries and such) is needed for executing 31 bit applications. It is safe to say "Y". -config COMPAT_VDSO - def_bool COMPAT && !CC_IS_CLANG - config SYSVIPC_COMPAT def_bool y if COMPAT && SYSVIPC @@ -1006,3 +1016,17 @@ config S390_GUEST the KVM hypervisor. endmenu + +menu "Selftests" + +config S390_UNWIND_SELFTEST + def_tristate n + prompt "Test unwind functions" + help + This option enables s390 specific stack unwinder testing kernel + module. This option is not useful for distributions or general + kernels, but only for kernel developers working on architecture code. + + Say N if you are unsure. + +endmenu diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 478b645b20dd..ba8556bb0fb1 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -157,7 +157,6 @@ zfcpdump: vdso_install: $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ - $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@ archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 596ca7cc4d7b..3b3a11f95269 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -46,7 +46,7 @@ struct diag_ops __bootdata_preserved(diag_dma_ops) = { .diag0c = _diag0c_dma, .diag308_reset = _diag308_reset_dma }; -static struct diag210 _diag210_tmp_dma __section(".dma.data"); +static struct diag210 _diag210_tmp_dma __section(.dma.data); struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma; void _swsusp_reset_dma(void); unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma); @@ -101,10 +101,18 @@ static void handle_relocs(unsigned long offset) dynsym = (Elf64_Sym *) vmlinux.dynsym_start; for (rela = rela_start; rela < rela_end; rela++) { loc = rela->r_offset + offset; - val = rela->r_addend + offset; + val = rela->r_addend; r_sym = ELF64_R_SYM(rela->r_info); - if (r_sym) - val += dynsym[r_sym].st_value; + if (r_sym) { + if (dynsym[r_sym].st_shndx != SHN_UNDEF) + val += dynsym[r_sym].st_value + offset; + } else { + /* + * 0 == undefined symbol table index (STN_UNDEF), + * used for R_390_RELATIVE, only add KASLR offset + */ + val += offset; + } r_type = ELF64_R_TYPE(rela->r_info); rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0); if (rc) @@ -162,6 +170,11 @@ void startup_kernel(void) handle_relocs(__kaslr_offset); if (__kaslr_offset) { + /* + * Save KASLR offset for early dumps, before vmcore_info is set. + * Mark as uneven to distinguish from real vmcore_info pointer. 
+ */ + S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL; /* Clear non-relocated kernel */ if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) memset(img, 0, vmlinux.image_size); diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 38d64030aacf..2e60c80395ab 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -62,7 +62,6 @@ CONFIG_OPROFILE=m CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y CONFIG_STATIC_KEYS_SELFTEST=y -CONFIG_REFCOUNT_FULL=y CONFIG_LOCK_EVENT_COUNTS=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 9803e96d2924..ead0b2c9881d 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -44,7 +44,7 @@ struct s390_aes_ctx { int key_len; unsigned long fc; union { - struct crypto_sync_skcipher *blk; + struct crypto_skcipher *skcipher; struct crypto_cipher *cip; } fallback; }; @@ -54,7 +54,7 @@ struct s390_xts_ctx { u8 pcc_key[32]; int key_len; unsigned long fc; - struct crypto_sync_skcipher *fallback; + struct crypto_skcipher *fallback; }; struct gcm_sg_walk { @@ -178,66 +178,41 @@ static struct crypto_alg aes_alg = { } }; -static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key, - unsigned int len) +static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key, + unsigned int len) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); - unsigned int ret; - - crypto_sync_skcipher_clear_flags(sctx->fallback.blk, - CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags & - CRYPTO_TFM_REQ_MASK); - - ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len); - - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) & - CRYPTO_TFM_RES_MASK; - - return ret; -} - -static int fallback_blk_dec(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - unsigned int ret; - struct crypto_blkcipher *tfm = desc->tfm; - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); - - skcipher_request_set_sync_tfm(req, sctx->fallback.blk); - skcipher_request_set_callback(req, desc->flags, NULL, NULL); - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); - - ret = crypto_skcipher_decrypt(req); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); + int ret; - skcipher_request_zero(req); + crypto_skcipher_clear_flags(sctx->fallback.skcipher, + CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(sctx->fallback.skcipher, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + ret = crypto_skcipher_setkey(sctx->fallback.skcipher, key, len); + crypto_skcipher_set_flags(tfm, + crypto_skcipher_get_flags(sctx->fallback.skcipher) & + CRYPTO_TFM_RES_MASK); return ret; } -static int fallback_blk_enc(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx, + struct skcipher_request *req, + unsigned long modifier) { - unsigned int ret; - struct crypto_blkcipher *tfm = desc->tfm; - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); - - skcipher_request_set_sync_tfm(req, sctx->fallback.blk); - skcipher_request_set_callback(req, desc->flags, NULL, NULL); - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + struct skcipher_request *subreq = 
skcipher_request_ctx(req); - ret = crypto_skcipher_encrypt(req); - return ret; + *subreq = *req; + skcipher_request_set_tfm(subreq, sctx->fallback.skcipher); + return (modifier & CPACF_DECRYPT) ? + crypto_skcipher_decrypt(subreq) : + crypto_skcipher_encrypt(subreq); } -static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); unsigned long fc; /* Pick the correct function code based on the key length */ @@ -248,111 +223,92 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, /* Check if the function code is available */ sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; if (!sctx->fc) - return setkey_fallback_blk(tfm, in_key, key_len); + return setkey_fallback_skcipher(tfm, in_key, key_len); sctx->key_len = key_len; memcpy(sctx->key, in_key, key_len); return 0; } -static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, - struct blkcipher_walk *walk) +static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier) { - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int nbytes, n; int ret; - ret = blkcipher_walk_virt(desc, walk); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + if (unlikely(!sctx->fc)) + return fallback_skcipher_crypt(sctx, req, modifier); + + ret = skcipher_walk_virt(&walk, req, false); + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); cpacf_km(sctx->fc | modifier, sctx->key, - walk->dst.virt.addr, walk->src.virt.addr, n); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + walk.dst.virt.addr, walk.src.virt.addr, n); + ret = skcipher_walk_done(&walk, nbytes - n); } - return ret; } -static int ecb_aes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_aes_encrypt(struct skcipher_request *req) { - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (unlikely(!sctx->fc)) - return fallback_blk_enc(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_aes_crypt(desc, 0, &walk); + return ecb_aes_crypt(req, 0); } -static int ecb_aes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_aes_decrypt(struct skcipher_request *req) { - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (unlikely(!sctx->fc)) - return fallback_blk_dec(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk); + return ecb_aes_crypt(req, CPACF_DECRYPT); } -static int fallback_init_blk(struct crypto_tfm *tfm) +static int fallback_init_skcipher(struct crypto_skcipher *tfm) { - const char *name = tfm->__crt_alg->cra_name; - struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); - sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0, - CRYPTO_ALG_NEED_FALLBACK); + sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0, + CRYPTO_ALG_NEED_FALLBACK | 
CRYPTO_ALG_ASYNC); - if (IS_ERR(sctx->fallback.blk)) { + if (IS_ERR(sctx->fallback.skcipher)) { pr_err("Allocating AES fallback algorithm %s failed\n", name); - return PTR_ERR(sctx->fallback.blk); + return PTR_ERR(sctx->fallback.skcipher); } + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(sctx->fallback.skcipher)); return 0; } -static void fallback_exit_blk(struct crypto_tfm *tfm) +static void fallback_exit_skcipher(struct crypto_skcipher *tfm) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(sctx->fallback.blk); + crypto_free_skcipher(sctx->fallback.skcipher); } -static struct crypto_alg ecb_aes_alg = { - .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-s390", - .cra_priority = 401, /* combo: aes + ecb + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_aes_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = fallback_init_blk, - .cra_exit = fallback_exit_blk, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = ecb_aes_set_key, - .encrypt = ecb_aes_encrypt, - .decrypt = ecb_aes_decrypt, - } - } +static struct skcipher_alg ecb_aes_alg = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-s390", + .base.cra_priority = 401, /* combo: aes + ecb + 1 */ + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_aes_ctx), + .base.cra_module = THIS_MODULE, + .init = fallback_init_skcipher, + .exit = fallback_exit_skcipher, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = ecb_aes_set_key, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, }; -static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); unsigned long fc; /* Pick the correct function code based on the key length */ @@ -363,17 +319,18 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, /* Check if the function code is available */ sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? 
fc : 0; if (!sctx->fc) - return setkey_fallback_blk(tfm, in_key, key_len); + return setkey_fallback_skcipher(tfm, in_key, key_len); sctx->key_len = key_len; memcpy(sctx->key, in_key, key_len); return 0; } -static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, - struct blkcipher_walk *walk) +static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier) { - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int nbytes, n; int ret; struct { @@ -381,134 +338,74 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, u8 key[AES_MAX_KEY_SIZE]; } param; - ret = blkcipher_walk_virt(desc, walk); - memcpy(param.iv, walk->iv, AES_BLOCK_SIZE); + if (unlikely(!sctx->fc)) + return fallback_skcipher_crypt(sctx, req, modifier); + + ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; + memcpy(param.iv, walk.iv, AES_BLOCK_SIZE); memcpy(param.key, sctx->key, sctx->key_len); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); cpacf_kmc(sctx->fc | modifier, ¶m, - walk->dst.virt.addr, walk->src.virt.addr, n); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + walk.dst.virt.addr, walk.src.virt.addr, n); + memcpy(walk.iv, param.iv, AES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, nbytes - n); } - memcpy(walk->iv, param.iv, AES_BLOCK_SIZE); return ret; } -static int cbc_aes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_aes_encrypt(struct skcipher_request *req) { - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (unlikely(!sctx->fc)) - return fallback_blk_enc(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_aes_crypt(desc, 0, &walk); + return cbc_aes_crypt(req, 0); } -static int cbc_aes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_aes_decrypt(struct skcipher_request *req) { - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (unlikely(!sctx->fc)) - return fallback_blk_dec(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk); + return cbc_aes_crypt(req, CPACF_DECRYPT); } -static struct crypto_alg cbc_aes_alg = { - .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-s390", - .cra_priority = 402, /* ecb-aes-s390 + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_aes_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = fallback_init_blk, - .cra_exit = fallback_exit_blk, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = cbc_aes_set_key, - .encrypt = cbc_aes_encrypt, - .decrypt = cbc_aes_decrypt, - } - } +static struct skcipher_alg cbc_aes_alg = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-s390", + .base.cra_priority = 402, /* ecb-aes-s390 + 1 */ + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = 
sizeof(struct s390_aes_ctx), + .base.cra_module = THIS_MODULE, + .init = fallback_init_skcipher, + .exit = fallback_exit_skcipher, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_aes_set_key, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, }; -static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int len) -{ - struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); - unsigned int ret; - - crypto_sync_skcipher_clear_flags(xts_ctx->fallback, - CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags & - CRYPTO_TFM_REQ_MASK); - - ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len); - - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) & - CRYPTO_TFM_RES_MASK; - - return ret; -} - -static int xts_fallback_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct crypto_blkcipher *tfm = desc->tfm; - struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); - unsigned int ret; - - skcipher_request_set_sync_tfm(req, xts_ctx->fallback); - skcipher_request_set_callback(req, desc->flags, NULL, NULL); - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); - - ret = crypto_skcipher_decrypt(req); - - skcipher_request_zero(req); - return ret; -} - -static int xts_fallback_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int len) { - struct crypto_blkcipher *tfm = desc->tfm; - struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm); - SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); - unsigned int ret; - - skcipher_request_set_sync_tfm(req, xts_ctx->fallback); - skcipher_request_set_callback(req, desc->flags, NULL, NULL); - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); - - ret = crypto_skcipher_encrypt(req); + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); + int ret; - skcipher_request_zero(req); + crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(xts_ctx->fallback, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len); + crypto_skcipher_set_flags(tfm, + crypto_skcipher_get_flags(xts_ctx->fallback) & + CRYPTO_TFM_RES_MASK); return ret; } -static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { - struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); unsigned long fc; int err; @@ -518,7 +415,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, /* In fips mode only 128 bit or 256 bit keys are valid */ if (fips_enabled && key_len != 32 && key_len != 64) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -539,10 +436,11 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return 0; } -static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, - struct blkcipher_walk *walk) +static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier) { - struct s390_xts_ctx 
*xts_ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int offset, nbytes, n; int ret; struct { @@ -557,113 +455,100 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, u8 init[16]; } xts_param; - ret = blkcipher_walk_virt(desc, walk); + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + + if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) { + struct skcipher_request *subreq = skcipher_request_ctx(req); + + *subreq = *req; + skcipher_request_set_tfm(subreq, xts_ctx->fallback); + return (modifier & CPACF_DECRYPT) ? + crypto_skcipher_decrypt(subreq) : + crypto_skcipher_encrypt(subreq); + } + + ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; offset = xts_ctx->key_len & 0x10; memset(pcc_param.block, 0, sizeof(pcc_param.block)); memset(pcc_param.bit, 0, sizeof(pcc_param.bit)); memset(pcc_param.xts, 0, sizeof(pcc_param.xts)); - memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); + memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak)); memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len); cpacf_pcc(xts_ctx->fc, pcc_param.key + offset); memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len); memcpy(xts_param.init, pcc_param.xts, 16); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset, - walk->dst.virt.addr, walk->src.virt.addr, n); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + walk.dst.virt.addr, walk.src.virt.addr, n); + ret = skcipher_walk_done(&walk, nbytes - n); } return ret; } -static int xts_aes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int xts_aes_encrypt(struct skcipher_request *req) { - struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (!nbytes) - return -EINVAL; - - if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0)) - return xts_fallback_encrypt(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return xts_aes_crypt(desc, 0, &walk); + return xts_aes_crypt(req, 0); } -static int xts_aes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int xts_aes_decrypt(struct skcipher_request *req) { - struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (!nbytes) - return -EINVAL; - - if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0)) - return xts_fallback_decrypt(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return xts_aes_crypt(desc, CPACF_DECRYPT, &walk); + return xts_aes_crypt(req, CPACF_DECRYPT); } -static int xts_fallback_init(struct crypto_tfm *tfm) +static int xts_fallback_init(struct crypto_skcipher *tfm) { - const char *name = tfm->__crt_alg->cra_name; - struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); - xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0, - CRYPTO_ALG_NEED_FALLBACK); + xts_ctx->fallback = crypto_alloc_skcipher(name, 0, + CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); if (IS_ERR(xts_ctx->fallback)) { 
pr_err("Allocating XTS fallback algorithm %s failed\n", name); return PTR_ERR(xts_ctx->fallback); } + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(xts_ctx->fallback)); return 0; } -static void xts_fallback_exit(struct crypto_tfm *tfm) +static void xts_fallback_exit(struct crypto_skcipher *tfm) { - struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(xts_ctx->fallback); + crypto_free_skcipher(xts_ctx->fallback); } -static struct crypto_alg xts_aes_alg = { - .cra_name = "xts(aes)", - .cra_driver_name = "xts-aes-s390", - .cra_priority = 402, /* ecb-aes-s390 + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_xts_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = xts_fallback_init, - .cra_exit = xts_fallback_exit, - .cra_u = { - .blkcipher = { - .min_keysize = 2 * AES_MIN_KEY_SIZE, - .max_keysize = 2 * AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = xts_aes_set_key, - .encrypt = xts_aes_encrypt, - .decrypt = xts_aes_decrypt, - } - } +static struct skcipher_alg xts_aes_alg = { + .base.cra_name = "xts(aes)", + .base.cra_driver_name = "xts-aes-s390", + .base.cra_priority = 402, /* ecb-aes-s390 + 1 */ + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_xts_ctx), + .base.cra_module = THIS_MODULE, + .init = xts_fallback_init, + .exit = xts_fallback_exit, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = xts_aes_set_key, + .encrypt = xts_aes_encrypt, + .decrypt = xts_aes_decrypt, }; -static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); unsigned long fc; /* Pick the correct function code based on the key length */ @@ -674,7 +559,7 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, /* Check if the function code is available */ sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? 
fc : 0; if (!sctx->fc) - return setkey_fallback_blk(tfm, in_key, key_len); + return setkey_fallback_skcipher(tfm, in_key, key_len); sctx->key_len = key_len; memcpy(sctx->key, in_key, key_len); @@ -696,30 +581,34 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) return n; } -static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, - struct blkcipher_walk *walk) +static int ctr_aes_crypt(struct skcipher_request *req) { - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm); u8 buf[AES_BLOCK_SIZE], *ctrptr; + struct skcipher_walk walk; unsigned int n, nbytes; int ret, locked; + if (unlikely(!sctx->fc)) + return fallback_skcipher_crypt(sctx, req, 0); + locked = mutex_trylock(&ctrblk_lock); - ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + ret = skcipher_walk_virt(&walk, req, false); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { n = AES_BLOCK_SIZE; + if (nbytes >= 2*AES_BLOCK_SIZE && locked) - n = __ctrblk_init(ctrblk, walk->iv, nbytes); - ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv; - cpacf_kmctr(sctx->fc | modifier, sctx->key, - walk->dst.virt.addr, walk->src.virt.addr, - n, ctrptr); + n = __ctrblk_init(ctrblk, walk.iv, nbytes); + ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv; + cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr, + walk.src.virt.addr, n, ctrptr); if (ctrptr == ctrblk) - memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE, + memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE, AES_BLOCK_SIZE); - crypto_inc(walk->iv, AES_BLOCK_SIZE); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + crypto_inc(walk.iv, AES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, nbytes - n); } if (locked) mutex_unlock(&ctrblk_lock); @@ -727,67 +616,33 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, * final block may be < AES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { - cpacf_kmctr(sctx->fc | modifier, sctx->key, - buf, walk->src.virt.addr, - AES_BLOCK_SIZE, walk->iv); - memcpy(walk->dst.virt.addr, buf, nbytes); - crypto_inc(walk->iv, AES_BLOCK_SIZE); - ret = blkcipher_walk_done(desc, walk, 0); + cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr, + AES_BLOCK_SIZE, walk.iv); + memcpy(walk.dst.virt.addr, buf, nbytes); + crypto_inc(walk.iv, AES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, 0); } return ret; } -static int ctr_aes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (unlikely(!sctx->fc)) - return fallback_blk_enc(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_aes_crypt(desc, 0, &walk); -} - -static int ctr_aes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - - if (unlikely(!sctx->fc)) - return fallback_blk_dec(desc, dst, src, nbytes); - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk); -} - -static struct crypto_alg ctr_aes_alg = { - .cra_name = "ctr(aes)", - .cra_driver_name = "ctr-aes-s390", - .cra_priority = 402, /* ecb-aes-s390 + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | - 
CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct s390_aes_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = fallback_init_blk, - .cra_exit = fallback_exit_blk, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = ctr_aes_set_key, - .encrypt = ctr_aes_encrypt, - .decrypt = ctr_aes_decrypt, - } - } +static struct skcipher_alg ctr_aes_alg = { + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "ctr-aes-s390", + .base.cra_priority = 402, /* ecb-aes-s390 + 1 */ + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct s390_aes_ctx), + .base.cra_module = THIS_MODULE, + .init = fallback_init_skcipher, + .exit = fallback_exit_skcipher, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ctr_aes_set_key, + .encrypt = ctr_aes_crypt, + .decrypt = ctr_aes_crypt, + .chunksize = AES_BLOCK_SIZE, }; static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key, @@ -1116,24 +971,27 @@ static struct aead_alg gcm_aes_aead = { }, }; -static struct crypto_alg *aes_s390_algs_ptr[5]; -static int aes_s390_algs_num; +static struct crypto_alg *aes_s390_alg; +static struct skcipher_alg *aes_s390_skcipher_algs[4]; +static int aes_s390_skciphers_num; static struct aead_alg *aes_s390_aead_alg; -static int aes_s390_register_alg(struct crypto_alg *alg) +static int aes_s390_register_skcipher(struct skcipher_alg *alg) { int ret; - ret = crypto_register_alg(alg); + ret = crypto_register_skcipher(alg); if (!ret) - aes_s390_algs_ptr[aes_s390_algs_num++] = alg; + aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg; return ret; } static void aes_s390_fini(void) { - while (aes_s390_algs_num--) - crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]); + if (aes_s390_alg) + crypto_unregister_alg(aes_s390_alg); + while (aes_s390_skciphers_num--) + crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]); if (ctrblk) free_page((unsigned long) ctrblk); @@ -1154,10 +1012,11 @@ static int __init aes_s390_init(void) if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) || cpacf_test_func(&km_functions, CPACF_KM_AES_192) || cpacf_test_func(&km_functions, CPACF_KM_AES_256)) { - ret = aes_s390_register_alg(&aes_alg); + ret = crypto_register_alg(&aes_alg); if (ret) goto out_err; - ret = aes_s390_register_alg(&ecb_aes_alg); + aes_s390_alg = &aes_alg; + ret = aes_s390_register_skcipher(&ecb_aes_alg); if (ret) goto out_err; } @@ -1165,14 +1024,14 @@ static int __init aes_s390_init(void) if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) || cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) || cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) { - ret = aes_s390_register_alg(&cbc_aes_alg); + ret = aes_s390_register_skcipher(&cbc_aes_alg); if (ret) goto out_err; } if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) || cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) { - ret = aes_s390_register_alg(&xts_aes_alg); + ret = aes_s390_register_skcipher(&xts_aes_alg); if (ret) goto out_err; } @@ -1185,7 +1044,7 @@ static int __init aes_s390_init(void) ret = -ENOMEM; goto out_err; } - ret = aes_s390_register_alg(&ctr_aes_alg); + ret = aes_s390_register_skcipher(&ctr_aes_alg); if (ret) goto out_err; } diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 439b100c6f2e..bfbafd35bcbd 100644 --- a/arch/s390/crypto/des_s390.c +++ 
b/arch/s390/crypto/des_s390.c @@ -17,6 +17,7 @@ #include <linux/mutex.h> #include <crypto/algapi.h> #include <crypto/internal/des.h> +#include <crypto/internal/skcipher.h> #include <asm/cpacf.h> #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) @@ -45,6 +46,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, return 0; } +static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + return des_setkey(crypto_skcipher_tfm(tfm), key, key_len); +} + static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); @@ -79,28 +86,30 @@ static struct crypto_alg des_alg = { } }; -static int ecb_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, - struct blkcipher_walk *walk) +static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int nbytes, n; int ret; - ret = blkcipher_walk_virt(desc, walk); - while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { + ret = skcipher_walk_virt(&walk, req, false); + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(DES_BLOCK_SIZE - 1); - cpacf_km(fc, ctx->key, walk->dst.virt.addr, - walk->src.virt.addr, n); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + cpacf_km(fc, ctx->key, walk.dst.virt.addr, + walk.src.virt.addr, n); + ret = skcipher_walk_done(&walk, nbytes - n); } return ret; } -static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, - struct blkcipher_walk *walk) +static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int nbytes, n; int ret; struct { @@ -108,99 +117,69 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, u8 key[DES3_KEY_SIZE]; } param; - ret = blkcipher_walk_virt(desc, walk); - memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); + ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; + memcpy(param.iv, walk.iv, DES_BLOCK_SIZE); memcpy(param.key, ctx->key, DES3_KEY_SIZE); - while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(DES_BLOCK_SIZE - 1); - cpacf_kmc(fc, ¶m, walk->dst.virt.addr, - walk->src.virt.addr, n); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + cpacf_kmc(fc, ¶m, walk.dst.virt.addr, + walk.src.virt.addr, n); + memcpy(walk.iv, param.iv, DES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, nbytes - n); } - memcpy(walk->iv, param.iv, DES_BLOCK_SIZE); return ret; } -static int ecb_des_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_des_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_desall_crypt(desc, CPACF_KM_DEA, &walk); + return ecb_desall_crypt(req, CPACF_KM_DEA); } -static int ecb_des_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_des_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - 
blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_desall_crypt(desc, CPACF_KM_DEA | CPACF_DECRYPT, &walk); + return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT); } -static struct crypto_alg ecb_des_alg = { - .cra_name = "ecb(des)", - .cra_driver_name = "ecb-des-s390", - .cra_priority = 400, /* combo: des + ecb */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = des_setkey, - .encrypt = ecb_des_encrypt, - .decrypt = ecb_des_decrypt, - } - } +static struct skcipher_alg ecb_des_alg = { + .base.cra_name = "ecb(des)", + .base.cra_driver_name = "ecb-des-s390", + .base.cra_priority = 400, /* combo: des + ecb */ + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_des_ctx), + .base.cra_module = THIS_MODULE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_setkey_skcipher, + .encrypt = ecb_des_encrypt, + .decrypt = ecb_des_decrypt, }; -static int cbc_des_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_des_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, CPACF_KMC_DEA, &walk); + return cbc_desall_crypt(req, CPACF_KMC_DEA); } -static int cbc_des_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_des_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, CPACF_KMC_DEA | CPACF_DECRYPT, &walk); + return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT); } -static struct crypto_alg cbc_des_alg = { - .cra_name = "cbc(des)", - .cra_driver_name = "cbc-des-s390", - .cra_priority = 400, /* combo: des + cbc */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des_setkey, - .encrypt = cbc_des_encrypt, - .decrypt = cbc_des_decrypt, - } - } +static struct skcipher_alg cbc_des_alg = { + .base.cra_name = "cbc(des)", + .base.cra_driver_name = "cbc-des-s390", + .base.cra_priority = 400, /* combo: des + cbc */ + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_des_ctx), + .base.cra_module = THIS_MODULE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = des_setkey_skcipher, + .encrypt = cbc_des_encrypt, + .decrypt = cbc_des_decrypt, }; /* @@ -232,6 +211,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, return 0; } +static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len); +} + static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); @@ -266,87 +251,53 @@ static struct crypto_alg des3_alg = { } }; -static int ecb_des3_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int 
nbytes) +static int ecb_des3_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_desall_crypt(desc, CPACF_KM_TDEA_192, &walk); + return ecb_desall_crypt(req, CPACF_KM_TDEA_192); } -static int ecb_des3_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_des3_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_desall_crypt(desc, CPACF_KM_TDEA_192 | CPACF_DECRYPT, - &walk); + return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT); } -static struct crypto_alg ecb_des3_alg = { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "ecb-des3_ede-s390", - .cra_priority = 400, /* combo: des3 + ecb */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES3_KEY_SIZE, - .max_keysize = DES3_KEY_SIZE, - .setkey = des3_setkey, - .encrypt = ecb_des3_encrypt, - .decrypt = ecb_des3_decrypt, - } - } +static struct skcipher_alg ecb_des3_alg = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3_ede-s390", + .base.cra_priority = 400, /* combo: des3 + ecb */ + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_des_ctx), + .base.cra_module = THIS_MODULE, + .min_keysize = DES3_KEY_SIZE, + .max_keysize = DES3_KEY_SIZE, + .setkey = des3_setkey_skcipher, + .encrypt = ecb_des3_encrypt, + .decrypt = ecb_des3_decrypt, }; -static int cbc_des3_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_des3_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192, &walk); + return cbc_desall_crypt(req, CPACF_KMC_TDEA_192); } -static int cbc_des3_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_des3_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192 | CPACF_DECRYPT, - &walk); + return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT); } -static struct crypto_alg cbc_des3_alg = { - .cra_name = "cbc(des3_ede)", - .cra_driver_name = "cbc-des3_ede-s390", - .cra_priority = 400, /* combo: des3 + cbc */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES3_KEY_SIZE, - .max_keysize = DES3_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des3_setkey, - .encrypt = cbc_des3_encrypt, - .decrypt = cbc_des3_decrypt, - } - } +static struct skcipher_alg cbc_des3_alg = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3_ede-s390", + .base.cra_priority = 400, /* combo: des3 + cbc */ + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_des_ctx), + .base.cra_module = THIS_MODULE, + .min_keysize = DES3_KEY_SIZE, + .max_keysize = DES3_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = des3_setkey_skcipher, + .encrypt = cbc_des3_encrypt, + .decrypt = 
cbc_des3_decrypt, }; static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) @@ -364,128 +315,90 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) return n; } -static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, - struct blkcipher_walk *walk) +static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm); u8 buf[DES_BLOCK_SIZE], *ctrptr; + struct skcipher_walk walk; unsigned int n, nbytes; int ret, locked; locked = mutex_trylock(&ctrblk_lock); - ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); - while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { + ret = skcipher_walk_virt(&walk, req, false); + while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) { n = DES_BLOCK_SIZE; if (nbytes >= 2*DES_BLOCK_SIZE && locked) - n = __ctrblk_init(ctrblk, walk->iv, nbytes); - ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv; - cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr, - walk->src.virt.addr, n, ctrptr); + n = __ctrblk_init(ctrblk, walk.iv, nbytes); + ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk.iv; + cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr, + walk.src.virt.addr, n, ctrptr); if (ctrptr == ctrblk) - memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE, + memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE, DES_BLOCK_SIZE); - crypto_inc(walk->iv, DES_BLOCK_SIZE); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + crypto_inc(walk.iv, DES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, nbytes - n); } if (locked) mutex_unlock(&ctrblk_lock); /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { - cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr, - DES_BLOCK_SIZE, walk->iv); - memcpy(walk->dst.virt.addr, buf, nbytes); - crypto_inc(walk->iv, DES_BLOCK_SIZE); - ret = blkcipher_walk_done(desc, walk, 0); + cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr, + DES_BLOCK_SIZE, walk.iv); + memcpy(walk.dst.virt.addr, buf, nbytes); + crypto_inc(walk.iv, DES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, 0); } return ret; } -static int ctr_des_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_desall_crypt(desc, CPACF_KMCTR_DEA, &walk); -} - -static int ctr_des_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ctr_des_crypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_desall_crypt(desc, CPACF_KMCTR_DEA | CPACF_DECRYPT, &walk); + return ctr_desall_crypt(req, CPACF_KMCTR_DEA); } -static struct crypto_alg ctr_des_alg = { - .cra_name = "ctr(des)", - .cra_driver_name = "ctr-des-s390", - .cra_priority = 400, /* combo: des + ctr */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des_setkey, - .encrypt = ctr_des_encrypt, - .decrypt = ctr_des_decrypt, - } - } +static struct skcipher_alg ctr_des_alg = { + .base.cra_name = "ctr(des)", + .base.cra_driver_name = "ctr-des-s390", + 
.base.cra_priority = 400, /* combo: des + ctr */ + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct s390_des_ctx), + .base.cra_module = THIS_MODULE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = des_setkey_skcipher, + .encrypt = ctr_des_crypt, + .decrypt = ctr_des_crypt, + .chunksize = DES_BLOCK_SIZE, }; -static int ctr_des3_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192, &walk); -} - -static int ctr_des3_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ctr_des3_crypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192 | CPACF_DECRYPT, - &walk); + return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192); } -static struct crypto_alg ctr_des3_alg = { - .cra_name = "ctr(des3_ede)", - .cra_driver_name = "ctr-des3_ede-s390", - .cra_priority = 400, /* combo: des3 + ede */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct s390_des_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES3_KEY_SIZE, - .max_keysize = DES3_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des3_setkey, - .encrypt = ctr_des3_encrypt, - .decrypt = ctr_des3_decrypt, - } - } +static struct skcipher_alg ctr_des3_alg = { + .base.cra_name = "ctr(des3_ede)", + .base.cra_driver_name = "ctr-des3_ede-s390", + .base.cra_priority = 400, /* combo: des3 + ede */ + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct s390_des_ctx), + .base.cra_module = THIS_MODULE, + .min_keysize = DES3_KEY_SIZE, + .max_keysize = DES3_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = des3_setkey_skcipher, + .encrypt = ctr_des3_crypt, + .decrypt = ctr_des3_crypt, + .chunksize = DES_BLOCK_SIZE, }; -static struct crypto_alg *des_s390_algs_ptr[8]; +static struct crypto_alg *des_s390_algs_ptr[2]; static int des_s390_algs_num; +static struct skcipher_alg *des_s390_skciphers_ptr[6]; +static int des_s390_skciphers_num; static int des_s390_register_alg(struct crypto_alg *alg) { @@ -497,10 +410,22 @@ static int des_s390_register_alg(struct crypto_alg *alg) return ret; } +static int des_s390_register_skcipher(struct skcipher_alg *alg) +{ + int ret; + + ret = crypto_register_skcipher(alg); + if (!ret) + des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg; + return ret; +} + static void des_s390_exit(void) { while (des_s390_algs_num--) crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]); + while (des_s390_skciphers_num--) + crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]); if (ctrblk) free_page((unsigned long) ctrblk); } @@ -518,12 +443,12 @@ static int __init des_s390_init(void) ret = des_s390_register_alg(&des_alg); if (ret) goto out_err; - ret = des_s390_register_alg(&ecb_des_alg); + ret = des_s390_register_skcipher(&ecb_des_alg); if (ret) goto out_err; } if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) { - ret = des_s390_register_alg(&cbc_des_alg); + ret = des_s390_register_skcipher(&cbc_des_alg); if (ret) goto out_err; } @@ -531,12 +456,12 @@ static int __init des_s390_init(void) ret = des_s390_register_alg(&des3_alg); if (ret) goto out_err; - ret = 
des_s390_register_alg(&ecb_des3_alg); + ret = des_s390_register_skcipher(&ecb_des3_alg); if (ret) goto out_err; } if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) { - ret = des_s390_register_alg(&cbc_des3_alg); + ret = des_s390_register_skcipher(&cbc_des3_alg); if (ret) goto out_err; } @@ -551,12 +476,12 @@ static int __init des_s390_init(void) } if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) { - ret = des_s390_register_alg(&ctr_des_alg); + ret = des_s390_register_skcipher(&ctr_des_alg); if (ret) goto out_err; } if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) { - ret = des_s390_register_alg(&ctr_des3_alg); + ret = des_s390_register_skcipher(&ctr_des3_alg); if (ret) goto out_err; } diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 6184dceed340..c7119c617b6e 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -21,6 +21,7 @@ #include <linux/cpufeature.h> #include <linux/init.h> #include <linux/spinlock.h> +#include <crypto/internal/skcipher.h> #include <crypto/xts.h> #include <asm/cpacf.h> #include <asm/pkey.h> @@ -123,27 +124,27 @@ static int __paes_set_key(struct s390_paes_ctx *ctx) return ctx->fc ? 0 : -EINVAL; } -static int ecb_paes_init(struct crypto_tfm *tfm) +static int ecb_paes_init(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->kb.key = NULL; return 0; } -static void ecb_paes_exit(struct crypto_tfm *tfm) +static void ecb_paes_exit(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); } -static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { int rc; - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); rc = _copy_key_to_kb(&ctx->kb, in_key, key_len); @@ -151,91 +152,75 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return rc; if (__paes_set_key(ctx)) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return 0; } -static int ecb_paes_crypt(struct blkcipher_desc *desc, - unsigned long modifier, - struct blkcipher_walk *walk) +static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier) { - struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int nbytes, n, k; int ret; - ret = blkcipher_walk_virt(desc, walk); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + ret = skcipher_walk_virt(&walk, req, false); + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey, - walk->dst.virt.addr, walk->src.virt.addr, n); + walk.dst.virt.addr, walk.src.virt.addr, n); if (k) - ret = blkcipher_walk_done(desc, walk, nbytes - k); + ret = skcipher_walk_done(&walk, nbytes - k); if (k < n) { if (__paes_set_key(ctx) != 0) - return blkcipher_walk_done(desc, walk, -EIO); + return skcipher_walk_done(&walk, -EIO); } } return ret; } -static int ecb_paes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned 
int nbytes) +static int ecb_paes_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk); + return ecb_paes_crypt(req, 0); } -static int ecb_paes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_paes_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk); + return ecb_paes_crypt(req, CPACF_DECRYPT); } -static struct crypto_alg ecb_paes_alg = { - .cra_name = "ecb(paes)", - .cra_driver_name = "ecb-paes-s390", - .cra_priority = 401, /* combo: aes + ecb + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_paes_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list), - .cra_init = ecb_paes_init, - .cra_exit = ecb_paes_exit, - .cra_u = { - .blkcipher = { - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .setkey = ecb_paes_set_key, - .encrypt = ecb_paes_encrypt, - .decrypt = ecb_paes_decrypt, - } - } +static struct skcipher_alg ecb_paes_alg = { + .base.cra_name = "ecb(paes)", + .base.cra_driver_name = "ecb-paes-s390", + .base.cra_priority = 401, /* combo: aes + ecb + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list), + .init = ecb_paes_init, + .exit = ecb_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .setkey = ecb_paes_set_key, + .encrypt = ecb_paes_encrypt, + .decrypt = ecb_paes_decrypt, }; -static int cbc_paes_init(struct crypto_tfm *tfm) +static int cbc_paes_init(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->kb.key = NULL; return 0; } -static void cbc_paes_exit(struct crypto_tfm *tfm) +static void cbc_paes_exit(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); } @@ -258,11 +243,11 @@ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx) return ctx->fc ? 
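The skcipher_alg definition above also shows the structural half of the conversion: struct skcipher_alg embeds the generic struct crypto_alg as .base and lifts the cipher-specific members to the top level, while .cra_type and the CRYPTO_ALG_TYPE_BLKCIPHER flag disappear because the algorithm type is implied by registering through crypto_register_skcipher(). The mapping, summarized from the hunks in this file:

        /* old struct crypto_alg (nested)            new struct skcipher_alg (flat)
         * ------------------------------            ------------------------------
         * .cra_name                           ->    .base.cra_name
         * .cra_flags = ..._TYPE_BLKCIPHER     ->    (dropped, type is implicit)
         * .cra_type  = &crypto_blkcipher_type ->    (dropped)
         * .cra_init / .cra_exit               ->    .init / .exit
         * .cra_u.blkcipher.min_keysize        ->    .min_keysize
         * .cra_u.blkcipher.setkey             ->    .setkey
         */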
0 : -EINVAL; } -static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { int rc; - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); rc = _copy_key_to_kb(&ctx->kb, in_key, key_len); @@ -270,16 +255,17 @@ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return rc; if (__cbc_paes_set_key(ctx)) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return 0; } -static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, - struct blkcipher_walk *walk) +static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) { - struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int nbytes, n, k; int ret; struct { @@ -287,73 +273,60 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, u8 key[MAXPROTKEYSIZE]; } param; - ret = blkcipher_walk_virt(desc, walk); - memcpy(param.iv, walk->iv, AES_BLOCK_SIZE); + ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; + memcpy(param.iv, walk.iv, AES_BLOCK_SIZE); memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); k = cpacf_kmc(ctx->fc | modifier, &param, - walk->dst.virt.addr, walk->src.virt.addr, n); - if (k) - ret = blkcipher_walk_done(desc, walk, nbytes - k); + walk.dst.virt.addr, walk.src.virt.addr, n); + if (k) { + memcpy(walk.iv, param.iv, AES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, nbytes - k); + } if (k < n) { if (__cbc_paes_set_key(ctx) != 0) - return blkcipher_walk_done(desc, walk, -EIO); + return skcipher_walk_done(&walk, -EIO); memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); } } - memcpy(walk->iv, param.iv, AES_BLOCK_SIZE); return ret; } -static int cbc_paes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_paes_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_paes_crypt(desc, 0, &walk); + return cbc_paes_crypt(req, 0); } -static int cbc_paes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_paes_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk); + return cbc_paes_crypt(req, CPACF_DECRYPT); } -static struct crypto_alg cbc_paes_alg = { - .cra_name = "cbc(paes)", - .cra_driver_name = "cbc-paes-s390", - .cra_priority = 402, /* ecb-paes-s390 + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_paes_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list), - .cra_init = cbc_paes_init, - .cra_exit = cbc_paes_exit, - .cra_u = { - .blkcipher = { - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = cbc_paes_set_key, - .encrypt
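One behavioural detail in the CBC hunk above: CPACF KMC chains through the IV in its parameter block, so after each call the next chaining value sits in param.iv. Under the skcipher walk interface the final skcipher_walk_done() can complete the walk and propagate walk.iv back to the request, which is presumably why the IV copy-back moved from after the loop to before each skcipher_walk_done() call. The ordering, condensed and annotated (same calls as above):

        k = cpacf_kmc(ctx->fc | modifier, &param,
                      walk.dst.virt.addr, walk.src.virt.addr, n);
        if (k) {
                /* publish the chaining value while the walk is still live;
                 * skcipher_walk_done() may complete the walk right here */
                memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
                ret = skcipher_walk_done(&walk, nbytes - k);
        }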
= cbc_paes_encrypt, - .decrypt = cbc_paes_decrypt, - } - } +static struct skcipher_alg cbc_paes_alg = { + .base.cra_name = "cbc(paes)", + .base.cra_driver_name = "cbc-paes-s390", + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list), + .init = cbc_paes_init, + .exit = cbc_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_paes_set_key, + .encrypt = cbc_paes_encrypt, + .decrypt = cbc_paes_decrypt, }; -static int xts_paes_init(struct crypto_tfm *tfm) +static int xts_paes_init(struct crypto_skcipher *tfm) { - struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->kb[0].key = NULL; ctx->kb[1].key = NULL; @@ -361,9 +334,9 @@ static int xts_paes_init(struct crypto_tfm *tfm) return 0; } -static void xts_paes_exit(struct crypto_tfm *tfm) +static void xts_paes_exit(struct crypto_skcipher *tfm) { - struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb[0]); _free_kb_keybuf(&ctx->kb[1]); @@ -391,11 +364,11 @@ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx) return ctx->fc ? 0 : -EINVAL; } -static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int xts_key_len) { int rc; - struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); u8 ckey[2 * AES_MAX_KEY_SIZE]; unsigned int ckey_len, key_len; @@ -414,7 +387,7 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return rc; if (__xts_paes_set_key(ctx)) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -427,13 +400,14 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, AES_KEYSIZE_128 : AES_KEYSIZE_256; memcpy(ckey, ctx->pk[0].protkey, ckey_len); memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); - return xts_check_key(tfm, ckey, 2*ckey_len); + return xts_verify_key(tfm, ckey, 2*ckey_len); } -static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, - struct blkcipher_walk *walk) +static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier) { - struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; unsigned int keylen, offset, nbytes, n, k; int ret; struct { @@ -448,90 +422,76 @@ static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, u8 init[16]; } xts_param; - ret = blkcipher_walk_virt(desc, walk); + ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64; offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
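For xts(paes) the key handling deserves a remark: the algorithm takes a double-length key, the two halves are stored and converted to protected keys separately (the split itself falls into lines elided from this hunk), and a clear-key-format buffer is then rebuilt from the protected-key material so the generic XTS key sanity check can run; xts_check_key() is simply the old name of what is now xts_verify_key() for skcipher tfms. A simplified sketch of the setkey flow, error checks dropped:

        /* sketch of xts_paes_set_key(), simplified */
        key_len = xts_key_len / 2;                       /* two keys, back to back */
        _copy_key_to_kb(&ctx->kb[0], in_key, key_len);
        _copy_key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
        __xts_paes_set_key(ctx);                         /* derive both protected keys */
        memcpy(ckey, ctx->pk[0].protkey, ckey_len);      /* rebuild clear-key format */
        memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
        return xts_verify_key(tfm, ckey, 2 * ckey_len);  /* generic XTS key check */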
16 : 0; retry: memset(&pcc_param, 0, sizeof(pcc_param)); - memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); + memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak)); memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); cpacf_pcc(ctx->fc, pcc_param.key + offset); memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen); memcpy(xts_param.init, pcc_param.xts, 16); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + while ((nbytes = walk.nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); k = cpacf_km(ctx->fc | modifier, xts_param.key + offset, - walk->dst.virt.addr, walk->src.virt.addr, n); + walk.dst.virt.addr, walk.src.virt.addr, n); if (k) - ret = blkcipher_walk_done(desc, walk, nbytes - k); + ret = skcipher_walk_done(&walk, nbytes - k); if (k < n) { if (__xts_paes_set_key(ctx) != 0) - return blkcipher_walk_done(desc, walk, -EIO); + return skcipher_walk_done(&walk, -EIO); goto retry; } } return ret; } -static int xts_paes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int xts_paes_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return xts_paes_crypt(desc, 0, &walk); + return xts_paes_crypt(req, 0); } -static int xts_paes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int xts_paes_decrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return xts_paes_crypt(desc, CPACF_DECRYPT, &walk); + return xts_paes_crypt(req, CPACF_DECRYPT); } -static struct crypto_alg xts_paes_alg = { - .cra_name = "xts(paes)", - .cra_driver_name = "xts-paes-s390", - .cra_priority = 402, /* ecb-paes-s390 + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct s390_pxts_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list), - .cra_init = xts_paes_init, - .cra_exit = xts_paes_exit, - .cra_u = { - .blkcipher = { - .min_keysize = 2 * PAES_MIN_KEYSIZE, - .max_keysize = 2 * PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = xts_paes_set_key, - .encrypt = xts_paes_encrypt, - .decrypt = xts_paes_decrypt, - } - } +static struct skcipher_alg xts_paes_alg = { + .base.cra_name = "xts(paes)", + .base.cra_driver_name = "xts-paes-s390", + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_pxts_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list), + .init = xts_paes_init, + .exit = xts_paes_exit, + .min_keysize = 2 * PAES_MIN_KEYSIZE, + .max_keysize = 2 * PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = xts_paes_set_key, + .encrypt = xts_paes_encrypt, + .decrypt = xts_paes_decrypt, }; -static int ctr_paes_init(struct crypto_tfm *tfm) +static int ctr_paes_init(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->kb.key = NULL; return 0; } -static void ctr_paes_exit(struct crypto_tfm *tfm) +static void ctr_paes_exit(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); } @@ -555,11 +515,11 @@ static int __ctr_paes_set_key(struct 
s390_paes_ctx *ctx) return ctx->fc ? 0 : -EINVAL; } -static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { int rc; - struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); _free_kb_keybuf(&ctx->kb); rc = _copy_key_to_kb(&ctx->kb, in_key, key_len); @@ -567,7 +527,7 @@ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return rc; if (__ctr_paes_set_key(ctx)) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return 0; @@ -588,37 +548,37 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) return n; } -static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, - struct blkcipher_walk *walk) +static int ctr_paes_crypt(struct skcipher_request *req) { - struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); u8 buf[AES_BLOCK_SIZE], *ctrptr; + struct skcipher_walk walk; unsigned int nbytes, n, k; int ret, locked; locked = spin_trylock(&ctrblk_lock); - ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); - while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + ret = skcipher_walk_virt(&walk, req, false); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { n = AES_BLOCK_SIZE; if (nbytes >= 2*AES_BLOCK_SIZE && locked) - n = __ctrblk_init(ctrblk, walk->iv, nbytes); - ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv; - k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey, - walk->dst.virt.addr, walk->src.virt.addr, - n, ctrptr); + n = __ctrblk_init(ctrblk, walk.iv, nbytes); + ctrptr = (n > AES_BLOCK_SIZE) ? 
ctrblk : walk.iv; + k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr, + walk.src.virt.addr, n, ctrptr); if (k) { if (ctrptr == ctrblk) - memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE, + memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE, AES_BLOCK_SIZE); - crypto_inc(walk->iv, AES_BLOCK_SIZE); - ret = blkcipher_walk_done(desc, walk, nbytes - n); + crypto_inc(walk.iv, AES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, nbytes - n); } if (k < n) { if (__ctr_paes_set_key(ctx) != 0) { if (locked) spin_unlock(&ctrblk_lock); - return blkcipher_walk_done(desc, walk, -EIO); + return skcipher_walk_done(&walk, -EIO); } } } @@ -629,80 +589,54 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, */ if (nbytes) { while (1) { - if (cpacf_kmctr(ctx->fc | modifier, - ctx->pk.protkey, buf, - walk->src.virt.addr, AES_BLOCK_SIZE, - walk->iv) == AES_BLOCK_SIZE) + if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf, + walk.src.virt.addr, AES_BLOCK_SIZE, + walk.iv) == AES_BLOCK_SIZE) break; if (__ctr_paes_set_key(ctx) != 0) - return blkcipher_walk_done(desc, walk, -EIO); + return skcipher_walk_done(&walk, -EIO); } - memcpy(walk->dst.virt.addr, buf, nbytes); - crypto_inc(walk->iv, AES_BLOCK_SIZE); - ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk.dst.virt.addr, buf, nbytes); + crypto_inc(walk.iv, AES_BLOCK_SIZE); + ret = skcipher_walk_done(&walk, 0); } return ret; } -static int ctr_paes_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_paes_crypt(desc, 0, &walk); -} - -static int ctr_paes_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct blkcipher_walk walk; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk); -} - -static struct crypto_alg ctr_paes_alg = { - .cra_name = "ctr(paes)", - .cra_driver_name = "ctr-paes-s390", - .cra_priority = 402, /* ecb-paes-s390 + 1 */ - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct s390_paes_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list), - .cra_init = ctr_paes_init, - .cra_exit = ctr_paes_exit, - .cra_u = { - .blkcipher = { - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = ctr_paes_set_key, - .encrypt = ctr_paes_encrypt, - .decrypt = ctr_paes_decrypt, - } - } +static struct skcipher_alg ctr_paes_alg = { + .base.cra_name = "ctr(paes)", + .base.cra_driver_name = "ctr-paes-s390", + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list), + .init = ctr_paes_init, + .exit = ctr_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ctr_paes_set_key, + .encrypt = ctr_paes_crypt, + .decrypt = ctr_paes_crypt, + .chunksize = AES_BLOCK_SIZE, }; -static inline void __crypto_unregister_alg(struct crypto_alg *alg) +static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg) { - if (!list_empty(&alg->cra_list)) - crypto_unregister_alg(alg); + if (!list_empty(&alg->base.cra_list)) + crypto_unregister_skcipher(alg); } static void paes_s390_fini(void) { if 
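Unlike the ECB/CBC/XTS conversions, CTR ends up with a single work function wired to both callbacks: CTR mode XORs a keystream into the data and is therefore its own inverse, which is also why the CPACF modifier argument vanished from the kmctr calls above. The new .chunksize field tells the skcipher core the keystream granularity used when handling partial final blocks:

        /* C_i = P_i ^ E_K(ctr_i)  and  P_i = C_i ^ E_K(ctr_i) */
        .encrypt   = ctr_paes_crypt,
        .decrypt   = ctr_paes_crypt,
        .chunksize = AES_BLOCK_SIZE,    /* keystream advances per AES block */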
(ctrblk) free_page((unsigned long) ctrblk); - __crypto_unregister_alg(&ctr_paes_alg); - __crypto_unregister_alg(&xts_paes_alg); - __crypto_unregister_alg(&cbc_paes_alg); - __crypto_unregister_alg(&ecb_paes_alg); + __crypto_unregister_skcipher(&ctr_paes_alg); + __crypto_unregister_skcipher(&xts_paes_alg); + __crypto_unregister_skcipher(&cbc_paes_alg); + __crypto_unregister_skcipher(&ecb_paes_alg); } static int __init paes_s390_init(void) @@ -717,7 +651,7 @@ static int __init paes_s390_init(void) if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) || cpacf_test_func(&km_functions, CPACF_KM_PAES_192) || cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) { - ret = crypto_register_alg(&ecb_paes_alg); + ret = crypto_register_skcipher(&ecb_paes_alg); if (ret) goto out_err; } @@ -725,14 +659,14 @@ static int __init paes_s390_init(void) if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) { - ret = crypto_register_alg(&cbc_paes_alg); + ret = crypto_register_skcipher(&cbc_paes_alg); if (ret) goto out_err; } if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) || cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) { - ret = crypto_register_alg(&xts_paes_alg); + ret = crypto_register_skcipher(&xts_paes_alg); if (ret) goto out_err; } @@ -740,7 +674,7 @@ static int __init paes_s390_init(void) if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) { - ret = crypto_register_alg(&ctr_paes_alg); + ret = crypto_register_skcipher(&ctr_paes_alg); if (ret) goto out_err; ctrblk = (u8 *) __get_free_page(GFP_KERNEL); diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index d39e0f079217..686fe7aa192f 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c @@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *desc, u8 *out) struct s390_sha_ctx *ctx = shash_desc_ctx(desc); unsigned int bsize = crypto_shash_blocksize(desc->tfm); u64 bits; - unsigned int n, mbl_offset; + unsigned int n; + int mbl_offset; n = ctx->count % bsize; bits = ctx->count * 8; - mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32); + mbl_offset = s390_crypto_shash_parmsize(ctx->func); if (mbl_offset < 0) return -EINVAL; + mbl_offset = mbl_offset / sizeof(u32); + /* set total msg bit length (mbl) in CPACF parmblock */ switch (ctx->func) { case CPACF_KLMD_SHA_1: diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h index c2cf7bcdef9b..1c8a38f762a3 100644 --- a/arch/s390/include/asm/alternative.h +++ b/arch/s390/include/asm/alternative.h @@ -139,10 +139,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); * without volatile and memory clobber. 
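The sha_common.c hunk above fixes a classic signedness bug rather than a style issue: mbl_offset used to be unsigned, so the subsequent error check could never fire even when s390_crypto_shash_parmsize() returned a negative errno. Side by side, as a sketch:

        /* before: mbl_offset was unsigned, so the check was dead code */
        unsigned int mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
        if (mbl_offset < 0)             /* always false for an unsigned */
                return -EINVAL;

        /* after: test the signed return value first, divide afterwards */
        int mbl_offset = s390_crypto_shash_parmsize(ctx->func);
        if (mbl_offset < 0)
                return -EINVAL;
        mbl_offset = mbl_offset / sizeof(u32);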
*/ #define alternative(oldinstr, altinstr, facility) \ - asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory") + asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory") #define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \ - asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \ + asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \ altinstr2, facility2) ::: "memory") #endif /* __ASSEMBLY__ */ diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index 713fc9735ffb..a2b11ac00f60 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -9,7 +9,7 @@ #ifdef CONFIG_DEBUG_BUGVERBOSE #define __EMIT_BUG(x) do { \ - asm volatile( \ + asm_inline volatile( \ "0: j 0b+2\n" \ "1:\n" \ ".section .rodata.str,\"aMS\",@progbits,1\n" \ @@ -28,7 +28,7 @@ #else /* CONFIG_DEBUG_BUGVERBOSE */ #define __EMIT_BUG(x) do { \ - asm volatile( \ + asm_inline volatile( \ "0: j 0b+2\n" \ "1:\n" \ ".section __bug_table,\"awM\",@progbits,%1\n" \ diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 819803a97c2b..0d90cbeb89b4 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -313,7 +313,7 @@ static inline unsigned long *trailer_entry_ptr(unsigned long v) return (unsigned long *) ret; } -/* Return if the entry in the sample data block table (sdbt) +/* Return true if the entry in the sample data block table (sdbt) * is a link to the next sdbt */ static inline int is_link_entry(unsigned long *s) { diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 60f907516335..ed5efbb531c4 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -11,6 +11,7 @@ #include <linux/bits.h> #define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10) +#define CR0_LOW_ADDRESS_PROTECTION BIT(63 - 35) #define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49) #define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50) #define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52) diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index ca421614722f..5a16f500515a 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -26,10 +26,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define IO_SPACE_LIMIT 0 -#define ioremap_nocache(addr, size) ioremap(addr, size) -#define ioremap_wc ioremap_nocache -#define ioremap_wt ioremap_nocache - void __iomem *ioremap(unsigned long offset, unsigned long size); void iounmap(volatile void __iomem *addr); diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index abe60268335d..02f4c21c57f6 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -392,6 +392,7 @@ struct kvm_vcpu_stat { u64 diagnose_10; u64 diagnose_44; u64 diagnose_9c; + u64 diagnose_9c_ignored; u64 diagnose_258; u64 diagnose_308; u64 diagnose_500; diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 823578c6b9e2..a4d38092530a 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -177,8 +177,6 @@ static inline int devmem_is_allowed(unsigned long pfn) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#define ARCH_ZONE_DMA_BITS 31 - #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index a2399eff84ca..3a06c264ea53 100644 --- a/arch/s390/include/asm/pci.h +++ 
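On the asm to asm_inline switch in alternative.h and bug.h above (and in spinlock.h further down): compilers estimate the size of an asm statement from the length of its string, and the ALTERNATIVE and .section boilerplate makes these one-or-two-instruction sequences look enormous, which can defeat inlining of the functions containing them. "asm inline" (GCC 9 and newer) tells the optimizer to cost the statement as minimal. Roughly how the kernel wraps it, per include/linux/compiler_types.h of that era (shown as background, not part of this diff):

        #ifdef CONFIG_CC_HAS_ASM_INLINE
        #define asm_inline asm __inline /* GCC 9+: size the asm as one insn */
        #else
        #define asm_inline asm          /* older compilers: plain asm */
        #endif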
b/arch/s390/include/asm/pci.h @@ -2,9 +2,6 @@ #ifndef __ASM_S390_PCI_H #define __ASM_S390_PCI_H -/* must be set before including pci_clp.h */ -#define PCI_BAR_COUNT 6 - #include <linux/pci.h> #include <linux/mutex.h> #include <linux/iommu.h> @@ -138,7 +135,7 @@ struct zpci_dev { char res_name[16]; bool mio_capable; - struct zpci_bar_struct bars[PCI_BAR_COUNT]; + struct zpci_bar_struct bars[PCI_STD_NUM_BARS]; u64 start_dma; /* Start of available DMA addresses */ u64 end_dma; /* End of available DMA addresses */ diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h index 50359172cc48..bd2cb4ea7d93 100644 --- a/arch/s390/include/asm/pci_clp.h +++ b/arch/s390/include/asm/pci_clp.h @@ -77,7 +77,7 @@ struct mio_info { struct { u64 wb; u64 wt; - } addr[PCI_BAR_COUNT]; + } addr[PCI_STD_NUM_BARS]; u32 reserved[6]; } __packed; @@ -98,9 +98,9 @@ struct clp_rsp_query_pci { u16 util_str_avail : 1; /* utility string available? */ u16 pfgid : 8; /* pci function group id */ u32 fid; /* pci function id */ - u8 bar_size[PCI_BAR_COUNT]; + u8 bar_size[PCI_STD_NUM_BARS]; u16 pchid; - __le32 bar[PCI_BAR_COUNT]; + __le32 bar[PCI_STD_NUM_BARS]; u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ u32 : 16; u8 fmb_len; diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 4652ffffe0b2..b9da71632827 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -12,6 +12,7 @@ #include <linux/perf_event.h> #include <linux/device.h> +#include <asm/stacktrace.h> /* Per-CPU flags for PMU states */ #define PMU_F_RESERVED 0x1000 @@ -73,4 +74,10 @@ struct perf_sf_sde_regs { #define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS) #define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) +#define perf_arch_fetch_caller_regs(regs, __ip) do { \ + (regs)->psw.addr = (__ip); \ + (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \ + offsetof(struct stack_frame, back_chain); \ +} while (0) + #endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index bccb8f4a63e2..77606c4acd58 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) crst_table_init(table, _REGION2_ENTRY_EMPTY); return (p4d_t *) table; } -#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d) + +static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + if (!mm_p4d_folded(mm)) + crst_table_free(mm, (unsigned long *) p4d); +} static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { @@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) crst_table_init(table, _REGION3_ENTRY_EMPTY); return (pud_t *) table; } -#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud) + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + if (!mm_pud_folded(mm)) + crst_table_free(mm, (unsigned long *) pud); +} static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) { @@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { + if (mm_pmd_folded(mm)) + return; pgtable_pmd_page_dtor(virt_to_page(pmd)); crst_table_free(mm, (unsigned long *) pmd); } diff --git a/arch/s390/include/asm/pgtable.h 
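On the pgalloc.h guards above: s390 sizes its page tables dynamically, and in smaller address spaces the upper levels are folded, meaning the p4d/pud/pmd "table" is just an alias of the entry one level up rather than a separate crst table allocation. Freeing a folded level would free memory still owned by the level above it, hence the mm_*_folded() checks. The pud variant, with an explanatory comment added:

        static inline void pud_free(struct mm_struct *mm, pud_t *pud)
        {
                /* a folded pud is not a real region-3 table but an alias
                 * of the p4d entry; only free tables that were allocated */
                if (!mm_pud_folded(mm))
                        crst_table_free(mm, (unsigned long *) pud);
        }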
b/arch/s390/include/asm/pgtable.h index 5ff98d76a66c..7b03037a8475 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -266,11 +266,9 @@ static inline int is_module_addr(void *addr) #endif #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL -#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL /* Bits in the segment table entry */ #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL -#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL #define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ @@ -699,10 +697,8 @@ static inline int pmd_large(pmd_t pmd) static inline int pmd_bad(pmd_t pmd) { - if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0) + if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd)) return 1; - if (pmd_large(pmd)) - return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; } @@ -710,12 +706,10 @@ static inline int pud_bad(pud_t pud) { unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK; - if (type > _REGION_ENTRY_TYPE_R3) + if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud)) return 1; if (type < _REGION_ENTRY_TYPE_R3) return 0; - if (pud_large(pud)) - return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0; return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0; } @@ -758,18 +752,12 @@ static inline int pmd_write(pmd_t pmd) static inline int pmd_dirty(pmd_t pmd) { - int dirty = 1; - if (pmd_large(pmd)) - dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; - return dirty; + return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; } static inline int pmd_young(pmd_t pmd) { - int young = 1; - if (pmd_large(pmd)) - young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; - return young; + return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; } static inline int pte_present(pte_t pte) @@ -1173,8 +1161,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - if (!MACHINE_HAS_NX) - pte_val(entry) &= ~_PAGE_NOEXEC; if (pte_present(entry)) pte_val(entry) &= ~_PAGE_UNUSED; if (mm_has_pgste(mm)) @@ -1191,6 +1177,8 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); + if (!MACHINE_HAS_NX) + pte_val(__pte) &= ~_PAGE_NOEXEC; return pte_mkyoung(__pte); } @@ -1297,29 +1285,23 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd) static inline pmd_t pmd_mkwrite(pmd_t pmd) { pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; - if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) - return pmd; - pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; + if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) + pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; return pmd; } static inline pmd_t pmd_mkclean(pmd_t pmd) { - if (pmd_large(pmd)) { - pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; - } + pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; + pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; return pmd; } static inline pmd_t pmd_mkdirty(pmd_t pmd) { - if (pmd_large(pmd)) { - pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | - _SEGMENT_ENTRY_SOFT_DIRTY; - if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) - pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; - } + pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY; + if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) + pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; return pmd; } @@ -1333,29 
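The pgtable.h rework above rests on how s390 emulates dirty and referenced tracking for segment and region-3 entries: there are no hardware dirty/young bits at these levels, so software DIRTY/YOUNG bits are paired with the hardware PROTECT/INVALID bits and the next write or access takes a fault that sets the software bit. The pmd_large()/pud_large() special cases could go because pmd_bad() and pud_bad() now flag large (leaf) entries for table walkers, and the mk* helpers are understood to apply to leaf entries only. The protocol in two lines:

        pmd = pmd_mkclean(pmd); /* clear soft DIRTY, set hw PROTECT: next write faults  */
        pmd = pmd_mkold(pmd);   /* clear soft YOUNG, set hw INVALID: next access faults */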
+1315,23 @@ static inline pud_t pud_wrprotect(pud_t pud) static inline pud_t pud_mkwrite(pud_t pud) { pud_val(pud) |= _REGION3_ENTRY_WRITE; - if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY)) - return pud; - pud_val(pud) &= ~_REGION_ENTRY_PROTECT; + if (pud_val(pud) & _REGION3_ENTRY_DIRTY) + pud_val(pud) &= ~_REGION_ENTRY_PROTECT; return pud; } static inline pud_t pud_mkclean(pud_t pud) { - if (pud_large(pud)) { - pud_val(pud) &= ~_REGION3_ENTRY_DIRTY; - pud_val(pud) |= _REGION_ENTRY_PROTECT; - } + pud_val(pud) &= ~_REGION3_ENTRY_DIRTY; + pud_val(pud) |= _REGION_ENTRY_PROTECT; return pud; } static inline pud_t pud_mkdirty(pud_t pud) { - if (pud_large(pud)) { - pud_val(pud) |= _REGION3_ENTRY_DIRTY | - _REGION3_ENTRY_SOFT_DIRTY; - if (pud_val(pud) & _REGION3_ENTRY_WRITE) - pud_val(pud) &= ~_REGION_ENTRY_PROTECT; - } + pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY; + if (pud_val(pud) & _REGION3_ENTRY_WRITE) + pud_val(pud) &= ~_REGION_ENTRY_PROTECT; return pud; } @@ -1379,38 +1355,29 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) static inline pmd_t pmd_mkyoung(pmd_t pmd) { - if (pmd_large(pmd)) { - pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; - if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) - pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; - } + pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; + if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) + pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; return pmd; } static inline pmd_t pmd_mkold(pmd_t pmd) { - if (pmd_large(pmd)) { - pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; - pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; - } + pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; + pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; return pmd; } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { - if (pmd_large(pmd)) { - pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | - _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | - _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY; - pmd_val(pmd) |= massage_pgprot_pmd(newprot); - if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; - if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) - pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; - return pmd; - } - pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN; + pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | + _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | + _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY; pmd_val(pmd) |= massage_pgprot_pmd(newprot); + if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) + pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; + if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) + pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; return pmd; } diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 51a0e4a2dc96..361ef5eda468 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -206,7 +206,7 @@ unsigned long get_wchan(struct task_struct *p); /* Has task runtime instrumentation enabled ? 
*/ #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb) -static inline unsigned long current_stack_pointer(void) +static __always_inline unsigned long current_stack_pointer(void) { unsigned long sp; @@ -310,7 +310,7 @@ void enabled_wait(void); /* * Function to drop a processor into disabled wait state */ -static inline void __noreturn disabled_wait(void) +static __always_inline void __noreturn disabled_wait(void) { psw_t psw; diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index e3f238e8c611..71e3f0146cda 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -276,6 +276,7 @@ struct qdio_outbuf_state { #define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080 #define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040 #define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010 +#define CHSC_AC2_SNIFFER_AVAILABLE 0x0008 #define CHSC_AC2_DATA_DIV_ENABLED 0x0002 #define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000 diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c02bff33f6c7..3a37172d5398 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -85,7 +85,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp) static inline void arch_spin_unlock(arch_spinlock_t *lp) { typecheck(int, lp->lock); - asm volatile( + asm_inline volatile( ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */ " sth %1,%0\n" : "=Q" (((unsigned short *) &lp->lock)[1]) diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h index 0ae4bbf7779c..ee056f4a4fa3 100644 --- a/arch/s390/include/asm/stacktrace.h +++ b/arch/s390/include/asm/stacktrace.h @@ -33,8 +33,8 @@ static inline bool on_stack(struct stack_info *info, return addr >= info->begin && addr + len <= info->end; } -static inline unsigned long get_stack_pointer(struct task_struct *task, - struct pt_regs *regs) +static __always_inline unsigned long get_stack_pointer(struct task_struct *task, + struct pt_regs *regs) { if (regs) return (unsigned long) kernel_stack_pointer(regs); @@ -62,6 +62,17 @@ struct stack_frame { }; #endif +/* + * Unlike current_stack_pointer() which simply returns current value of %r15 + * current_frame_address() returns function stack frame address, which matches + * %r15 upon function invocation. It may differ from %r15 later if function + * allocates stack for local variables or new stack frame to call other + * functions. + */ +#define current_frame_address() \ + ((unsigned long)__builtin_frame_address(0) - \ + offsetof(struct stack_frame, back_chain)) + #define CALL_ARGS_0() \ register unsigned long r2 asm("2") #define CALL_ARGS_1(arg1) \ @@ -95,20 +106,33 @@ struct stack_frame { #define CALL_ON_STACK(fn, stack, nr, args...) 
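The CALL_ON_STACK rework that follows builds on current_frame_address() from the hunk above: the macro now stores the caller's frame address as the back chain on the new stack, instead of whatever %r15 happened to be after the compiler set up locals, so the unwinder can walk from the alternate stack back into the calling context; the new CALL_ON_STACK_NORETURN clears the back chain with xc, deliberately terminating the chain for a function that never returns. The resulting layout, sketched:

        /* while fn() runs on the alternate stack (sketch):
         *
         *   alternate stack: [ back_chain ] --> caller's frame on the old stack
         *   old stack:       [ caller frame ] --> ... --> task entry frame
         *
         * CALL_ON_STACK_NORETURN: [ back_chain = 0 ]  (unwinder stops there)
         */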
\ ({ \ + unsigned long frame = current_frame_address(); \ CALL_ARGS_##nr(args); \ unsigned long prev; \ \ asm volatile( \ " la %[_prev],0(15)\n" \ - " la 15,0(%[_stack])\n" \ - " stg %[_prev],%[_bc](15)\n" \ + " lg 15,%[_stack]\n" \ + " stg %[_frame],%[_bc](15)\n" \ " brasl 14,%[_fn]\n" \ " la 15,0(%[_prev])\n" \ : [_prev] "=&a" (prev), CALL_FMT_##nr \ - [_stack] "a" (stack), \ + [_stack] "R" (stack), \ [_bc] "i" (offsetof(struct stack_frame, back_chain)), \ + [_frame] "d" (frame), \ [_fn] "X" (fn) : CALL_CLOBBER_##nr); \ r2; \ }) +#define CALL_ON_STACK_NORETURN(fn, stack) \ +({ \ + asm volatile( \ + " la 15,0(%[_stack])\n" \ + " xc %[_bc](8,15),%[_bc](15)\n" \ + " brasl 14,%[_fn]\n" \ + ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \ + [_stack] "a" (stack), [_fn] "X" (fn)); \ + BUG(); \ +}) + #endif /* _ASM_S390_STACKTRACE_H */ diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 64539c221672..6da8885251d6 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -10,8 +10,9 @@ #ifndef _ASM_S390_TIMEX_H #define _ASM_S390_TIMEX_H -#include <asm/lowcore.h> +#include <linux/preempt.h> #include <linux/time64.h> +#include <asm/lowcore.h> /* The value of the TOD clock for 1.1.1970. */ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL @@ -179,22 +180,24 @@ static inline cycles_t get_cycles(void) int get_phys_clock(unsigned long *clock); void init_cpu_timer(void); -unsigned long long monotonic_clock(void); extern unsigned char tod_clock_base[16] __aligned(8); /** * get_clock_monotonic - returns current time in clock rate units * - * The caller must ensure that preemption is disabled. * The clock and tod_clock_base get changed via stop_machine. - * Therefore preemption must be disabled when calling this - * function, otherwise the returned value is not guaranteed to - * be monotonic. + * Therefore preemption must be disabled, otherwise the returned + * value is not guaranteed to be monotonic. */ static inline unsigned long long get_tod_clock_monotonic(void) { - return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; + unsigned long long tod; + + preempt_disable(); + tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; + preempt_enable(); + return tod; } /** diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h index d827b5b9a32c..de9006b0cfeb 100644 --- a/arch/s390/include/asm/unwind.h +++ b/arch/s390/include/asm/unwind.h @@ -58,10 +58,11 @@ static inline bool unwind_error(struct unwind_state *state) static inline void unwind_start(struct unwind_state *state, struct task_struct *task, struct pt_regs *regs, - unsigned long sp) + unsigned long first_frame) { - sp = sp ? : get_stack_pointer(task, regs); - __unwind_start(state, task, regs, sp); + task = task ?: current; + first_frame = first_frame ?: get_stack_pointer(task, regs); + __unwind_start(state, task, regs, first_frame); } static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 169d7604eb80..3bcfdeb01395 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -41,8 +41,17 @@ struct vdso_data { struct vdso_per_cpu_data { __u64 ectg_timer_base; __u64 ectg_user_time; - __u32 cpu_nr; - __u32 node_id; + /* + * Note: node_id and cpu_nr must be at adjacent memory locations. + * VDSO userspace must read both values with a single instruction. 
+ */ + union { + __u64 getcpu_val; + struct { + __u32 node_id; + __u32 cpu_nr; + }; + }; }; extern struct vdso_data *vdso_data; diff --git a/arch/s390/include/uapi/asm/ipcbuf.h b/arch/s390/include/uapi/asm/ipcbuf.h index 5b1c4f47c656..1030cd186899 100644 --- a/arch/s390/include/uapi/asm/ipcbuf.h +++ b/arch/s390/include/uapi/asm/ipcbuf.h @@ -2,6 +2,8 @@ #ifndef __S390_IPCBUF_H__ #define __S390_IPCBUF_H__ +#include <linux/posix_types.h> + /* * The user_ipc_perm structure for S/390 architecture. * Note extra padding because this structure is passed back and forth diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 7edbbcd8228a..2b1203cf7be6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -81,4 +81,3 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o # vdso obj-y += vdso64/ -obj-$(CONFIG_COMPAT_VDSO) += vdso32/ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 41ac4ad21311..ce33406cfe83 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -78,8 +78,7 @@ int main(void) OFFSET(__VDSO_TS_END, vdso_data, ts_end); OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); - OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr); - OFFSET(__VDSO_NODE_ID, vdso_per_cpu_data, node_id); + OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val); BLANK(); /* constants used by the vdso */ DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 7abe6ae261b4..f304802ecf7b 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -461,10 +461,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) ptr += sprintf(ptr, "%%c%i", value); else if (operand->flags & OPERAND_VR) ptr += sprintf(ptr, "%%v%i", value); - else if (operand->flags & OPERAND_PCREL) - ptr += sprintf(ptr, "%lx", (signed int) value - + addr); - else if (operand->flags & OPERAND_SIGNED) + else if (operand->flags & OPERAND_PCREL) { + void *pcrel = (void *)((int)value + addr); + + ptr += sprintf(ptr, "%px", pcrel); + } else if (operand->flags & OPERAND_SIGNED) ptr += sprintf(ptr, "%i", value); else ptr += sprintf(ptr, "%u", value); @@ -536,7 +537,7 @@ void show_code(struct pt_regs *regs) else *ptr++ = ' '; addr = regs->psw.addr + start - 32; - ptr += sprintf(ptr, "%016lx: ", addr); + ptr += sprintf(ptr, "%px: ", (void *)addr); if (start + opsize >= end) break; for (i = 0; i < opsize; i++) @@ -564,7 +565,7 @@ void print_fn_code(unsigned char *code, unsigned long len) opsize = insn_length(*code); if (opsize > len) break; - ptr += sprintf(ptr, "%p: ", code); + ptr += sprintf(ptr, "%px: ", code); for (i = 0; i < opsize; i++) ptr += sprintf(ptr, "%02x", code[i]); *ptr++ = '\t'; diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 34bdc60c0b11..d306fe04489a 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -38,6 +38,7 @@ const char *stack_type_name(enum stack_type type) return "unknown"; } } +EXPORT_SYMBOL_GPL(stack_type_name); static inline bool in_stack(unsigned long sp, struct stack_info *info, enum stack_type type, unsigned long low, @@ -93,7 +94,9 @@ int get_stack_info(unsigned long sp, struct task_struct *task, if (!sp) goto unknown; - task = task ? : current; + /* Sanity check: ABI requires SP to be aligned 8 bytes. 
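On the vdso_per_cpu_data union above: the vdso getcpu code must observe node_id and cpu_nr atomically with respect to migration, hence the overlay of the two __u32 fields with one __u64 getcpu_val that the vdso fetches in a single load (asm-offsets.c accordingly exports only __VDSO_GETCPU_VAL now). In C terms, remembering that s390 is big endian so the first field lands in the high word; the vdso itself does this in assembly and the pointer name here is purely illustrative:

        u64 val = READ_ONCE(vpcd->getcpu_val);  /* one 64-bit load, no tearing */
        u32 node = val >> 32;                   /* node_id: first field, high word */
        u32 cpu  = val & 0xffffffff;            /* cpu_nr: second field, low word */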
*/ + if (sp & 0x7) + goto unknown; /* Check per-task stack */ if (in_task_stack(sp, task, info)) @@ -128,8 +131,6 @@ void show_stack(struct task_struct *task, unsigned long *stack) struct unwind_state state; printk("Call Trace:\n"); - if (!task) - task = current; unwind_for_each_frame(&state, task, NULL, (unsigned long) stack) printk(state.reliable ? " [<%016lx>] %pSR \n" : "([<%016lx>] %pSR)\n", diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index b432d63d0b37..db32a55daaec 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -30,6 +30,7 @@ #include <asm/sclp.h> #include <asm/facility.h> #include <asm/boot_data.h> +#include <asm/switch_to.h> #include "entry.h" static void __init reset_tod_clock(void) @@ -238,7 +239,7 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_VX; __ctl_set_bit(0, 17); } - if (test_facility(130)) { + if (test_facility(130) && !noexec_disabled) { S390_lowcore.machine_flags |= MACHINE_FLAG_NX; __ctl_set_bit(0, 20); } @@ -260,6 +261,24 @@ static inline void save_vector_registers(void) #endif } +static inline void setup_control_registers(void) +{ + unsigned long reg; + + __ctl_store(reg, 0, 0); + reg |= CR0_LOW_ADDRESS_PROTECTION; + reg |= CR0_EMERGENCY_SIGNAL_SUBMASK; + reg |= CR0_EXTERNAL_CALL_SUBMASK; + __ctl_load(reg, 0, 0); +} + +static inline void setup_access_registers(void) +{ + unsigned int acrs[NUM_ACRS] = { 0 }; + + restore_access_regs(acrs); +} + static int __init disable_vector_extension(char *str) { S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; @@ -268,21 +287,6 @@ static int __init disable_vector_extension(char *str) } early_param("novx", disable_vector_extension); -static int __init noexec_setup(char *str) -{ - bool enabled; - int rc; - - rc = kstrtobool(str, &enabled); - if (!rc && !enabled) { - /* Disable no-execute support */ - S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX; - __ctl_clear_bit(0, 20); - } - return rc; -} -early_param("noexec", noexec_setup); - static int __init cad_setup(char *str) { bool enabled; @@ -332,5 +336,7 @@ void __init startup_init(void) save_vector_registers(); setup_topology(); sclp_early_detect(); + setup_control_registers(); + setup_access_registers(); lockdep_on(); } diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 0d9ee198f4eb..8b88dbbda7df 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -26,30 +26,17 @@ ENTRY(startup_continue) 0: larl %r1,tod_clock_base mvc 0(16,%r1),__LC_BOOT_CLOCK larl %r13,.LPG1 # get base - larl %r0,boot_vdso_data - stg %r0,__LC_VDSO_PER_CPU # # Setup stack # larl %r14,init_task stg %r14,__LC_CURRENT - larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD + larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE #ifdef CONFIG_KASAN brasl %r14,kasan_early_init #endif -# -# Early machine initialization and detection functions. -# - brasl %r14,startup_init - -# check control registers - stctg %c0,%c15,0(%r15) - oi 6(%r15),0x60 # enable sigp emergency & external call - oi 4(%r15),0x10 # switch on low address proctection - lctlg %c0,%c15,0(%r15) - - lam 0,15,.Laregs-.LPG1(%r13) # load acrs needed by uaccess - brasl %r14,start_kernel # go to C code + brasl %r14,startup_init # s390 specific early init + brasl %r14,start_kernel # common init code # # We returned from start_kernel ?!? 
PANIK # @@ -59,4 +46,3 @@ ENTRY(startup_continue) .align 16 .LPG1: .Ldw: .quad 0x0002000180000000,0x0000000000000000 -.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index b9d8fe45737a..8f8456816d83 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); static ssize_t show_idle_time(struct device *dev, struct device_attribute *attr, char *buf) { + unsigned long long now, idle_time, idle_enter, idle_exit, in_idle; struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); - unsigned long long now, idle_time, idle_enter, idle_exit; unsigned int seq; do { - now = get_tod_clock(); seq = read_seqcount_begin(&idle->seqcount); idle_time = READ_ONCE(idle->idle_time); idle_enter = READ_ONCE(idle->clock_idle_enter); idle_exit = READ_ONCE(idle->clock_idle_exit); } while (read_seqcount_retry(&idle->seqcount, seq)); - idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; + in_idle = 0; + now = get_tod_clock(); + if (idle_enter) { + if (idle_exit) { + in_idle = idle_exit - idle_enter; + } else if (now > idle_enter) { + in_idle = now - idle_enter; + } + } + idle_time += in_idle; return sprintf(buf, "%llu\n", idle_time >> 12); } DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); @@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); u64 arch_cpu_idle_time(int cpu) { struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); - unsigned long long now, idle_enter, idle_exit; + unsigned long long now, idle_enter, idle_exit, in_idle; unsigned int seq; do { - now = get_tod_clock(); seq = read_seqcount_begin(&idle->seqcount); idle_enter = READ_ONCE(idle->clock_idle_enter); idle_exit = READ_ONCE(idle->clock_idle_exit); } while (read_seqcount_retry(&idle->seqcount, seq)); - - return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0); + in_idle = 0; + now = get_tod_clock(); + if (idle_enter) { + if (idle_exit) { + in_idle = idle_exit - idle_enter; + } else if (now > idle_enter) { + in_idle = now - idle_enter; + } + } + return cputime_to_nsecs(in_idle); } void arch_cpu_idle_enter(void) diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 444a19125a81..cb8b1cc285c9 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -164,7 +164,9 @@ static bool kdump_csum_valid(struct kimage *image) #ifdef CONFIG_CRASH_DUMP int rc; + preempt_disable(); rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image); + preempt_enable(); return rc == 0; #else return false; @@ -254,10 +256,10 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_SYMBOL(lowcore_ptr); VMCOREINFO_SYMBOL(high_memory); VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); - mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); vmcoreinfo_append_str("SDMA=%lx\n", __sdma); vmcoreinfo_append_str("EDMA=%lx\n", __edma); vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); + mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); } void machine_shutdown(void) diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c index 3b664cb3ec4d..d5035de9020e 100644 --- a/arch/s390/kernel/machine_kexec_reloc.c +++ b/arch/s390/kernel/machine_kexec_reloc.c @@ -27,6 +27,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, *(u32 *)loc = val; break; case R_390_64: /* Direct 64 bit. 
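On the idle.c rework above: the TOD clock used to be sampled inside the seqcount retry loop, before the snapshot was known to be consistent, so a concurrent idle exit and re-entry could leave idle_enter ahead of now, and the unsigned subtraction would wrap into an absurd idle time. Both readers now take the clock after a consistent snapshot and clamp the still-idle case, per the shared pattern:

        in_idle = 0;
        now = get_tod_clock();          /* sampled after the seqcount loop */
        if (idle_enter) {
                if (idle_exit)
                        in_idle = idle_exit - idle_enter;   /* completed period */
                else if (now > idle_enter)
                        in_idle = now - idle_enter;         /* still idle, clamped */
        }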
*/ + case R_390_GLOB_DAT: *(u64 *)loc = val; break; case R_390_PC16: /* PC relative 16 bit. */ diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 48d48b6187c0..0eb1d1cc53a8 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -199,7 +199,7 @@ static const int cpumf_generic_events_user[] = { [PERF_COUNT_HW_BUS_CYCLES] = -1, }; -static int __hw_perf_event_init(struct perf_event *event) +static int __hw_perf_event_init(struct perf_event *event, unsigned int type) { struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; @@ -207,7 +207,7 @@ static int __hw_perf_event_init(struct perf_event *event) int err = 0; u64 ev; - switch (attr->type) { + switch (type) { case PERF_TYPE_RAW: /* Raw events are used to access counters directly, * hence do not permit excludes */ @@ -294,17 +294,16 @@ static int __hw_perf_event_init(struct perf_event *event) static int cpumf_pmu_event_init(struct perf_event *event) { + unsigned int type = event->attr.type; int err; - switch (event->attr.type) { - case PERF_TYPE_HARDWARE: - case PERF_TYPE_HW_CACHE: - case PERF_TYPE_RAW: - err = __hw_perf_event_init(event); - break; - default: + if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW) + err = __hw_perf_event_init(event, type); + else if (event->pmu->type == type) + /* Registered as unknown PMU */ + err = __hw_perf_event_init(event, PERF_TYPE_RAW); + else return -ENOENT; - } if (unlikely(err) && event->destroy) event->destroy(event); @@ -553,7 +552,7 @@ static int __init cpumf_pmu_init(void) return -ENODEV; cpumf_pmu.attr_groups = cpumf_cf_event_group(); - rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); + rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1); if (rc) pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); return rc; diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c index 2654e348801a..e949ab832ed7 100644 --- a/arch/s390/kernel/perf_cpum_cf_diag.c +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -243,13 +243,13 @@ static int cf_diag_event_init(struct perf_event *event) int err = -ENOENT; debug_sprintf_event(cf_diag_dbg, 5, - "%s event %p cpu %d config %#llx " + "%s event %p cpu %d config %#llx type:%u " "sample_type %#llx cf_diag_events %d\n", __func__, - event, event->cpu, attr->config, attr->sample_type, - atomic_read(&cf_diag_events)); + event, event->cpu, attr->config, event->pmu->type, + attr->sample_type, atomic_read(&cf_diag_events)); if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG || - event->attr.type != PERF_TYPE_RAW) + event->attr.type != event->pmu->type) goto out; /* Raw events are used to access counters directly, @@ -693,7 +693,7 @@ static int __init cf_diag_init(void) } debug_register_view(cf_diag_dbg, &debug_sprintf_view); - rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW); + rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1); if (rc) { debug_unregister_view(cf_diag_dbg, &debug_sprintf_view); debug_unregister(cf_diag_dbg); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 3d8b12a9a6ff..c07fdcd73726 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -156,8 +156,8 @@ static void free_sampling_buffer(struct sf_buffer *sfb) } } - debug_sprintf_event(sfdbg, 5, - "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt); + debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__, + (unsigned long)sfb->sdbt); memset(sfb, 0, sizeof(*sfb)); } @@ -193,7 
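On the perf registration changes above: cpum_cf and cpum_cf_diag previously both claimed the fixed PERF_TYPE_RAW slot. Passing -1 to perf_pmu_register() asks the core to assign a dynamic type id instead, so the event_init callbacks now additionally accept events that address the PMU through that id, i.e. event->attr.type == event->pmu->type. The essential two lines:

        rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1); /* -1: core picks pmu->type */
        ...
        if (event->pmu->type == type)           /* event aimed at this PMU's id */
                err = __hw_perf_event_init(event, PERF_TYPE_RAW);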
+193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb, gfp_t gfp_flags) { int i, rc; - unsigned long *new, *tail; + unsigned long *new, *tail, *tail_prev = NULL; if (!sfb->sdbt || !sfb->tail) return -EINVAL; @@ -212,10 +212,11 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, * the sampling buffer origin. */ if (sfb->sdbt != get_next_sdbt(tail)) { - debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: " - "sampling buffer is not linked: origin=%p" - "tail=%p\n", - (void *) sfb->sdbt, (void *) tail); + debug_sprintf_event(sfdbg, 3, "%s: " + "sampling buffer is not linked: origin %#lx" + " tail %#lx\n", __func__, + (unsigned long)sfb->sdbt, + (unsigned long)tail); return -EINVAL; } @@ -232,6 +233,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, sfb->num_sdbt++; /* Link current page to tail of chain */ *tail = (unsigned long)(void *) new + 1; + tail_prev = tail; tail = new; } @@ -241,18 +243,30 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, * issue, a new realloc call (if required) might succeed. */ rc = alloc_sample_data_block(tail, gfp_flags); - if (rc) + if (rc) { + /* Undo last SDBT. An SDBT with no SDB at its first + * entry but with an SDBT entry instead can not be + * handled by the interrupt handler code. + * Avoid this situation. + */ + if (tail_prev) { + sfb->num_sdbt--; + free_page((unsigned long) new); + tail = tail_prev; + } break; + } sfb->num_sdb++; tail++; + tail_prev = new = NULL; /* Allocated at least one SBD */ } /* Link sampling buffer to its origin */ *tail = (unsigned long) sfb->sdbt + 1; sfb->tail = tail; - debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer" - " settings: sdbt=%lu sdb=%lu\n", + debug_sprintf_event(sfdbg, 4, "%s: new buffer" + " settings: sdbt %lu sdb %lu\n", __func__, sfb->num_sdbt, sfb->num_sdb); return rc; } @@ -292,12 +306,13 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); if (rc) { free_sampling_buffer(sfb); - debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: " - "realloc_sampling_buffer failed with rc=%i\n", rc); + debug_sprintf_event(sfdbg, 4, "%s: " + "realloc_sampling_buffer failed with rc %i\n", + __func__, rc); } else debug_sprintf_event(sfdbg, 4, - "alloc_sampling_buffer: tear=%p dear=%p\n", - sfb->sdbt, (void *) *sfb->sdbt); + "%s: tear %#lx dear %#lx\n", __func__, + (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt); return rc; } @@ -404,8 +419,8 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) return 0; debug_sprintf_event(sfdbg, 3, - "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu" - " sample_size=%lu cpuhw=%p\n", + "%s: rate %lu f %lu sdb %lu/%lu" + " sample_size %lu cpuhw %p\n", __func__, SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), sample_size, cpuhw); @@ -465,8 +480,8 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, if (num) sfb_account_allocs(num, hwc); - debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu" - " num=%lu\n", OVERFLOW_REG(hwc), ratio, num); + debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n", + __func__, OVERFLOW_REG(hwc), ratio, num); OVERFLOW_REG(hwc) = 0; } @@ -504,13 +519,13 @@ static void extend_sampling_buffer(struct sf_buffer *sfb, */ rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); if (rc) - debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc " - "failed with rc=%i\n", rc); + debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc 
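On the realloc_sampling_buffer() fix above: the sampling buffer is a circular chain of sample-data-block tables whose slots either point to an SDB or, tagged in bit 0, link to the next SDBT. The measurement interrupt handler cannot cope with an SDBT whose very first slot is a link, so when the SDB allocation fails right after a fresh SDBT page was linked in, the new tail_prev bookkeeping unlinks and frees that page again. For reference, the link-tag test from cpu_mf.h (quoted for context, not part of this hunk):

        /* SDBT ---> [SDB][SDB]...[link] ---> next SDBT ... (chain is circular) */
        static inline int is_link_entry(unsigned long *s)
        {
                return *s & 0x1ul;      /* bit 0 set: entry links to next SDBT */
        }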
%i\n", + __func__, rc); if (sfb_has_pending_allocs(sfb, hwc)) - debug_sprintf_event(sfdbg, 5, "sfb: extend: " - "req=%lu alloc=%lu remaining=%lu\n", - num, sfb->num_sdb - num_old, + debug_sprintf_event(sfdbg, 5, "%s: " + "req %lu alloc %lu remaining %lu\n", + __func__, num, sfb->num_sdb - num_old, sfb_pending_allocs(sfb, hwc)); } @@ -538,20 +553,22 @@ static void setup_pmc_cpu(void *flags) err = sf_disable(); if (err) pr_err("Switching off the sampling facility failed " - "with rc=%i\n", err); + "with rc %i\n", err); debug_sprintf_event(sfdbg, 5, - "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf); + "%s: initialized: cpuhw %p\n", __func__, + cpusf); break; case PMC_RELEASE: cpusf->flags &= ~PMU_F_RESERVED; err = sf_disable(); if (err) { pr_err("Switching off the sampling facility failed " - "with rc=%i\n", err); + "with rc %i\n", err); } else deallocate_buffers(cpusf); debug_sprintf_event(sfdbg, 5, - "setup_pmc_cpu: released: cpuhw=%p\n", cpusf); + "%s: released: cpuhw %p\n", __func__, + cpusf); break; } if (err) @@ -598,13 +615,6 @@ static void hw_init_period(struct hw_perf_event *hwc, u64 period) local64_set(&hwc->period_left, hwc->sample_period); } -static void hw_reset_registers(struct hw_perf_event *hwc, - unsigned long *sdbt_origin) -{ - /* (Re)set to first sample-data-block-table */ - TEAR_REG(hwc) = (unsigned long) sdbt_origin; -} - static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, unsigned long rate) { @@ -696,9 +706,9 @@ static unsigned long getrate(bool freq, unsigned long sample, */ if (sample_rate_to_freq(si, rate) > sysctl_perf_event_sample_rate) { - debug_sprintf_event(sfdbg, 1, + debug_sprintf_event(sfdbg, 1, "%s: " "Sampling rate exceeds maximum " - "perf sample rate\n"); + "perf sample rate\n", __func__); rate = 0; } } @@ -743,10 +753,9 @@ static int __hw_perf_event_init_rate(struct perf_event *event, attr->sample_period = rate; SAMPL_RATE(hwc) = rate; hw_init_period(hwc, SAMPL_RATE(hwc)); - debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:" - "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu, - event->attr.sample_period, event->attr.freq, - SAMPLE_FREQ_MODE(hwc)); + debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n", + __func__, event->cpu, event->attr.sample_period, + event->attr.freq, SAMPLE_FREQ_MODE(hwc)); return 0; } @@ -949,8 +958,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu) * buffer extents */ sfb_account_overflows(cpuhw, hwc); - if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) - extend_sampling_buffer(&cpuhw->sfb, hwc); + extend_sampling_buffer(&cpuhw->sfb, hwc); } /* Rate may be adjusted with ioctl() */ cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); @@ -963,7 +971,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu) err = lsctl(&cpuhw->lsctl); if (err) { cpuhw->flags &= ~PMU_F_ENABLED; - pr_err("Loading sampling controls failed: op=%i err=%i\n", + pr_err("Loading sampling controls failed: op %i err %i\n", 1, err); return; } @@ -971,12 +979,11 @@ static void cpumsf_pmu_enable(struct pmu *pmu) /* Load current program parameter */ lpp(&S390_lowcore.lpp); - debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i " - "interval:%lx tear=%p dear=%p\n", + debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " + "interval %#lx tear %#lx dear %#lx\n", __func__, cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, cpuhw->lsctl.cd, cpuhw->lsctl.interval, - (void *) cpuhw->lsctl.tear, - (void *) cpuhw->lsctl.dear); + cpuhw->lsctl.tear, cpuhw->lsctl.dear); } static void cpumsf_pmu_disable(struct pmu 
*pmu) @@ -999,13 +1006,14 @@ static void cpumsf_pmu_disable(struct pmu *pmu) err = lsctl(&inactive); if (err) { - pr_err("Loading sampling controls failed: op=%i err=%i\n", + pr_err("Loading sampling controls failed: op %i err %i\n", 2, err); return; } /* Save state of TEAR and DEAR register contents */ - if (!qsi(&si)) { + err = qsi(&si); + if (!err) { /* TEAR/DEAR values are valid only if the sampling facility is * enabled. Note that cpumsf_pmu_disable() might be called even * for a disabled sampling facility because cpumsf_pmu_enable() @@ -1016,8 +1024,8 @@ static void cpumsf_pmu_disable(struct pmu *pmu) cpuhw->lsctl.dear = si.dear; } } else - debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: " - "qsi() failed with err=%i\n", err); + debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n", + __func__, err); cpuhw->flags &= ~PMU_F_ENABLED; } @@ -1130,15 +1138,6 @@ static void perf_event_count_update(struct perf_event *event, u64 count) local64_add(count, &event->count); } -static void debug_sample_entry(struct hws_basic_entry *sample, - struct hws_trailer_entry *te) -{ - debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown " - "sampling data entry: te->f=%i basic.def=%04x " - "(%p)\n", - te->f, sample->def, sample); -} - /* hw_collect_samples() - Walk through a sample-data-block and collect samples * @event: The perf event * @sdbt: Sample-data-block table @@ -1192,7 +1191,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, /* Count discarded samples */ *overflow += 1; } else { - debug_sample_entry(sample, te); + debug_sprintf_event(sfdbg, 4, + "%s: Found unknown" + " sampling data entry: te->f %i" + " basic.def %#4x (%p)\n", __func__, + te->f, sample->def, sample); /* Sample slot is not yet written or other record. * * This condition can occur if the buffer was reused @@ -1267,9 +1270,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) sampl_overflow += te->overflow; /* Timestamps are valid for full sample-data-blocks only */ - debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p " - "overflow=%llu timestamp=%#llx\n", - sdbt, te->overflow, + debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx " + "overflow %llu timestamp %#llx\n", + __func__, (unsigned long)sdbt, te->overflow, (te->f) ? 
trailer_timestamp(te) : 0ULL); /* Collect all samples from a single sample-data-block and @@ -1313,9 +1316,11 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + sampl_overflow, 1 + num_sdb); if (sampl_overflow || event_overflow) - debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " - "overflow stats: sample=%llu event=%llu\n", - sampl_overflow, event_overflow); + debug_sprintf_event(sfdbg, 4, "%s: " + "overflows: sample %llu event %llu" + " total %llu num_sdb %llu\n", + __func__, sampl_overflow, event_overflow, + OVERFLOW_REG(hwc), num_sdb); } #define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb) @@ -1368,7 +1373,7 @@ static void aux_output_end(struct perf_output_handle *handle) te = aux_sdb_trailer(aux, aux->alert_mark); te->flags &= ~SDB_TE_ALERT_REQ_MASK; - debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i); + debug_sprintf_event(sfdbg, 6, "%s: collect %#lx SDBs\n", __func__, i); } /* @@ -1426,10 +1431,10 @@ static int aux_output_begin(struct perf_output_handle *handle, cpuhw->lsctl.tear = base + offset * sizeof(unsigned long); cpuhw->lsctl.dear = aux->sdb_index[head]; - debug_sprintf_event(sfdbg, 6, "aux_output_begin: " + debug_sprintf_event(sfdbg, 6, "%s: " "head->alert_mark->empty_mark (num_alert, range)" - "[%lx -> %lx -> %lx] (%lx, %lx) " - "tear index %lx, tear %lx dear %lx\n", + "[%#lx -> %#lx -> %#lx] (%#lx, %#lx) " + "tear index %#lx, tear %#lx dear %#lx\n", __func__, aux->head, aux->alert_mark, aux->empty_mark, AUX_SDB_NUM_ALERT(aux), range, head / CPUM_SF_SDB_PER_TABLE, @@ -1573,7 +1578,9 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) pr_err("The AUX buffer with %lu pages for the " "diagnostic-sampling mode is full\n", num_sdb); - debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n"); + debug_sprintf_event(sfdbg, 1, + "%s: AUX buffer used up\n", + __func__); break; } if (WARN_ON_ONCE(!aux)) @@ -1596,23 +1603,25 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) perf_aux_output_end(&cpuhw->handle, size); pr_err("Sample data caused the AUX buffer with %lu " "pages to overflow\n", num_sdb); - debug_sprintf_event(sfdbg, 1, "head %lx range %lx " - "overflow %llx\n", + debug_sprintf_event(sfdbg, 1, "%s: head %#lx range %#lx " + "overflow %#llx\n", __func__, aux->head, range, overflow); } else { size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT; perf_aux_output_end(&cpuhw->handle, size); - debug_sprintf_event(sfdbg, 6, "head %lx alert %lx " + debug_sprintf_event(sfdbg, 6, "%s: head %#lx alert %#lx " "already full, try another\n", + __func__, aux->head, aux->alert_mark); } } if (done) - debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: " - "[%lx -> %lx -> %lx] (%lx, %lx)\n", - aux->head, aux->alert_mark, aux->empty_mark, - AUX_SDB_NUM_ALERT(aux), range); + debug_sprintf_event(sfdbg, 6, "%s: aux_reset_buffer " + "[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n", + __func__, aux->head, aux->alert_mark, + aux->empty_mark, AUX_SDB_NUM_ALERT(aux), + range); } /* @@ -1635,8 +1644,8 @@ static void aux_buffer_free(void *data) kfree(aux->sdb_index); kfree(aux); - debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free " - "%lu SDBTs\n", num_sdbt); + debug_sprintf_event(sfdbg, 4, "%s: free " + "%lu SDBTs\n", __func__, num_sdbt); } static void aux_sdb_init(unsigned long sdb) @@ -1744,9 +1753,8 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages, */ aux->empty_mark = sfb->num_sdb - 1; - debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs" - " and %lu SDBs\n", - 
sfb->num_sdbt, sfb->num_sdb); + debug_sprintf_event(sfdbg, 4, "%s: setup %lu SDBTs and %lu SDBs\n", + __func__, sfb->num_sdbt, sfb->num_sdb); return aux; @@ -1799,9 +1807,9 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) event->attr.sample_period = rate; SAMPL_RATE(&event->hw) = rate; hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); - debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:" - "cpu:%d value:%llx period:%llx freq:%d\n", - event->cpu, value, + debug_sprintf_event(sfdbg, 4, "%s:" + " cpu %d value %#llx period %#llx freq %d\n", + __func__, event->cpu, value, event->attr.sample_period, do_freq); return 0; } @@ -1877,7 +1885,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags) if (!SAMPL_DIAG_MODE(&event->hw)) { cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; - hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); + TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt; } /* Ensure sampling functions are in the disabled state. If disabled, @@ -2032,7 +2040,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code, /* Report measurement alerts only for non-PRA codes */ if (alert != CPU_MF_INT_SF_PRA) - debug_sprintf_event(sfdbg, 6, "measurement alert: %#x\n", + debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__, alert); /* Sampling authorization change request */ @@ -2111,7 +2119,7 @@ static int param_set_sfb_size(const char *val, const struct kernel_param *kp) sfb_set_limits(min, max); pr_info("The sampling buffer limits have changed to: " - "min=%lu max=%lu (diag=x%lu)\n", + "min %lu max %lu (diag %lu)\n", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); return 0; } @@ -2129,7 +2137,7 @@ static const struct kernel_param_ops param_ops_sfb_size = { static void __init pr_cpumsf_err(unsigned int reason) { pr_err("Sampling facility support for perf is not available: " - "reason=%04x\n", reason); + "reason %#x\n", reason); } static int __init init_cpum_sampling_pmu(void) diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index fcb6c2e92b07..1e75cc983546 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -224,9 +224,13 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { struct unwind_state state; + unsigned long addr; - unwind_for_each_frame(&state, current, regs, 0) - perf_callchain_store(entry, state.ip); + unwind_for_each_frame(&state, current, regs, 0) { + addr = unwind_get_return_address(&state); + if (!addr || perf_callchain_store(entry, addr)) + return; + } } /* Perf definitions for PMU event attributes in sysfs */ diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index b0afec673f77..6ccef5f29761 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -40,6 +40,7 @@ #include <asm/stacktrace.h> #include <asm/switch_to.h> #include <asm/runtime_instr.h> +#include <asm/unwind.h> #include "entry.h" asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); @@ -178,9 +179,8 @@ EXPORT_SYMBOL(dump_fpu); unsigned long get_wchan(struct task_struct *p) { - struct stack_frame *sf, *low, *high; - unsigned long return_address; - int count; + struct unwind_state state; + unsigned long ip = 0; if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p)) return 0; @@ -188,26 +188,22 @@ unsigned long get_wchan(struct task_struct *p) if (!try_get_task_stack(p)) return 0; - low = task_stack_page(p); - high = (struct 
stack_frame *) task_pt_regs(p); - sf = (struct stack_frame *) p->thread.ksp; - if (sf <= low || sf > high) { - return_address = 0; - goto out; - } - for (count = 0; count < 16; count++) { - sf = (struct stack_frame *)READ_ONCE_NOCHECK(sf->back_chain); - if (sf <= low || sf > high) { - return_address = 0; - goto out; + unwind_for_each_frame(&state, p, NULL, 0) { + if (state.stack_info.type != STACK_TYPE_TASK) { + ip = 0; + break; } - return_address = READ_ONCE_NOCHECK(sf->gprs[8]); - if (!in_sched_functions(return_address)) - goto out; + + ip = unwind_get_return_address(&state); + if (!ip) + break; + + if (!in_sched_functions(ip)) + break; } -out: + put_task_stack(p); - return return_address; + return ip; } unsigned long arch_align_stack(unsigned long sp) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index ad71132374f0..58faa12542a1 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -856,7 +856,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) } /* Do the secure computing check after ptrace. */ - if (secure_computing(NULL)) { + if (secure_computing()) { /* seccomp failures shouldn't expose any additional code. */ return -1; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 3ff291bc63b7..9cbf490fd162 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -355,7 +355,6 @@ early_initcall(async_stack_realloc); void __init arch_call_rest_init(void) { - struct stack_frame *frame; unsigned long stack; stack = stack_alloc(); @@ -368,13 +367,7 @@ void __init arch_call_rest_init(void) set_task_stack_end_magic(current); stack += STACK_INIT_OFFSET; S390_lowcore.kernel_stack = stack; - frame = (struct stack_frame *) stack; - memset(frame, 0, sizeof(*frame)); - /* Branch to rest_init on the new stack, never returns */ - asm volatile( - " la 15,0(%[_frame])\n" - " jg rest_init\n" - : : [_frame] "a" (frame)); + CALL_ON_STACK_NORETURN(rest_init, stack); } static void __init setup_lowcore_dat_off(void) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 44974654cbd0..2794cad9312e 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -262,10 +262,13 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) lc->spinlock_index = 0; lc->percpu_offset = __per_cpu_offset[cpu]; lc->kernel_asce = S390_lowcore.kernel_asce; + lc->user_asce = S390_lowcore.kernel_asce; lc->machine_flags = S390_lowcore.machine_flags; lc->user_timer = lc->system_timer = lc->steal_timer = lc->avg_steal_timer = 0; __ctl_store(lc->cregs_save_area, 0, 15); + lc->cregs_save_area[1] = lc->kernel_asce; + lc->cregs_save_area[7] = lc->vdso_asce; save_access_regs((unsigned int *) lc->access_regs_save_area); memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, sizeof(lc->stfle_fac_list)); @@ -724,39 +727,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early) static int smp_add_present_cpu(int cpu); -static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add) +static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, + bool configured, bool early) { struct pcpu *pcpu; - cpumask_t avail; - int cpu, nr, i, j; + int cpu, nr, i; u16 address; nr = 0; - cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); - cpu = cpumask_first(&avail); - for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { - if (sclp.has_core_type && info->core[i].type != boot_core_type) + if (sclp.has_core_type && core->type != boot_core_type) + return nr; + cpu = 
cpumask_first(avail); + address = core->core_id << smp_cpu_mt_shift; + for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) { + if (pcpu_find_address(cpu_present_mask, address + i)) continue; - address = info->core[i].core_id << smp_cpu_mt_shift; - for (j = 0; j <= smp_cpu_mtid; j++) { - if (pcpu_find_address(cpu_present_mask, address + j)) - continue; - pcpu = pcpu_devices + cpu; - pcpu->address = address + j; - pcpu->state = - (cpu >= info->configured*(smp_cpu_mtid + 1)) ? - CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); - set_cpu_present(cpu, true); - if (sysfs_add && smp_add_present_cpu(cpu) != 0) - set_cpu_present(cpu, false); - else - nr++; - cpu = cpumask_next(cpu, &avail); - if (cpu >= nr_cpu_ids) + pcpu = pcpu_devices + cpu; + pcpu->address = address + i; + if (configured) + pcpu->state = CPU_STATE_CONFIGURED; + else + pcpu->state = CPU_STATE_STANDBY; + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + set_cpu_present(cpu, true); + if (!early && smp_add_present_cpu(cpu) != 0) + set_cpu_present(cpu, false); + else + nr++; + cpumask_clear_cpu(cpu, avail); + cpu = cpumask_next(cpu, avail); + } + return nr; +} + +static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) +{ + struct sclp_core_entry *core; + cpumask_t avail; + bool configured; + u16 core_id; + int nr, i; + + nr = 0; + cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); + /* + * Add IPL core first (which got logical CPU number 0) to make sure + * that all SMT threads get subsequent logical CPU numbers. + */ + if (early) { + core_id = pcpu_devices[0].address >> smp_cpu_mt_shift; + for (i = 0; i < info->configured; i++) { + core = &info->core[i]; + if (core->core_id == core_id) { + nr += smp_add_core(core, &avail, true, early); break; + } } } + for (i = 0; i < info->combined; i++) { + configured = i < info->configured; + nr += smp_add_core(&info->core[i], &avail, configured, early); + } return nr; } @@ -805,7 +836,7 @@ void __init smp_detect_cpus(void) /* Add CPUs present at boot */ get_online_cpus(); - __smp_rescan_cpus(info, 0); + __smp_rescan_cpus(info, true); put_online_cpus(); memblock_free_early((unsigned long)info, sizeof(*info)); } @@ -816,6 +847,8 @@ static void smp_init_secondary(void) S390_lowcore.last_update_clock = get_tod_clock(); restore_access_regs(S390_lowcore.access_regs_save_area); + set_cpu_flag(CIF_ASCE_PRIMARY); + set_cpu_flag(CIF_ASCE_SECONDARY); cpu_init(); preempt_disable(); init_cpu_timer(); @@ -843,7 +876,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid) S390_lowcore.restart_source = -1UL; __ctl_load(S390_lowcore.cregs_save_area, 0, 15); __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); - CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0); + CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack); } /* Upping and downing of CPUs */ @@ -1148,7 +1181,7 @@ int __ref smp_rescan_cpus(void) smp_get_core_info(info, 0); get_online_cpus(); mutex_lock(&smp_cpu_state_mutex); - nr = __smp_rescan_cpus(info, 1); + nr = __smp_rescan_cpus(info, false); mutex_unlock(&smp_cpu_state_mutex); put_online_cpus(); kfree(info); diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index f8fc4f8aef9b..fc5419ac64c8 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -9,6 +9,7 @@ #include <linux/stacktrace.h> #include <asm/stacktrace.h> #include <asm/unwind.h> +#include <asm/kprobes.h> void arch_stack_walk(stack_trace_consume_fn consume_entry, 
void *cookie, struct task_struct *task, struct pt_regs *regs) @@ -22,3 +23,45 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, break; } } + +/* + * This function returns an error if it detects any unreliable features of the + * stack. Otherwise it guarantees that the stack trace is reliable. + * + * If the task is not 'current', the caller *must* ensure the task is inactive. + */ +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task) +{ + struct unwind_state state; + unsigned long addr; + + unwind_for_each_frame(&state, task, NULL, 0) { + if (state.stack_info.type != STACK_TYPE_TASK) + return -EINVAL; + + if (state.regs) + return -EINVAL; + + addr = unwind_get_return_address(&state); + if (!addr) + return -EINVAL; + +#ifdef CONFIG_KPROBES + /* + * Mark stacktraces with kretprobed functions on them + * as unreliable. + */ + if (state.ip == (unsigned long)kretprobe_trampoline) + return -EINVAL; +#endif + + if (!consume_entry(cookie, addr, false)) + return -EINVAL; + } + + /* Check for stack corruption */ + if (unwind_error(&state)) + return -EINVAL; + return 0; +} diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index e8766beee5ad..f9d070d016e3 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -110,15 +110,6 @@ unsigned long long notrace sched_clock(void) } NOKPROBE_SYMBOL(sched_clock); -/* - * Monotonic_clock - returns # of nanoseconds passed since time_init() - */ -unsigned long long monotonic_clock(void) -{ - return sched_clock(); -} -EXPORT_SYMBOL(monotonic_clock); - static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt) { unsigned long long high, low, rem, sec, nsec; diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c index 8fc9daae47a2..da2d4d4c5b0e 100644 --- a/arch/s390/kernel/unwind_bc.c +++ b/arch/s390/kernel/unwind_bc.c @@ -36,6 +36,12 @@ static bool update_stack_info(struct unwind_state *state, unsigned long sp) return true; } +static inline bool is_task_pt_regs(struct unwind_state *state, + struct pt_regs *regs) +{ + return task_pt_regs(state->task) == regs; +} + bool unwind_next_frame(struct unwind_state *state) { struct stack_info *info = &state->stack_info; @@ -46,15 +52,16 @@ bool unwind_next_frame(struct unwind_state *state) regs = state->regs; if (unlikely(regs)) { - sp = READ_ONCE_NOCHECK(regs->gprs[15]); - if (unlikely(outside_of_stack(state, sp))) { - if (!update_stack_info(state, sp)) - goto out_err; - } + sp = state->sp; sf = (struct stack_frame *) sp; ip = READ_ONCE_NOCHECK(sf->gprs[8]); reliable = false; regs = NULL; + if (!__kernel_text_address(ip)) { + /* skip bogus %r14 */ + state->regs = NULL; + return unwind_next_frame(state); + } } else { sf = (struct stack_frame *) state->sp; sp = READ_ONCE_NOCHECK(sf->back_chain); @@ -71,21 +78,25 @@ bool unwind_next_frame(struct unwind_state *state) /* No back-chain, look for a pt_regs structure */ sp = state->sp + STACK_FRAME_OVERHEAD; if (!on_stack(info, sp, sizeof(struct pt_regs))) - goto out_stop; + goto out_err; regs = (struct pt_regs *) sp; - if (READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE) + if (is_task_pt_regs(state, regs)) goto out_stop; ip = READ_ONCE_NOCHECK(regs->psw.addr); + sp = READ_ONCE_NOCHECK(regs->gprs[15]); + if (unlikely(outside_of_stack(state, sp))) { + if (!update_stack_info(state, sp)) + goto out_err; + } reliable = true; } } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Decode any ftrace redirection */ - if (ip == (unsigned long) 
return_to_handler) - ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, - ip, (void *) sp); -#endif + /* Sanity check: ABI requires SP to be aligned 8 bytes. */ + if (sp & 0x7) + goto out_err; + + ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp); /* Update unwind state */ state->sp = sp; @@ -103,13 +114,11 @@ out_stop: EXPORT_SYMBOL_GPL(unwind_next_frame); void __unwind_start(struct unwind_state *state, struct task_struct *task, - struct pt_regs *regs, unsigned long sp) + struct pt_regs *regs, unsigned long first_frame) { struct stack_info *info = &state->stack_info; - unsigned long *mask = &state->stack_mask; struct stack_frame *sf; - unsigned long ip; - bool reliable; + unsigned long ip, sp; memset(state, 0, sizeof(*state)); state->task = task; @@ -121,35 +130,46 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, return; } + /* Get the instruction pointer from pt_regs or the stack frame */ + if (regs) { + ip = regs->psw.addr; + sp = regs->gprs[15]; + } else if (task == current) { + sp = current_frame_address(); + } else { + sp = task->thread.ksp; + } + /* Get current stack pointer and initialize stack info */ - if (get_stack_info(sp, task, info, mask) != 0 || - !on_stack(info, sp, sizeof(struct stack_frame))) { + if (!update_stack_info(state, sp)) { /* Something is wrong with the stack pointer */ info->type = STACK_TYPE_UNKNOWN; state->error = true; return; } - /* Get the instruction pointer from pt_regs or the stack frame */ - if (regs) { - ip = READ_ONCE_NOCHECK(regs->psw.addr); - reliable = true; - } else { - sf = (struct stack_frame *) sp; + if (!regs) { + /* Stack frame is within valid stack */ + sf = (struct stack_frame *)sp; ip = READ_ONCE_NOCHECK(sf->gprs[8]); - reliable = false; } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Decode any ftrace redirection */ - if (ip == (unsigned long) return_to_handler) - ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, - ip, NULL); -#endif + ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL); /* Update unwind state */ state->sp = sp; state->ip = ip; - state->reliable = reliable; + state->reliable = true; + + if (!first_frame) + return; + /* Skip through the call chain to the specified starting frame */ + while (!unwind_done(state)) { + if (on_stack(&state->stack_info, first_frame, sizeof(struct stack_frame))) { + if (state->sp >= first_frame) + break; + } + unwind_next_frame(state); + } } EXPORT_SYMBOL_GPL(__unwind_start); diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index ed1fc08ccea2..bcc9bdb39ba2 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -29,13 +29,6 @@ #include <asm/vdso.h> #include <asm/facility.h> -#ifdef CONFIG_COMPAT_VDSO -extern char vdso32_start, vdso32_end; -static void *vdso32_kbase = &vdso32_start; -static unsigned int vdso32_pages; -static struct page **vdso32_pagelist; -#endif - extern char vdso64_start, vdso64_end; static void *vdso64_kbase = &vdso64_start; static unsigned int vdso64_pages; @@ -55,12 +48,6 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, vdso_pagelist = vdso64_pagelist; vdso_pages = vdso64_pages; -#ifdef CONFIG_COMPAT_VDSO - if (vma->vm_mm->context.compat_mm) { - vdso_pagelist = vdso32_pagelist; - vdso_pages = vdso32_pages; - } -#endif if (vmf->pgoff >= vdso_pages) return VM_FAULT_SIGBUS; @@ -76,10 +63,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm, unsigned long vdso_pages; vdso_pages = vdso64_pages; -#ifdef CONFIG_COMPAT_VDSO - if 
(vma->vm_mm->context.compat_mm) - vdso_pages = vdso32_pages; -#endif if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start) return -EINVAL; @@ -209,12 +192,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (!vdso_enabled) return 0; + if (is_compat_task()) + return 0; + vdso_pages = vdso64_pages; -#ifdef CONFIG_COMPAT_VDSO - mm->context.compat_mm = is_compat_task(); - if (mm->context.compat_mm) - vdso_pages = vdso32_pages; -#endif /* * vDSO has a problem and was disabled, just don't "enable" it for * the process @@ -267,23 +248,6 @@ static int __init vdso_init(void) int i; vdso_init_data(vdso_data); -#ifdef CONFIG_COMPAT_VDSO - /* Calculate the size of the 32 bit vDSO */ - vdso32_pages = ((&vdso32_end - &vdso32_start - + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; - - /* Make sure pages are in the correct state */ - vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *), - GFP_KERNEL); - BUG_ON(vdso32_pagelist == NULL); - for (i = 0; i < vdso32_pages - 1; i++) { - struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); - get_page(pg); - vdso32_pagelist[i] = pg; - } - vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data); - vdso32_pagelist[vdso32_pages] = NULL; -#endif /* Calculate the size of the 64 bit vDSO */ vdso64_pages = ((&vdso64_end - &vdso64_start diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso32/.gitignore deleted file mode 100644 index e45fba9d0ced..000000000000 --- a/arch/s390/kernel/vdso32/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vdso32.lds diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile deleted file mode 100644 index aee9ffbccb54..000000000000 --- a/arch/s390/kernel/vdso32/Makefile +++ /dev/null @@ -1,66 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# List of files in the vdso, has to be asm only for now - -KCOV_INSTRUMENT := n - -obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o - -# Build rules - -targets := $(obj-vdso32) vdso32.so vdso32.so.dbg -obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) - -KBUILD_AFLAGS += -DBUILD_VDSO -KBUILD_CFLAGS += -DBUILD_VDSO - -KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS)) -KBUILD_AFLAGS_31 += -m31 -s - -KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS)) -KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin -KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ - -Wl,--hash-style=both - -$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31) -$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31) - -obj-y += vdso32_wrapper.o -extra-y += vdso32.lds -CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) - -# Disable gcov profiling, ubsan and kasan for VDSO code -GCOV_PROFILE := n -UBSAN_SANITIZE := n -KASAN_SANITIZE := n - -# Force dependency (incbin is bad) -$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so - -# link rule for the .so file, .lds has to be first -$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE - $(call if_changed,vdso32ld) - -# strip rule for the .so file -$(obj)/%.so: OBJCOPYFLAGS := -S -$(obj)/%.so: $(obj)/%.so.dbg FORCE - $(call if_changed,objcopy) - -# assembly rules for the .S files -$(obj-vdso32): %.o: %.S FORCE - $(call if_changed_dep,vdso32as) - -# actual build commands -quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ -quiet_cmd_vdso32as = VDSO32A $@ - cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< - -# install commands for the unstripped file -quiet_cmd_vdso_install = INSTALL $@ - cmd_vdso_install 
= cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ - -vdso32.so: $(obj)/vdso32.so.dbg - @mkdir -p $(MODLIB)/vdso - $(call cmd,vdso_install) - -vdso_install: vdso32.so diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S deleted file mode 100644 index eaf9cf1417f6..000000000000 --- a/arch/s390/kernel/vdso32/clock_getres.S +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Userland implementation of clock_getres() for 32 bits processes in a - * s390 kernel for use in the vDSO - * - * Copyright IBM Corp. 2008 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - */ -#include <asm/vdso.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> -#include <asm/dwarf.h> - - .text - .align 4 - .globl __kernel_clock_getres - .type __kernel_clock_getres,@function -__kernel_clock_getres: - CFI_STARTPROC - basr %r1,0 - la %r1,4f-.(%r1) - chi %r2,__CLOCK_REALTIME - je 0f - chi %r2,__CLOCK_MONOTONIC - je 0f - la %r1,5f-4f(%r1) - chi %r2,__CLOCK_REALTIME_COARSE - je 0f - chi %r2,__CLOCK_MONOTONIC_COARSE - jne 3f -0: ltr %r3,%r3 - jz 2f /* res == NULL */ -1: l %r0,0(%r1) - xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ - st %r0,4(%r3) /* store tp->tv_usec */ -2: lhi %r2,0 - br %r14 -3: lhi %r1,__NR_clock_getres /* fallback to svc */ - svc 0 - br %r14 - CFI_ENDPROC -4: .long __CLOCK_REALTIME_RES -5: .long __CLOCK_COARSE_RES - .size __kernel_clock_getres,.-__kernel_clock_getres diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S deleted file mode 100644 index ada5c11a16e5..000000000000 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ /dev/null @@ -1,179 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Userland implementation of clock_gettime() for 32 bits processes in a - * s390 kernel for use in the vDSO - * - * Copyright IBM Corp. 2008 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - */ -#include <asm/vdso.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> -#include <asm/dwarf.h> -#include <asm/ptrace.h> - - .text - .align 4 - .globl __kernel_clock_gettime - .type __kernel_clock_gettime,@function -__kernel_clock_gettime: - CFI_STARTPROC - ahi %r15,-16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD - basr %r5,0 -0: al %r5,21f-0b(%r5) /* get &_vdso_data */ - chi %r2,__CLOCK_REALTIME_COARSE - je 10f - chi %r2,__CLOCK_REALTIME - je 11f - chi %r2,__CLOCK_MONOTONIC_COARSE - je 9f - chi %r2,__CLOCK_MONOTONIC - jne 19f - - /* CLOCK_MONOTONIC */ -1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ - tml %r4,0x0001 /* pending update ? 
loop */ - jnz 1b - stcke 0(%r15) /* Store TOD clock */ - lm %r0,%r1,1(%r15) - s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - sl %r1,__VDSO_XTIME_STAMP+4(%r5) - brc 3,2f - ahi %r0,-1 -2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ - lr %r2,%r0 - l %r0,__VDSO_TK_MULT(%r5) - ltr %r1,%r1 - mr %r0,%r0 - jnm 3f - a %r0,__VDSO_TK_MULT(%r5) -3: alr %r0,%r2 - al %r0,__VDSO_WTOM_NSEC(%r5) - al %r1,__VDSO_WTOM_NSEC+4(%r5) - brc 12,5f - ahi %r0,1 -5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ - srdl %r0,0(%r2) /* >> tk->shift */ - l %r2,__VDSO_WTOM_SEC+4(%r5) - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ - jne 1b - basr %r5,0 -6: ltr %r0,%r0 - jnz 7f - cl %r1,20f-6b(%r5) - jl 8f -7: ahi %r2,1 - sl %r1,20f-6b(%r5) - brc 3,6b - ahi %r0,-1 - j 6b -8: st %r2,0(%r3) /* store tp->tv_sec */ - st %r1,4(%r3) /* store tp->tv_nsec */ - lhi %r2,0 - ahi %r15,16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD - CFI_RESTORE 15 - br %r14 - - /* CLOCK_MONOTONIC_COARSE */ - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD -9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ - tml %r4,0x0001 /* pending update ? loop */ - jnz 9b - l %r2,__VDSO_WTOM_CRS_SEC+4(%r5) - l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5) - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ - jne 9b - j 8b - - /* CLOCK_REALTIME_COARSE */ -10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ - tml %r4,0x0001 /* pending update ? loop */ - jnz 10b - l %r2,__VDSO_XTIME_CRS_SEC+4(%r5) - l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5) - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ - jne 10b - j 17f - - /* CLOCK_REALTIME */ -11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ - tml %r4,0x0001 /* pending update ? loop */ - jnz 11b - stcke 0(%r15) /* Store TOD clock */ - lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */ - s %r0,1(%r15) /* no - ts_steering_end */ - sl %r1,5(%r15) - brc 3,22f - ahi %r0,-1 -22: ltr %r0,%r0 /* past end of steering? */ - jm 24f - srdl %r0,15 /* 1 per 2^16 */ - tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? 
*/ - jz 23f - lcr %r0,%r0 /* negative TOD offset */ - lcr %r1,%r1 - je 23f - ahi %r0,-1 -23: a %r0,1(%r15) /* add TOD timestamp */ - al %r1,5(%r15) - brc 12,25f - ahi %r0,1 - j 25f -24: lm %r0,%r1,1(%r15) /* load TOD timestamp */ -25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - sl %r1,__VDSO_XTIME_STAMP+4(%r5) - brc 3,12f - ahi %r0,-1 -12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ - lr %r2,%r0 - l %r0,__VDSO_TK_MULT(%r5) - ltr %r1,%r1 - mr %r0,%r0 - jnm 13f - a %r0,__VDSO_TK_MULT(%r5) -13: alr %r0,%r2 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ - al %r1,__VDSO_XTIME_NSEC+4(%r5) - brc 12,14f - ahi %r0,1 -14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ - srdl %r0,0(%r2) /* >> tk->shift */ - l %r2,__VDSO_XTIME_SEC+4(%r5) - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ - jne 11b - basr %r5,0 -15: ltr %r0,%r0 - jnz 16f - cl %r1,20f-15b(%r5) - jl 17f -16: ahi %r2,1 - sl %r1,20f-15b(%r5) - brc 3,15b - ahi %r0,-1 - j 15b -17: st %r2,0(%r3) /* store tp->tv_sec */ - st %r1,4(%r3) /* store tp->tv_nsec */ - lhi %r2,0 - ahi %r15,16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD - CFI_RESTORE 15 - br %r14 - - /* Fallback to system call */ - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD -19: lhi %r1,__NR_clock_gettime - svc 0 - ahi %r15,16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD - CFI_RESTORE 15 - br %r14 - CFI_ENDPROC - -20: .long 1000000000 -21: .long _vdso_data - 0b - .size __kernel_clock_gettime,.-__kernel_clock_gettime diff --git a/arch/s390/kernel/vdso32/getcpu.S b/arch/s390/kernel/vdso32/getcpu.S deleted file mode 100644 index 25515f3fbcea..000000000000 --- a/arch/s390/kernel/vdso32/getcpu.S +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Userland implementation of getcpu() for 32 bits processes in a - * s390 kernel for use in the vDSO - * - * Copyright IBM Corp. 2016 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> - */ -#include <asm/vdso.h> -#include <asm/asm-offsets.h> -#include <asm/dwarf.h> - - .text - .align 4 - .globl __kernel_getcpu - .type __kernel_getcpu,@function -__kernel_getcpu: - CFI_STARTPROC - la %r4,0 - sacf 256 - l %r5,__VDSO_CPU_NR(%r4) - l %r4,__VDSO_NODE_ID(%r4) - sacf 0 - ltr %r2,%r2 - jz 2f - st %r5,0(%r2) -2: ltr %r3,%r3 - jz 3f - st %r4,0(%r3) -3: lhi %r2,0 - br %r14 - CFI_ENDPROC - .size __kernel_getcpu,.-__kernel_getcpu diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S deleted file mode 100644 index b23063fbc892..000000000000 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Userland implementation of gettimeofday() for 32 bits processes in a - * s390 kernel for use in the vDSO - * - * Copyright IBM Corp. 2008 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - */ -#include <asm/vdso.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> -#include <asm/dwarf.h> -#include <asm/ptrace.h> - - .text - .align 4 - .globl __kernel_gettimeofday - .type __kernel_gettimeofday,@function -__kernel_gettimeofday: - CFI_STARTPROC - ahi %r15,-16 - CFI_ADJUST_CFA_OFFSET 16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD - basr %r5,0 -0: al %r5,13f-0b(%r5) /* get &_vdso_data */ -1: ltr %r3,%r3 /* check if tz is NULL */ - je 2f - mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) -2: ltr %r2,%r2 /* check if tv is NULL */ - je 10f - l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ - tml %r4,0x0001 /* pending update ? 
loop */ - jnz 1b - stcke 0(%r15) /* Store TOD clock */ - lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */ - s %r0,1(%r15) - sl %r1,5(%r15) - brc 3,14f - ahi %r0,-1 -14: ltr %r0,%r0 /* past end of steering? */ - jm 16f - srdl %r0,15 /* 1 per 2^16 */ - tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */ - jz 15f - lcr %r0,%r0 /* negative TOD offset */ - lcr %r1,%r1 - je 15f - ahi %r0,-1 -15: a %r0,1(%r15) /* add TOD timestamp */ - al %r1,5(%r15) - brc 12,17f - ahi %r0,1 - j 17f -16: lm %r0,%r1,1(%r15) /* load TOD timestamp */ -17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - sl %r1,__VDSO_XTIME_STAMP+4(%r5) - brc 3,3f - ahi %r0,-1 -3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ - st %r0,0(%r15) - l %r0,__VDSO_TK_MULT(%r5) - ltr %r1,%r1 - mr %r0,%r0 - jnm 4f - a %r0,__VDSO_TK_MULT(%r5) -4: al %r0,0(%r15) - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ - al %r1,__VDSO_XTIME_NSEC+4(%r5) - brc 12,5f - ahi %r0,1 -5: mvc 0(4,%r15),__VDSO_XTIME_SEC+4(%r5) - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ - jne 1b - l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ - srdl %r0,0(%r4) /* >> tk->shift */ - l %r4,0(%r15) /* get tv_sec from stack */ - basr %r5,0 -6: ltr %r0,%r0 - jnz 7f - cl %r1,11f-6b(%r5) - jl 8f -7: ahi %r4,1 - sl %r1,11f-6b(%r5) - brc 3,6b - ahi %r0,-1 - j 6b -8: st %r4,0(%r2) /* store tv->tv_sec */ - ltr %r1,%r1 - m %r0,12f-6b(%r5) - jnm 9f - al %r0,12f-6b(%r5) -9: srl %r0,6 - st %r0,4(%r2) /* store tv->tv_usec */ -10: slr %r2,%r2 - ahi %r15,16 - CFI_ADJUST_CFA_OFFSET -16 - CFI_RESTORE 15 - br %r14 - CFI_ENDPROC -11: .long 1000000000 -12: .long 274877907 -13: .long _vdso_data - 0b - .size __kernel_gettimeofday,.-__kernel_gettimeofday diff --git a/arch/s390/kernel/vdso32/note.S b/arch/s390/kernel/vdso32/note.S deleted file mode 100644 index db19d0680a0a..000000000000 --- a/arch/s390/kernel/vdso32/note.S +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. - * Here we can supply some information useful to userland. - */ - -#include <linux/uts.h> -#include <linux/version.h> -#include <linux/elfnote.h> - -ELFNOTE_START(Linux, 0, "a") - .long LINUX_VERSION_CODE -ELFNOTE_END diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S deleted file mode 100644 index 721c4954cb6e..000000000000 --- a/arch/s390/kernel/vdso32/vdso32.lds.S +++ /dev/null @@ -1,142 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This is the infamous ld script for the 32 bits vdso - * library - */ - -#include <asm/page.h> -#include <asm/vdso.h> - -OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") -OUTPUT_ARCH(s390:31-bit) -ENTRY(_start) - -SECTIONS -{ - . = VDSO32_LBASE + SIZEOF_HEADERS; - - .hash : { *(.hash) } :text - .gnu.hash : { *(.gnu.hash) } - .dynsym : { *(.dynsym) } - .dynstr : { *(.dynstr) } - .gnu.version : { *(.gnu.version) } - .gnu.version_d : { *(.gnu.version_d) } - .gnu.version_r : { *(.gnu.version_r) } - - .note : { *(.note.*) } :text :note - - . 
= ALIGN(16); - .text : { - *(.text .stub .text.* .gnu.linkonce.t.*) - } :text - PROVIDE(__etext = .); - PROVIDE(_etext = .); - PROVIDE(etext = .); - - /* - * Other stuff is appended to the text segment: - */ - .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } - .rodata1 : { *(.rodata1) } - - .dynamic : { *(.dynamic) } :text :dynamic - - .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr - .eh_frame : { KEEP (*(.eh_frame)) } :text - .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } - - .rela.dyn ALIGN(8) : { *(.rela.dyn) } - .got ALIGN(8) : { *(.got .toc) } - - _end = .; - PROVIDE(end = .); - - /* - * Stabs debugging sections are here too. - */ - .stab 0 : { *(.stab) } - .stabstr 0 : { *(.stabstr) } - .stab.excl 0 : { *(.stab.excl) } - .stab.exclstr 0 : { *(.stab.exclstr) } - .stab.index 0 : { *(.stab.index) } - .stab.indexstr 0 : { *(.stab.indexstr) } - .comment 0 : { *(.comment) } - - /* - * DWARF debug sections. - * Symbols in the DWARF debugging sections are relative to the - * beginning of the section so we begin them at 0. - */ - /* DWARF 1 */ - .debug 0 : { *(.debug) } - .line 0 : { *(.line) } - /* GNU DWARF 1 extensions */ - .debug_srcinfo 0 : { *(.debug_srcinfo) } - .debug_sfnames 0 : { *(.debug_sfnames) } - /* DWARF 1.1 and DWARF 2 */ - .debug_aranges 0 : { *(.debug_aranges) } - .debug_pubnames 0 : { *(.debug_pubnames) } - /* DWARF 2 */ - .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } - .debug_abbrev 0 : { *(.debug_abbrev) } - .debug_line 0 : { *(.debug_line) } - .debug_frame 0 : { *(.debug_frame) } - .debug_str 0 : { *(.debug_str) } - .debug_loc 0 : { *(.debug_loc) } - .debug_macinfo 0 : { *(.debug_macinfo) } - /* SGI/MIPS DWARF 2 extensions */ - .debug_weaknames 0 : { *(.debug_weaknames) } - .debug_funcnames 0 : { *(.debug_funcnames) } - .debug_typenames 0 : { *(.debug_typenames) } - .debug_varnames 0 : { *(.debug_varnames) } - /* DWARF 3 */ - .debug_pubtypes 0 : { *(.debug_pubtypes) } - .debug_ranges 0 : { *(.debug_ranges) } - .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } - - . = ALIGN(PAGE_SIZE); - PROVIDE(_vdso_data = .); - - /DISCARD/ : { - *(.note.GNU-stack) - *(.branch_lt) - *(.data .data.* .gnu.linkonce.d.* .sdata*) - *(.bss .sbss .dynbss .dynsbss) - } -} - -/* - * Very old versions of ld do not recognize this name token; use the constant. - */ -#define PT_GNU_EH_FRAME 0x6474e550 - -/* - * We must supply the ELF program headers explicitly to get just one - * PT_LOAD segment, and set the flags explicitly to make segments read-only. - */ -PHDRS -{ - text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ - dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ - note PT_NOTE FLAGS(4); /* PF_R */ - eh_frame_hdr PT_GNU_EH_FRAME; -} - -/* - * This controls what symbols we export from the DSO. 
- */ -VERSION -{ - VDSO_VERSION_STRING { - global: - /* - * Has to be there for the kernel to find - */ - __kernel_gettimeofday; - __kernel_clock_gettime; - __kernel_clock_getres; - __kernel_getcpu; - - local: *; - }; -} diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S deleted file mode 100644 index de2fb930471a..000000000000 --- a/arch/s390/kernel/vdso32/vdso32_wrapper.S +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/init.h> -#include <linux/linkage.h> -#include <asm/page.h> - - __PAGE_ALIGNED_DATA - - .globl vdso32_start, vdso32_end - .balign PAGE_SIZE -vdso32_start: - .incbin "arch/s390/kernel/vdso32/vdso32.so" - .balign PAGE_SIZE -vdso32_end: - - .previous diff --git a/arch/s390/kernel/vdso64/getcpu.S b/arch/s390/kernel/vdso64/getcpu.S index 2446e9dac8ab..3c04f7328500 100644 --- a/arch/s390/kernel/vdso64/getcpu.S +++ b/arch/s390/kernel/vdso64/getcpu.S @@ -16,10 +16,8 @@ .type __kernel_getcpu,@function __kernel_getcpu: CFI_STARTPROC - la %r4,0 sacf 256 - l %r5,__VDSO_CPU_NR(%r4) - l %r4,__VDSO_NODE_ID(%r4) + lm %r4,%r5,__VDSO_GETCPU_VAL(%r0) sacf 0 ltgr %r2,%r2 jz 2f diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 7e0eb4020917..37695499717d 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -15,6 +15,8 @@ /* Handle ro_after_init data on our own. */ #define RO_AFTER_INIT_DATA +#define EMITS_PT_NOTE + #include <asm-generic/vmlinux.lds.h> #include <asm/vmlinux.lds.h> @@ -50,11 +52,7 @@ SECTIONS _etext = .; /* End of text section */ } :text = 0x0700 - NOTES :text :note - - .dummy : { *(.dummy) } :data - - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); _sdata = .; /* Start of data section */ @@ -64,12 +62,12 @@ SECTIONS .data..ro_after_init : { *(.data..ro_after_init) JUMP_TABLE_DATA - } + } :data EXCEPTION_TABLE(16) . = ALIGN(PAGE_SIZE); __end_ro_after_init = .; - RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) + RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE) BOOT_DATA_PRESERVED _edata = .; /* End of data section */ diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c475ca49cfc6..8df10d3c8f6c 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -247,9 +247,9 @@ void vtime_account_irq_enter(struct task_struct *tsk) } EXPORT_SYMBOL_GPL(vtime_account_irq_enter); -void vtime_account_system(struct task_struct *tsk) +void vtime_account_kernel(struct task_struct *tsk) __attribute__((alias("vtime_account_irq_enter"))); -EXPORT_SYMBOL_GPL(vtime_account_system); +EXPORT_SYMBOL_GPL(vtime_account_kernel); /* * Sorted add to a list. 
List is linear searched until first bigger diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 45634b3d2e0a..3fb54ec2cf3e 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -158,14 +158,28 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; vcpu->stat.diagnose_9c++; - VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid); + /* yield to self */ if (tid == vcpu->vcpu_id) - return 0; + goto no_yield; + /* yield to invalid */ tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid); - if (tcpu) - kvm_vcpu_yield_to(tcpu); + if (!tcpu) + goto no_yield; + + /* target already running */ + if (READ_ONCE(tcpu->cpu) >= 0) + goto no_yield; + + if (kvm_vcpu_yield_to(tcpu) <= 0) + goto no_yield; + + VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid); + return 0; +no_yield: + VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid); + vcpu->stat.diagnose_9c_ignored++; return 0; } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index d1ccc168c071..165dea4c7f19 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1477,8 +1477,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) return 0; } -static int __inject_sigp_restart(struct kvm_vcpu *vcpu, - struct kvm_s390_irq *irq) +static int __inject_sigp_restart(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -2007,7 +2006,7 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) rc = __inject_sigp_stop(vcpu, irq); break; case KVM_S390_RESTART: - rc = __inject_sigp_restart(vcpu, irq); + rc = __inject_sigp_restart(vcpu); break; case KVM_S390_INT_CLOCK_COMP: rc = __inject_ckc(vcpu); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d047e846e1b9..d9e6bf3d54f0 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -155,6 +155,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_diag_10", VCPU_STAT(diagnose_10) }, { "instruction_diag_44", VCPU_STAT(diagnose_44) }, { "instruction_diag_9c", VCPU_STAT(diagnose_9c) }, + { "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) }, { "instruction_diag_258", VCPU_STAT(diagnose_258) }, { "instruction_diag_308", VCPU_STAT(diagnose_308) }, { "instruction_diag_500", VCPU_STAT(diagnose_500) }, @@ -453,16 +454,14 @@ static void kvm_s390_cpu_feat_init(void) int kvm_arch_init(void *opaque) { - int rc; + int rc = -ENOMEM; kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); if (!kvm_s390_dbf) return -ENOMEM; - if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) { - rc = -ENOMEM; - goto out_debug_unreg; - } + if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) + goto out; kvm_s390_cpu_feat_init(); @@ -470,19 +469,17 @@ int kvm_arch_init(void *opaque) rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); if (rc) { pr_err("A FLIC registration call failed with rc=%d\n", rc); - goto out_debug_unreg; + goto out; } rc = kvm_s390_gib_init(GAL_ISC); if (rc) - goto out_gib_destroy; + goto out; return 0; -out_gib_destroy: - kvm_s390_gib_destroy(); -out_debug_unreg: - debug_unregister(kvm_s390_dbf); +out: + kvm_arch_exit(); return rc; } diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index d7c218e8b559..28fd66d558ff 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -11,3 +11,6 @@ lib-$(CONFIG_UPROBES) += probes.o # Instrumenting memory accesses to __user 
data (in different address space) # produce false positives KASAN_SANITIZE_uaccess.o := n + +obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o +CFLAGS_test_unwind.o += -fno-optimize-sibling-calls diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 30a7c8c29964..ce1e4bbe53aa 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -74,7 +74,7 @@ static inline int arch_load_niai4(int *lock) { int owner; - asm volatile( + asm_inline volatile( ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */ " l %0,%1\n" : "=d" (owner) : "Q" (*lock) : "memory"); @@ -85,7 +85,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new) { int expected = old; - asm volatile( + asm_inline volatile( ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */ " cs %0,%3,%1\n" : "=d" (old), "=Q" (*lock) diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c new file mode 100644 index 000000000000..bda7ac0ddd29 --- /dev/null +++ b/arch/s390/lib/test_unwind.c @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Test module for unwind_for_each_frame + */ + +#define pr_fmt(fmt) "test_unwind: " fmt +#include <asm/unwind.h> +#include <linux/completion.h> +#include <linux/kallsyms.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/kprobes.h> +#include <linux/wait.h> +#include <asm/irq.h> +#include <asm/delay.h> + +#define BT_BUF_SIZE (PAGE_SIZE * 4) + +/* + * To avoid printk line limit split backtrace by lines + */ +static void print_backtrace(char *bt) +{ + char *p; + + while (true) { + p = strsep(&bt, "\n"); + if (!p) + break; + pr_err("%s\n", p); + } +} + +/* + * Calls unwind_for_each_frame(task, regs, sp) and verifies that the result + * contains unwindme_func2 followed by unwindme_func1. + */ +static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, + unsigned long sp) +{ + int frame_count, prev_is_func2, seen_func2_func1; + const int max_frames = 128; + struct unwind_state state; + size_t bt_pos = 0; + int ret = 0; + char *bt; + + bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC); + if (!bt) { + pr_err("failed to allocate backtrace buffer\n"); + return -ENOMEM; + } + /* Unwind. */ + frame_count = 0; + prev_is_func2 = 0; + seen_func2_func1 = 0; + unwind_for_each_frame(&state, task, regs, sp) { + unsigned long addr = unwind_get_return_address(&state); + char sym[KSYM_SYMBOL_LEN]; + + if (frame_count++ == max_frames) + break; + if (state.reliable && !addr) { + pr_err("unwind state reliable but addr is 0\n"); + return -EINVAL; + } + sprint_symbol(sym, addr); + if (bt_pos < BT_BUF_SIZE) { + bt_pos += snprintf(bt + bt_pos, BT_BUF_SIZE - bt_pos, + state.reliable ? " [%-7s%px] %pSR\n" : + "([%-7s%px] %pSR)\n", + stack_type_name(state.stack_info.type), + (void *)state.sp, (void *)state.ip); + if (bt_pos >= BT_BUF_SIZE) + pr_err("backtrace buffer is too small\n"); + } + frame_count += 1; + if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1")) + seen_func2_func1 = 1; + prev_is_func2 = str_has_prefix(sym, "unwindme_func2"); + } + + /* Check the results. */ + if (unwind_error(&state)) { + pr_err("unwind error\n"); + ret = -EINVAL; + } + if (!seen_func2_func1) { + pr_err("unwindme_func2 and unwindme_func1 not found\n"); + ret = -EINVAL; + } + if (frame_count == max_frames) { + pr_err("Maximum number of frames exceeded\n"); + ret = -EINVAL; + } + if (ret) + print_backtrace(bt); + kfree(bt); + return ret; +} + +/* State of the task being unwound. 
*/ +struct unwindme { + int flags; + int ret; + struct task_struct *task; + struct completion task_ready; + wait_queue_head_t task_wq; + unsigned long sp; +}; + +static struct unwindme *unwindme; + +/* Values of unwindme.flags. */ +#define UWM_DEFAULT 0x0 +#define UWM_THREAD 0x1 /* Unwind a separate task. */ +#define UWM_REGS 0x2 /* Pass regs to test_unwind(). */ +#define UWM_SP 0x4 /* Pass sp to test_unwind(). */ +#define UWM_CALLER 0x8 /* Unwind starting from caller. */ +#define UWM_SWITCH_STACK 0x10 /* Use CALL_ON_STACK. */ +#define UWM_IRQ 0x20 /* Unwind from irq context. */ +#define UWM_PGM 0x40 /* Unwind from program check handler. */ + +static __always_inline unsigned long get_psw_addr(void) +{ + unsigned long psw_addr; + + asm volatile( + "basr %[psw_addr],0\n" + : [psw_addr] "=d" (psw_addr)); + return psw_addr; +} + +#ifdef CONFIG_KPROBES +static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct unwindme *u = unwindme; + + u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL, + (u->flags & UWM_SP) ? u->sp : 0); + return 0; +} +#endif + +/* This function may or may not appear in the backtrace. */ +static noinline int unwindme_func4(struct unwindme *u) +{ + if (!(u->flags & UWM_CALLER)) + u->sp = current_frame_address(); + if (u->flags & UWM_THREAD) { + complete(&u->task_ready); + wait_event(u->task_wq, kthread_should_park()); + kthread_parkme(); + return 0; +#ifdef CONFIG_KPROBES + } else if (u->flags & UWM_PGM) { + struct kprobe kp; + int ret; + + unwindme = u; + memset(&kp, 0, sizeof(kp)); + kp.symbol_name = "do_report_trap"; + kp.pre_handler = pgm_pre_handler; + ret = register_kprobe(&kp); + if (ret < 0) { + pr_err("register_kprobe failed %d\n", ret); + return -EINVAL; + } + + /* + * trigger specification exception + */ + asm volatile( + " mvcl %%r1,%%r1\n" + "0: nopr %%r7\n" + EX_TABLE(0b, 0b) + :); + + unregister_kprobe(&kp); + unwindme = NULL; + return u->ret; +#endif + } else { + struct pt_regs regs; + + memset(®s, 0, sizeof(regs)); + regs.psw.addr = get_psw_addr(); + regs.gprs[15] = current_stack_pointer(); + return test_unwind(NULL, + (u->flags & UWM_REGS) ? ®s : NULL, + (u->flags & UWM_SP) ? u->sp : 0); + } +} + +/* This function may or may not appear in the backtrace. */ +static noinline int unwindme_func3(struct unwindme *u) +{ + u->sp = current_frame_address(); + return unwindme_func4(u); +} + +/* This function must appear in the backtrace. */ +static noinline int unwindme_func2(struct unwindme *u) +{ + int rc; + + if (u->flags & UWM_SWITCH_STACK) { + preempt_disable(); + rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u); + preempt_enable(); + return rc; + } else { + return unwindme_func3(u); + } +} + +/* This function must follow unwindme_func2 in the backtrace. 
*/ +static noinline int unwindme_func1(void *u) +{ + return unwindme_func2((struct unwindme *)u); +} + +static void unwindme_irq_handler(struct ext_code ext_code, + unsigned int param32, + unsigned long param64) +{ + struct unwindme *u = READ_ONCE(unwindme); + + if (u && u->task == current) { + unwindme = NULL; + u->task = NULL; + u->ret = unwindme_func1(u); + } +} + +static int test_unwind_irq(struct unwindme *u) +{ + preempt_disable(); + if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) { + preempt_enable(); + pr_info("Couldn't register external interrupt handler\n"); + return -1; + } + u->task = current; + unwindme = u; + udelay(1); + unregister_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler); + preempt_enable(); + return u->ret; +} + +/* Spawns a task and passes it to test_unwind(). */ +static int test_unwind_task(struct unwindme *u) +{ + struct task_struct *task; + int ret; + + /* Initialize thread-related fields. */ + init_completion(&u->task_ready); + init_waitqueue_head(&u->task_wq); + + /* + * Start the task and wait until it reaches unwindme_func4() and sleeps + * in (task_ready, unwind_done] range. + */ + task = kthread_run(unwindme_func1, u, "%s", __func__); + if (IS_ERR(task)) { + pr_err("kthread_run() failed\n"); + return PTR_ERR(task); + } + /* + * Make sure the task reaches unwindme_func4 before parking it; + * otherwise we might park it before the kthread function has been + * executed at all. + */ + wait_for_completion(&u->task_ready); + kthread_park(task); + /* Unwind. */ + ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? u->sp : 0); + kthread_stop(task); + return ret; +} + +static int test_unwind_flags(int flags) +{ + struct unwindme u; + + u.flags = flags; + if (u.flags & UWM_THREAD) + return test_unwind_task(&u); + else if (u.flags & UWM_IRQ) + return test_unwind_irq(&u); + else + return unwindme_func1(&u); +} + +static int test_unwind_init(void) +{ + int ret = 0; + +#define TEST(flags) \ +do { \ + pr_info("[ RUN ] " #flags "\n"); \ + if (!test_unwind_flags((flags))) { \ + pr_info("[ OK ] " #flags "\n"); \ + } else { \ + pr_err("[ FAILED ] " #flags "\n"); \ + ret = -EINVAL; \ + } \ +} while (0) + + TEST(UWM_DEFAULT); + TEST(UWM_SP); + TEST(UWM_REGS); + TEST(UWM_SWITCH_STACK); + TEST(UWM_SP | UWM_REGS); + TEST(UWM_CALLER | UWM_SP); + TEST(UWM_CALLER | UWM_SP | UWM_REGS); + TEST(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK); + TEST(UWM_THREAD); + TEST(UWM_THREAD | UWM_SP); + TEST(UWM_THREAD | UWM_CALLER | UWM_SP); + TEST(UWM_IRQ); + TEST(UWM_IRQ | UWM_SWITCH_STACK); + TEST(UWM_IRQ | UWM_SP); + TEST(UWM_IRQ | UWM_REGS); + TEST(UWM_IRQ | UWM_SP | UWM_REGS); + TEST(UWM_IRQ | UWM_CALLER | UWM_SP); + TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS); + TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK); +#ifdef CONFIG_KPROBES + TEST(UWM_PGM); + TEST(UWM_PGM | UWM_SP); + TEST(UWM_PGM | UWM_REGS); + TEST(UWM_PGM | UWM_SP | UWM_REGS); +#endif +#undef TEST + + return ret; +} + +static void test_unwind_exit(void) +{ +} + +module_init(test_unwind_init); +module_exit(test_unwind_exit); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 510a18299196..a51c892f14f3 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, } if (write) { - len = *lenp; - if (copy_from_user(buf, buffer, - len > sizeof(buf) ? 
sizeof(buf) : len)) + len = min(*lenp, sizeof(buf)); + if (!len) + return 0; + if (copy_from_user(buf, buffer, len)) return -EFAULT; - buf[sizeof(buf) - 1] = '\0'; + buf[len - 1] = '\0'; cmm_skip_blanks(buf, &p); nr = simple_strtoul(p, &p, 0); cmm_skip_blanks(p, &p); seconds = simple_strtoul(p, &p, 0); cmm_set_timeout(nr, seconds); + *ppos += *lenp; } else { len = sprintf(buf, "%ld %ld\n", cmm_timeout_pages, cmm_timeout_seconds); @@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, len = *lenp; if (copy_to_user(buffer, buf, len)) return -EFAULT; + *lenp = len; + *ppos += len; } - *lenp = len; - *ppos += len; return 0; } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index a124f19f7b3c..f0ce22220565 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -118,6 +118,7 @@ void __init paging_init(void) sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); + zone_dma_bits = 31; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); max_zone_pfns[ZONE_NORMAL] = max_low_pfn; diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 1864a8bb9622..de7ca4b6718f 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -70,7 +70,7 @@ void notrace s390_kernel_write(void *dst, const void *src, size_t size) spin_unlock_irqrestore(&s390_kernel_write_lock, flags); } -static int __memcpy_real(void *dest, void *src, size_t count) +static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count) { register unsigned long _dest asm("2") = (unsigned long) dest; register unsigned long _len1 asm("3") = (unsigned long) count; @@ -91,19 +91,23 @@ static int __memcpy_real(void *dest, void *src, size_t count) return rc; } -static unsigned long _memcpy_real(unsigned long dest, unsigned long src, - unsigned long count) +static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest, + unsigned long src, + unsigned long count) { int irqs_disabled, rc; unsigned long flags; if (!count) return 0; - flags = __arch_local_irq_stnsm(0xf8UL); + flags = arch_local_irq_save(); irqs_disabled = arch_irqs_disabled_flags(flags); if (!irqs_disabled) trace_hardirqs_off(); + __arch_local_irq_stnsm(0xf8); // disable DAT rc = __memcpy_real((void *) dest, (void *) src, (size_t) count); + if (flags & PSW_MASK_DAT) + __arch_local_irq_stosm(0x04); // enable DAT if (!irqs_disabled) trace_hardirqs_on(); __arch_local_irq_ssm(flags); @@ -115,9 +119,15 @@ static unsigned long _memcpy_real(unsigned long dest, unsigned long src, */ int memcpy_real(void *dest, void *src, size_t count) { - if (S390_lowcore.nodat_stack != 0) - return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, - 3, dest, src, count); + int rc; + + if (S390_lowcore.nodat_stack != 0) { + preempt_disable(); + rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3, + dest, src, count); + preempt_enable(); + return rc; + } /* * This is a really early memcpy_real call, the stacks are * not set up yet. 
Just call _memcpy_real on the early boot diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index ce88211b9c6c..8d2134136290 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -23,6 +23,8 @@ #include <linux/filter.h> #include <linux/init.h> #include <linux/bpf.h> +#include <linux/mm.h> +#include <linux/kernel.h> #include <asm/cacheflush.h> #include <asm/dis.h> #include <asm/facility.h> @@ -38,10 +40,11 @@ struct bpf_jit { int size; /* Size of program and literal pool */ int size_prg; /* Size of program */ int prg; /* Current position in program */ - int lit_start; /* Start of literal pool */ - int lit; /* Current position in literal pool */ + int lit32_start; /* Start of 32-bit literal pool */ + int lit32; /* Current position in 32-bit literal pool */ + int lit64_start; /* Start of 64-bit literal pool */ + int lit64; /* Current position in 64-bit literal pool */ int base_ip; /* Base address for literal pool */ - int ret0_ip; /* Address of return 0 */ int exit_ip; /* Address of exit */ int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ @@ -49,14 +52,10 @@ struct bpf_jit { int labels[1]; /* Labels for local jumps */ }; -#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ - -#define SEEN_MEM (1 << 0) /* use mem[] for temporary storage */ -#define SEEN_RET0 (1 << 1) /* ret0_ip points to a valid return 0 */ -#define SEEN_LITERAL (1 << 2) /* code uses literals */ -#define SEEN_FUNC (1 << 3) /* calls C functions */ -#define SEEN_TAIL_CALL (1 << 4) /* code uses tail calls */ -#define SEEN_REG_AX (1 << 5) /* code uses constant blinding */ +#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ +#define SEEN_LITERAL BIT(1) /* code uses literals */ +#define SEEN_FUNC BIT(2) /* calls C functions */ +#define SEEN_TAIL_CALL BIT(3) /* code uses tail calls */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM) /* @@ -131,13 +130,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT2(op) \ ({ \ if (jit->prg_buf) \ - *(u16 *) (jit->prg_buf + jit->prg) = op; \ + *(u16 *) (jit->prg_buf + jit->prg) = (op); \ jit->prg += 2; \ }) #define EMIT2(op, b1, b2) \ ({ \ - _EMIT2(op | reg(b1, b2)); \ + _EMIT2((op) | reg(b1, b2)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) @@ -145,20 +144,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT4(op) \ ({ \ if (jit->prg_buf) \ - *(u32 *) (jit->prg_buf + jit->prg) = op; \ + *(u32 *) (jit->prg_buf + jit->prg) = (op); \ jit->prg += 4; \ }) #define EMIT4(op, b1, b2) \ ({ \ - _EMIT4(op | reg(b1, b2)); \ + _EMIT4((op) | reg(b1, b2)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) #define EMIT4_RRF(op, b1, b2, b3) \ ({ \ - _EMIT4(op | reg_high(b3) << 8 | reg(b1, b2)); \ + _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ REG_SET_SEEN(b3); \ @@ -167,13 +166,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT4_DISP(op, disp) \ ({ \ unsigned int __disp = (disp) & 0xfff; \ - _EMIT4(op | __disp); \ + _EMIT4((op) | __disp); \ }) #define EMIT4_DISP(op, b1, b2, disp) \ ({ \ - _EMIT4_DISP(op | reg_high(b1) << 16 | \ - reg_high(b2) << 8, disp); \ + _EMIT4_DISP((op) | reg_high(b1) << 16 | \ + reg_high(b2) << 8, (disp)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) @@ -181,21 +180,27 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT4_IMM(op, b1, imm) \ ({ \ unsigned int __imm = (imm) & 0xffff; \ - 
_EMIT4(op | reg_high(b1) << 16 | __imm); \ + _EMIT4((op) | reg_high(b1) << 16 | __imm); \ REG_SET_SEEN(b1); \ }) #define EMIT4_PCREL(op, pcrel) \ ({ \ long __pcrel = ((pcrel) >> 1) & 0xffff; \ - _EMIT4(op | __pcrel); \ + _EMIT4((op) | __pcrel); \ +}) + +#define EMIT4_PCREL_RIC(op, mask, target) \ +({ \ + int __rel = ((target) - jit->prg) / 2; \ + _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \ }) #define _EMIT6(op1, op2) \ ({ \ if (jit->prg_buf) { \ - *(u32 *) (jit->prg_buf + jit->prg) = op1; \ - *(u16 *) (jit->prg_buf + jit->prg + 4) = op2; \ + *(u32 *) (jit->prg_buf + jit->prg) = (op1); \ + *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \ } \ jit->prg += 6; \ }) @@ -203,20 +208,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT6_DISP(op1, op2, disp) \ ({ \ unsigned int __disp = (disp) & 0xfff; \ - _EMIT6(op1 | __disp, op2); \ + _EMIT6((op1) | __disp, op2); \ }) #define _EMIT6_DISP_LH(op1, op2, disp) \ ({ \ - u32 _disp = (u32) disp; \ + u32 _disp = (u32) (disp); \ unsigned int __disp_h = _disp & 0xff000; \ unsigned int __disp_l = _disp & 0x00fff; \ - _EMIT6(op1 | __disp_l, op2 | __disp_h >> 4); \ + _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \ }) #define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \ ({ \ - _EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 | \ + _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \ reg_high(b3) << 8, op2, disp); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ @@ -226,8 +231,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \ ({ \ int rel = (jit->labels[label] - jit->prg) >> 1; \ - _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \ - op2 | mask << 12); \ + _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ + (op2) | (mask) << 12); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) @@ -235,68 +240,83 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \ ({ \ int rel = (jit->labels[label] - jit->prg) >> 1; \ - _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \ - (rel & 0xffff), op2 | (imm & 0xff) << 8); \ + _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ + (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ REG_SET_SEEN(b1); \ - BUILD_BUG_ON(((unsigned long) imm) > 0xff); \ + BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \ }) #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ ({ \ /* Branch instruction needs 6 bytes */ \ - int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\ - _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask); \ + int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\ + _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) #define EMIT6_PCREL_RILB(op, b, target) \ ({ \ - int rel = (target - jit->prg) / 2; \ - _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \ + unsigned int rel = (int)((target) - jit->prg) / 2; \ + _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\ REG_SET_SEEN(b); \ }) #define EMIT6_PCREL_RIL(op, target) \ ({ \ - int rel = (target - jit->prg) / 2; \ - _EMIT6(op | rel >> 16, rel & 0xffff); \ + unsigned int rel = (int)((target) - jit->prg) / 2; \ + _EMIT6((op) | rel >> 16, rel & 0xffff); \ +}) + +#define EMIT6_PCREL_RILC(op, mask, target) \ +({ \ + EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \ }) #define _EMIT6_IMM(op, imm) \ ({ \ unsigned int __imm = (imm); \ - _EMIT6(op | (__imm >> 16), __imm & 0xffff); \ + _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \ }) #define 
EMIT6_IMM(op, b1, imm) \ ({ \ - _EMIT6_IMM(op | reg_high(b1) << 16, imm); \ + _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \ REG_SET_SEEN(b1); \ }) -#define EMIT_CONST_U32(val) \ +#define _EMIT_CONST_U32(val) \ ({ \ unsigned int ret; \ - ret = jit->lit - jit->base_ip; \ - jit->seen |= SEEN_LITERAL; \ + ret = jit->lit32; \ if (jit->prg_buf) \ - *(u32 *) (jit->prg_buf + jit->lit) = (u32) val; \ - jit->lit += 4; \ + *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\ + jit->lit32 += 4; \ ret; \ }) -#define EMIT_CONST_U64(val) \ +#define EMIT_CONST_U32(val) \ ({ \ - unsigned int ret; \ - ret = jit->lit - jit->base_ip; \ jit->seen |= SEEN_LITERAL; \ + _EMIT_CONST_U32(val) - jit->base_ip; \ +}) + +#define _EMIT_CONST_U64(val) \ +({ \ + unsigned int ret; \ + ret = jit->lit64; \ if (jit->prg_buf) \ - *(u64 *) (jit->prg_buf + jit->lit) = (u64) val; \ - jit->lit += 8; \ + *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\ + jit->lit64 += 8; \ ret; \ }) +#define EMIT_CONST_U64(val) \ +({ \ + jit->seen |= SEEN_LITERAL; \ + _EMIT_CONST_U64(val) - jit->base_ip; \ +}) + #define EMIT_ZERO(b1) \ ({ \ if (!fp->aux->verifier_zext) { \ @@ -307,6 +327,67 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) }) /* + * Return whether this is the first pass. The first pass is special, since we + * don't know any sizes yet, and thus must be conservative. + */ +static bool is_first_pass(struct bpf_jit *jit) +{ + return jit->size == 0; +} + +/* + * Return whether this is the code generation pass. The code generation pass is + * special, since we should change as little as possible. + */ +static bool is_codegen_pass(struct bpf_jit *jit) +{ + return jit->prg_buf; +} + +/* + * Return whether "rel" can be encoded as a short PC-relative offset + */ +static bool is_valid_rel(int rel) +{ + return rel >= -65536 && rel <= 65534; +} + +/* + * Return whether "off" can be reached using a short PC-relative offset + */ +static bool can_use_rel(struct bpf_jit *jit, int off) +{ + return is_valid_rel(off - jit->prg); +} + +/* + * Return whether given displacement can be encoded using + * Long-Displacement Facility + */ +static bool is_valid_ldisp(int disp) +{ + return disp >= -524288 && disp <= 524287; +} + +/* + * Return whether the next 32-bit literal pool entry can be referenced using + * Long-Displacement Facility + */ +static bool can_use_ldisp_for_lit32(struct bpf_jit *jit) +{ + return is_valid_ldisp(jit->lit32 - jit->base_ip); +} + +/* + * Return whether the next 64-bit literal pool entry can be referenced using + * Long-Displacement Facility + */ +static bool can_use_ldisp_for_lit64(struct bpf_jit *jit) +{ + return is_valid_ldisp(jit->lit64 - jit->base_ip); +} + +/* * Fill whole space with illegal instructions */ static void jit_fill_hole(void *area, unsigned int size) @@ -383,9 +464,18 @@ static int get_end(struct bpf_jit *jit, int start) */ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) { - + const int last = 15, save_restore_size = 6; int re = 6, rs; + if (is_first_pass(jit)) { + /* + * We don't know yet which registers are used. Reserve space + * conservatively. 
+ */ + jit->prg += (last - re + 1) * save_restore_size; + return; + } + do { rs = get_start(jit, re); if (!rs) @@ -396,7 +486,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) else restore_regs(jit, rs, re, stack_depth); re++; - } while (re <= 15); + } while (re <= last); } /* @@ -420,21 +510,28 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth) /* Save registers */ save_restore_regs(jit, REGS_SAVE, stack_depth); /* Setup literal pool */ - if (jit->seen & SEEN_LITERAL) { - /* basr %r13,0 */ - EMIT2(0x0d00, REG_L, REG_0); - jit->base_ip = jit->prg; + if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) { + if (!is_first_pass(jit) && + is_valid_ldisp(jit->size - (jit->prg + 2))) { + /* basr %l,0 */ + EMIT2(0x0d00, REG_L, REG_0); + jit->base_ip = jit->prg; + } else { + /* larl %l,lit32_start */ + EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start); + jit->base_ip = jit->lit32_start; + } } /* Setup stack and backchain */ - if (jit->seen & SEEN_STACK) { - if (jit->seen & SEEN_FUNC) + if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { + if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) /* lgr %w1,%r15 (backchain) */ EMIT4(0xb9040000, REG_W1, REG_15); /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); /* aghi %r15,-STK_OFF */ EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth)); - if (jit->seen & SEEN_FUNC) + if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) /* stg %w1,152(%r15) (backchain) */ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); @@ -446,12 +543,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth) */ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) { - /* Return 0 */ - if (jit->seen & SEEN_RET0) { - jit->ret0_ip = jit->prg; - /* lghi %b0,0 */ - EMIT4_IMM(0xa7090000, BPF_REG_0, 0); - } jit->exit_ip = jit->prg; /* Load exit code: lgr %r2,%b0 */ EMIT4(0xb9040000, REG_2, BPF_REG_0); @@ -476,7 +567,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) _EMIT2(0x07fe); if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable && - (jit->seen & SEEN_FUNC)) { + (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) { jit->r1_thunk_ip = jit->prg; /* Generate __s390_indirect_jump_r1 thunk */ if (test_facility(35)) { @@ -506,16 +597,14 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i, bool extra_pass) { struct bpf_insn *insn = &fp->insnsi[i]; - int jmp_off, last, insn_count = 1; u32 dst_reg = insn->dst_reg; u32 src_reg = insn->src_reg; + int last, insn_count = 1; u32 *addrs = jit->addrs; s32 imm = insn->imm; s16 off = insn->off; unsigned int mask; - if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX) - jit->seen |= SEEN_REG_AX; switch (insn->code) { /* * BPF_MOV @@ -549,9 +638,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, u64 imm64; imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32; - /* lg %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm64)); + /* lgrl %dst,imm */ + EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64)); insn_count = 2; break; } @@ -680,9 +768,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7080000, REG_W0, 0); /* lr %w1,%dst */ EMIT2(0x1800, REG_W1, dst_reg); - /* dl %w0,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L, - EMIT_CONST_U32(imm)); + if (!is_first_pass(jit) && 
can_use_ldisp_for_lit32(jit)) { + /* dl %w0,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L, + EMIT_CONST_U32(imm)); + } else { + /* lgfrl %dst,imm */ + EMIT6_PCREL_RILB(0xc40c0000, dst_reg, + _EMIT_CONST_U32(imm)); + jit->seen |= SEEN_LITERAL; + /* dlr %w0,%dst */ + EMIT4(0xb9970000, REG_W0, dst_reg); + } /* llgfr %dst,%rc */ EMIT4(0xb9160000, dst_reg, rc_reg); if (insn_is_zext(&insn[1])) @@ -704,9 +801,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7090000, REG_W0, 0); /* lgr %w1,%dst */ EMIT4(0xb9040000, REG_W1, dst_reg); - /* dlg %w0,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* dlg %w0,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %dst,imm */ + EMIT6_PCREL_RILB(0xc4080000, dst_reg, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* dlgr %w0,%dst */ + EMIT4(0xb9870000, REG_W0, dst_reg); + } /* lgr %dst,%rc */ EMIT4(0xb9040000, dst_reg, rc_reg); break; @@ -729,9 +835,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ - /* ng %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* ng %dst,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0080, + dst_reg, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %w0,imm */ + EMIT6_PCREL_RILB(0xc4080000, REG_W0, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* ngr %dst,%w0 */ + EMIT4(0xb9800000, dst_reg, REG_W0); + } break; /* * BPF_OR @@ -751,9 +867,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */ - /* og %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* og %dst,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0081, + dst_reg, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %w0,imm */ + EMIT6_PCREL_RILB(0xc4080000, REG_W0, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* ogr %dst,%w0 */ + EMIT4(0xb9810000, dst_reg, REG_W0); + } break; /* * BPF_XOR @@ -775,9 +901,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */ - /* xg %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* xg %dst,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0082, + dst_reg, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %w0,imm */ + EMIT6_PCREL_RILB(0xc4080000, REG_W0, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* xgr %dst,%w0 */ + EMIT4(0xb9820000, dst_reg, REG_W0); + } break; /* * BPF_LSH @@ -1023,9 +1159,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, REG_SET_SEEN(BPF_REG_5); jit->seen |= SEEN_FUNC; - /* lg %w1,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, - EMIT_CONST_U64(func)); + /* lgrl %w1,func */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func)); if (__is_defined(CC_USING_EXPOLINE) && 
!nospec_disable) { /* brasl %r14,__s390_indirect_jump_r1 */ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); @@ -1054,9 +1189,17 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* llgf %w1,map.max_entries(%b2) */ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, offsetof(struct bpf_array, map.max_entries)); - /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */ - EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, - REG_W1, 0, 0xa); + /* if ((u32)%b3 >= (u32)%w1) goto out; */ + if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { + /* clrj %b3,%w1,0xa,label0 */ + EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, + REG_W1, 0, 0xa); + } else { + /* clr %b3,%w1 */ + EMIT2(0x1500, BPF_REG_3, REG_W1); + /* brcl 0xa,label0 */ + EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]); + } /* * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) @@ -1071,9 +1214,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7080000, REG_W0, 1); /* laal %w1,%w0,off(%r15) */ EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); - /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ - EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, - MAX_TAIL_CALL_CNT, 0, 0x2); + if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { + /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ + EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, + MAX_TAIL_CALL_CNT, 0, 0x2); + } else { + /* clfi %w1,MAX_TAIL_CALL_CNT */ + EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT); + /* brcl 0x2,label0 */ + EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]); + } /* * prog = array->ptrs[index]; @@ -1085,11 +1235,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4(0xb9160000, REG_1, BPF_REG_3); /* sllg %r1,%r1,3: %r1 *= 8 */ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); - /* lg %r1,prog(%b2,%r1) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, + /* ltg %r1,prog(%b2,%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2, REG_1, offsetof(struct bpf_array, ptrs)); - /* clgij %r1,0,0x8,label0 */ - EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8); + if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { + /* brc 0x8,label0 */ + EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]); + } else { + /* brcl 0x8,label0 */ + EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]); + } /* * Restore registers before calling function @@ -1110,7 +1265,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, break; case BPF_JMP | BPF_EXIT: /* return b0 */ last = (i == fp->len - 1) ? 1 : 0; - if (last && !(jit->seen & SEEN_RET0)) + if (last) break; /* j <exit> */ EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); @@ -1246,36 +1401,83 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, goto branch_oc; branch_ks: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* lgfi %w1,imm (load sign extend imm) */ - EMIT6_IMM(0xc0010000, REG_W1, imm); - /* crj or cgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), - dst_reg, REG_W1, i, off, mask); + /* cfi or cgfi %dst,imm */ + EMIT6_IMM(is_jmp32 ? 
0xc20d0000 : 0xc20c0000, + dst_reg, imm); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* brc mask,off */ + EMIT4_PCREL_RIC(0xa7040000, + mask >> 12, addrs[i + off + 1]); + } else { + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_ku: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* lgfi %w1,imm (load sign extend imm) */ - EMIT6_IMM(0xc0010000, REG_W1, imm); - /* clrj or clgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), - dst_reg, REG_W1, i, off, mask); + /* clfi or clgfi %dst,imm */ + EMIT6_IMM(is_jmp32 ? 0xc20f0000 : 0xc20e0000, + dst_reg, imm); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* brc mask,off */ + EMIT4_PCREL_RIC(0xa7040000, + mask >> 12, addrs[i + off + 1]); + } else { + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_xs: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* crj or cgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), - dst_reg, src_reg, i, off, mask); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* crj or cgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, src_reg, i, off, mask); + } else { + /* cr or cgr %dst,%src */ + if (is_jmp32) + EMIT2(0x1900, dst_reg, src_reg); + else + EMIT4(0xb9200000, dst_reg, src_reg); + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_xu: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* clrj or clgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), - dst_reg, src_reg, i, off, mask); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* clrj or clgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 
0x0077 : 0x0065), + dst_reg, src_reg, i, off, mask); + } else { + /* clr or clgr %dst,%src */ + if (is_jmp32) + EMIT2(0x1500, dst_reg, src_reg); + else + EMIT4(0xb9210000, dst_reg, src_reg); + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_oc: - /* brc mask,jmp_off (branch instruction needs 4 bytes) */ - jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4); - EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* brc mask,off */ + EMIT4_PCREL_RIC(0xa7040000, + mask >> 12, addrs[i + off + 1]); + } else { + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; } default: /* too complex, give up */ @@ -1286,28 +1488,67 @@ branch_oc: } /* + * Return whether new i-th instruction address does not violate any invariant + */ +static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i) +{ + /* On the first pass anything goes */ + if (is_first_pass(jit)) + return true; + + /* The codegen pass must not change anything */ + if (is_codegen_pass(jit)) + return jit->addrs[i] == jit->prg; + + /* Passes in between must not increase code size */ + return jit->addrs[i] >= jit->prg; +} + +/* + * Update the address of i-th instruction + */ +static int bpf_set_addr(struct bpf_jit *jit, int i) +{ + if (!bpf_is_new_addr_sane(jit, i)) + return -1; + jit->addrs[i] = jit->prg; + return 0; +} + +/* * Compile eBPF program into s390x code */ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, bool extra_pass) { - int i, insn_count; + int i, insn_count, lit32_size, lit64_size; - jit->lit = jit->lit_start; + jit->lit32 = jit->lit32_start; + jit->lit64 = jit->lit64_start; jit->prg = 0; bpf_jit_prologue(jit, fp->aux->stack_depth); + if (bpf_set_addr(jit, 0) < 0) + return -1; for (i = 0; i < fp->len; i += insn_count) { insn_count = bpf_jit_insn(jit, fp, i, extra_pass); if (insn_count < 0) return -1; /* Next instruction address */ - jit->addrs[i + insn_count] = jit->prg; + if (bpf_set_addr(jit, i + insn_count) < 0) + return -1; } bpf_jit_epilogue(jit, fp->aux->stack_depth); - jit->lit_start = jit->prg; - jit->size = jit->lit; + lit32_size = jit->lit32 - jit->lit32_start; + lit64_size = jit->lit64 - jit->lit64_start; + jit->lit32_start = jit->prg; + if (lit32_size) + jit->lit32_start = ALIGN(jit->lit32_start, 4); + jit->lit64_start = jit->lit32_start + lit32_size; + if (lit64_size) + jit->lit64_start = ALIGN(jit->lit64_start, 8); + jit->size = jit->lit64_start + lit64_size; jit->size_prg = jit->prg; return 0; } @@ -1369,7 +1610,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) } memset(&jit, 0, sizeof(jit)); - jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); + jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); if (jit.addrs == NULL) { fp = orig_fp; goto out; @@ -1388,12 +1629,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) /* * Final pass: Allocate and generate program */ - if (jit.size >= BPF_SIZE_MAX) { - fp = orig_fp; - goto free_addrs; - } - - header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole); + header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 8, jit_fill_hole); if (!header) { fp = orig_fp; goto free_addrs; @@ -1422,7 +1658,7 @@ skip_init_ctx: if (!fp->is_func || extra_pass) { bpf_prog_fill_jited_linfo(fp, jit.addrs + 1); free_addrs: - kfree(jit.addrs); + kvfree(jit.addrs); kfree(jit_data); fp->aux->jit_data = NULL; } diff --git a/arch/s390/pci/pci.c 
b/arch/s390/pci/pci.c index c7fea9bea8cb..8e872951c07b 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -27,6 +27,7 @@ #include <linux/seq_file.h> #include <linux/jump_label.h> #include <linux/pci.h> +#include <linux/printk.h> #include <asm/isc.h> #include <asm/airq.h> @@ -43,7 +44,7 @@ static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); static DEFINE_SPINLOCK(zpci_domain_lock); #define ZPCI_IOMAP_ENTRIES \ - min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2), \ + min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \ ZPCI_IOMAP_MAX_ENTRIES) static DEFINE_SPINLOCK(zpci_iomap_lock); @@ -294,7 +295,7 @@ static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar, void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar, unsigned long offset, unsigned long max) { - if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT) + if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar)) return NULL; if (static_branch_likely(&have_mio)) @@ -324,7 +325,7 @@ static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar, void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar, unsigned long offset, unsigned long max) { - if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT) + if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar)) return NULL; if (static_branch_likely(&have_mio)) @@ -416,7 +417,7 @@ static void zpci_map_resources(struct pci_dev *pdev) resource_size_t len; int i; - for (i = 0; i < PCI_BAR_COUNT; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { len = pci_resource_len(pdev, i); if (!len) continue; @@ -451,7 +452,7 @@ static void zpci_unmap_resources(struct pci_dev *pdev) if (zpci_use_mio(zdev)) return; - for (i = 0; i < PCI_BAR_COUNT; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { len = pci_resource_len(pdev, i); if (!len) continue; @@ -514,7 +515,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev, snprintf(zdev->res_name, sizeof(zdev->res_name), "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR); - for (i = 0; i < PCI_BAR_COUNT; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (!zdev->bars[i].size) continue; entry = zpci_alloc_iomap(zdev); @@ -551,7 +552,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) { int i; - for (i = 0; i < PCI_BAR_COUNT; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (!zdev->bars[i].size || !zdev->bars[i].res) continue; @@ -573,7 +574,7 @@ int pcibios_add_device(struct pci_dev *pdev) pdev->dev.dma_ops = &s390_pci_dma_ops; zpci_map_resources(pdev); - for (i = 0; i < PCI_BAR_COUNT; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { res = &pdev->resource[i]; if (res->parent || !res->flags) continue; @@ -659,6 +660,8 @@ static int zpci_alloc_domain(struct zpci_dev *zdev) spin_lock(&zpci_domain_lock); if (test_bit(zdev->domain, zpci_domain)) { spin_unlock(&zpci_domain_lock); + pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n", + zdev->fid, zdev->domain); return -EEXIST; } set_bit(zdev->domain, zpci_domain); @@ -670,6 +673,8 @@ static int zpci_alloc_domain(struct zpci_dev *zdev) zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES); if (zdev->domain == ZPCI_NR_DEVICES) { spin_unlock(&zpci_domain_lock); + pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n", + zdev->fid, ZPCI_NR_DEVICES); return -ENOSPC; } set_bit(zdev->domain, zpci_domain); diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index e585a62d6530..4c613e569fe0 100644 --- a/arch/s390/pci/pci_clp.c +++ 
b/arch/s390/pci/pci_clp.c @@ -145,7 +145,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev, { int i; - for (i = 0; i < PCI_BAR_COUNT; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { zdev->bars[i].val = le32_to_cpu(response->bar[i]); zdev->bars[i].size = response->bar_size[i]; } @@ -164,8 +164,8 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev, sizeof(zdev->util_str)); } zdev->mio_capable = response->mio_addr_avail; - for (i = 0; i < PCI_BAR_COUNT; i++) { - if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1)))) + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1)))) continue; zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
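A note on the clp_store_query_pci_fn() hunk above: the CLP response numbers its MIO validity bits from the most significant bit down, which is why BAR i is tested against bit (PCI_STD_NUM_BARS - i - 1). The mapping on its own, as a sketch (the helper name and the unsigned int mask type are assumptions, not part of the patch):

/*
 * CLP reports one MIO-validity bit per standard BAR, MSB first:
 * with PCI_STD_NUM_BARS == 6, BAR 0 maps to bit 5, ..., BAR 5 to bit 0.
 */
static bool clp_mio_valid(unsigned int valid, int bar)
{
	return valid & (1U << (PCI_STD_NUM_BARS - bar - 1));
}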
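Returning to the bpf_jit_comp.c changes earlier in this patch: dropping BPF_SIZE_MAX is only safe because instruction addresses now converge across JIT passes instead of being bounded up front. A condensed sketch of that discipline (the wrapper and its name are illustrative; bpf_jit_prog() is the function from the patch, and the real driver loop lives in bpf_int_jit_compile()):

/*
 * Pass 1 runs with jit->size == 0 (is_first_pass()), so branches and
 * the register save area are sized conservatively. Later passes may
 * only shrink the code (bpf_is_new_addr_sane() rejects any growth),
 * and the final pass, once prg_buf is allocated, must not move
 * anything at all.
 */
static int bpf_jit_converge(struct bpf_jit *jit, struct bpf_prog *fp)
{
	int pass;

	for (pass = 1; pass <= 3; pass++)	/* jit->prg_buf == NULL here */
		if (bpf_jit_prog(jit, fp, false))
			return -1;		/* an address grew: give up */
	/* the caller then allocates jit->prg_buf of jit->size and runs: */
	return bpf_jit_prog(jit, fp, false);	/* codegen pass */
}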