author    | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-08-18 12:59:46 +0200
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-08-29 11:05:09 +0200
commit    | 69c0e360f990c2dc737681f40a361195066cef02 (patch)
tree      | e5e5ff1ff6f09e21b28752b948283589803d81eb /arch/s390/crypto
parent    | d863d5945f2be0abfcd9d36b1a7c605f3eaef517 (diff)
download  | linux-69c0e360f990c2dc737681f40a361195066cef02.tar.bz2
s390/crypto: cpacf function detection
The CPACF code makes some assumptions about the availability of hardware
support. E.g. if the machine supports KM(AES-256) without chaining, it is
assumed that KMC(AES-256) with chaining is available as well. For the
existing CPUs this is true, but the architecturally correct way is to
check each CPACF function on its own. This is what the query function
of each instruction is all about.
Reviewed-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
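
The detection pattern the patch introduces is the same in every multi-function module below: query the instruction's full function-code mask once at module init, then test each exact function code before registering an algorithm. A minimal sketch of that flow, using cpacf_query() and cpacf_test_func() as they appear in this patch (the init function name and the register helper are hypothetical):

#include <asm/cpacf.h>

/* One mask per CPACF instruction, filled once at module init */
static cpacf_mask_t km_functions;

static int __init example_init(void)
{
        /* Ask the hardware which KM function codes it implements */
        cpacf_query(CPACF_KM, &km_functions);

        /* Register an algorithm only if its exact function code is set;
         * do not infer KMC(AES-256) from KM(AES-256) or vice versa.
         */
        if (cpacf_test_func(&km_functions, CPACF_KM_AES_256))
                return register_aes256_alg(); /* hypothetical helper */
        return 0;
}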
Diffstat (limited to 'arch/s390/crypto')
-rw-r--r-- | arch/s390/crypto/aes_s390.c    | 299
-rw-r--r-- | arch/s390/crypto/des_s390.c    |  77
-rw-r--r-- | arch/s390/crypto/ghash_s390.c  |   2
-rw-r--r-- | arch/s390/crypto/prng.c        |   4
-rw-r--r-- | arch/s390/crypto/sha1_s390.c   |   2
-rw-r--r-- | arch/s390/crypto/sha256_s390.c |   2
-rw-r--r-- | arch/s390/crypto/sha512_s390.c |   2
7 files changed, 167 insertions, 221 deletions
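
Two detection idioms appear in the diff below. The aes and des modules need several function codes per instruction, so they keep a cpacf_mask_t around and probe it with cpacf_test_func(); the single-function modules (ghash, prng, sha1, sha256, sha512) instead move from the old cpacf_query() call to cpacf_query_func(), which queries and tests one function code in a single call. A minimal sketch of the single-function variant, mirroring the ghash_mod_init() change below:

#include <asm/cpacf.h>

static int __init example_single_init(void)
{
        /* Query and test a single function code in one call */
        if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
                return -EOPNOTSUPP; /* no hardware support */
        return 0;
}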
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index be87575270ff..f4ad96ebb7e9 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -31,13 +31,10 @@
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
-#define AES_KEYLEN_128 1
-#define AES_KEYLEN_192 2
-#define AES_KEYLEN_256 4
-
 static u8 *ctrblk;
 static DEFINE_SPINLOCK(ctrblk_lock);
-static char keylen_flag;
+
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
 struct s390_aes_ctx {
         u8 key[AES_MAX_KEY_SIZE];
@@ -65,33 +62,6 @@ struct s390_xts_ctx {
         struct crypto_skcipher *fallback;
 };
 
-/*
- * Check if the key_len is supported by the HW.
- * Returns 0 if it is, a positive number if it is not and software fallback is
- * required or a negative number in case the key size is not valid
- */
-static int need_fallback(unsigned int key_len)
-{
-        switch (key_len) {
-        case 16:
-                if (!(keylen_flag & AES_KEYLEN_128))
-                        return 1;
-                break;
-        case 24:
-                if (!(keylen_flag & AES_KEYLEN_192))
-                        return 1;
-                break;
-        case 32:
-                if (!(keylen_flag & AES_KEYLEN_256))
-                        return 1;
-                break;
-        default:
-                return -1;
-                break;
-        }
-        return 0;
-}
-
 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                 unsigned int key_len)
 {
@@ -115,72 +85,44 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                        unsigned int key_len)
 {
         struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-        u32 *flags = &tfm->crt_flags;
-        int ret;
+        unsigned long fc;
 
-        ret = need_fallback(key_len);
-        if (ret < 0) {
-                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-                return -EINVAL;
-        }
+        /* Pick the correct function code based on the key length */
+        fc = (key_len == 16) ? CPACF_KM_AES_128 :
+             (key_len == 24) ? CPACF_KM_AES_192 :
+             (key_len == 32) ? CPACF_KM_AES_256 : 0;
 
-        sctx->key_len = key_len;
-        if (!ret) {
-                memcpy(sctx->key, in_key, key_len);
-                return 0;
-        }
+        /* Check if the function code is available */
+        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+        if (!sctx->fc)
+                return setkey_fallback_cip(tfm, in_key, key_len);
 
-        return setkey_fallback_cip(tfm, in_key, key_len);
+        sctx->key_len = key_len;
+        memcpy(sctx->key, in_key, key_len);
+        return 0;
 }
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
         struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-        if (unlikely(need_fallback(sctx->key_len))) {
+        if (unlikely(!sctx->fc)) {
                 crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                 return;
         }
-
-        switch (sctx->key_len) {
-        case 16:
-                cpacf_km(CPACF_KM_AES_128,
-                         &sctx->key, out, in, AES_BLOCK_SIZE);
-                break;
-        case 24:
-                cpacf_km(CPACF_KM_AES_192,
-                         &sctx->key, out, in, AES_BLOCK_SIZE);
-                break;
-        case 32:
-                cpacf_km(CPACF_KM_AES_256,
-                         &sctx->key, out, in, AES_BLOCK_SIZE);
-                break;
-        }
+        cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
         struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-        if (unlikely(need_fallback(sctx->key_len))) {
+        if (unlikely(!sctx->fc)) {
                 crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                 return;
         }
-
-        switch (sctx->key_len) {
-        case 16:
-                cpacf_km(CPACF_KM_AES_128 | CPACF_DECRYPT,
-                         &sctx->key, out, in, AES_BLOCK_SIZE);
-                break;
-        case 24:
-                cpacf_km(CPACF_KM_AES_192 | CPACF_DECRYPT,
-                         &sctx->key, out, in, AES_BLOCK_SIZE);
-                break;
-        case 32:
-                cpacf_km(CPACF_KM_AES_256 | CPACF_DECRYPT,
-                         &sctx->key, out, in, AES_BLOCK_SIZE);
-                break;
-        }
+        cpacf_km(sctx->fc | CPACF_DECRYPT,
                 &sctx->key, out, in, AES_BLOCK_SIZE);
 }
 
 static int fallback_init_cip(struct crypto_tfm *tfm)
@@ -289,27 +231,21 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                            unsigned int key_len)
 {
         struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-        int ret;
+        unsigned long fc;
 
-        ret = need_fallback(key_len);
-        if (ret > 0) {
-                sctx->key_len = key_len;
-                return setkey_fallback_blk(tfm, in_key, key_len);
-        }
+        /* Pick the correct function code based on the key length */
+        fc = (key_len == 16) ? CPACF_KM_AES_128 :
+             (key_len == 24) ? CPACF_KM_AES_192 :
+             (key_len == 32) ? CPACF_KM_AES_256 : 0;
 
-        switch (key_len) {
-        case 16:
-                sctx->fc = CPACF_KM_AES_128;
-                break;
-        case 24:
-                sctx->fc = CPACF_KM_AES_192;
-                break;
-        case 32:
-                sctx->fc = CPACF_KM_AES_256;
-                break;
-        }
+        /* Check if the function code is available */
+        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+        if (!sctx->fc)
+                return setkey_fallback_blk(tfm, in_key, key_len);
 
-        return aes_set_key(tfm, in_key, key_len);
+        sctx->key_len = key_len;
+        memcpy(sctx->key, in_key, key_len);
+        return 0;
 }
 
 static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
@@ -340,7 +276,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
         struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
-        if (unlikely(need_fallback(sctx->key_len)))
+        if (unlikely(!sctx->fc))
                 return fallback_blk_enc(desc, dst, src, nbytes);
 
         blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -354,7 +290,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
         struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
-        if (unlikely(need_fallback(sctx->key_len)))
+        if (unlikely(!sctx->fc))
                 return fallback_blk_dec(desc, dst, src, nbytes);
 
         blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -413,27 +349,21 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                            unsigned int key_len)
 {
         struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-        int ret;
+        unsigned long fc;
 
-        ret = need_fallback(key_len);
-        if (ret > 0) {
-                sctx->key_len = key_len;
-                return setkey_fallback_blk(tfm, in_key, key_len);
-        }
+        /* Pick the correct function code based on the key length */
+        fc = (key_len == 16) ? CPACF_KMC_AES_128 :
+             (key_len == 24) ? CPACF_KMC_AES_192 :
+             (key_len == 32) ? CPACF_KMC_AES_256 : 0;
 
-        switch (key_len) {
-        case 16:
-                sctx->fc = CPACF_KMC_AES_128;
-                break;
-        case 24:
-                sctx->fc = CPACF_KMC_AES_192;
-                break;
-        case 32:
-                sctx->fc = CPACF_KMC_AES_256;
-                break;
-        }
+        /* Check if the function code is available */
+        sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+        if (!sctx->fc)
+                return setkey_fallback_blk(tfm, in_key, key_len);
 
-        return aes_set_key(tfm, in_key, key_len);
+        sctx->key_len = key_len;
+        memcpy(sctx->key, in_key, key_len);
+        return 0;
 }
 
 static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
@@ -476,7 +406,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
         struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
-        if (unlikely(need_fallback(sctx->key_len)))
+        if (unlikely(!sctx->fc))
                 return fallback_blk_enc(desc, dst, src, nbytes);
 
         blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -490,7 +420,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
         struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
-        if (unlikely(need_fallback(sctx->key_len)))
+        if (unlikely(!sctx->fc))
                 return fallback_blk_dec(desc, dst, src, nbytes);
 
         blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -582,33 +512,27 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                            unsigned int key_len)
 {
         struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
-        u32 *flags = &tfm->crt_flags;
+        unsigned long fc;
         int err;
 
         err = xts_check_key(tfm, in_key, key_len);
         if (err)
                 return err;
 
-        switch (key_len) {
-        case 32:
-                xts_ctx->fc = CPACF_KM_XTS_128;
-                memcpy(xts_ctx->key + 16, in_key, 16);
-                memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
-                break;
-        case 48:
-                xts_ctx->fc = 0;
-                xts_fallback_setkey(tfm, in_key, key_len);
-                break;
-        case 64:
-                xts_ctx->fc = CPACF_KM_XTS_256;
-                memcpy(xts_ctx->key, in_key, 32);
-                memcpy(xts_ctx->pcc_key, in_key + 32, 32);
-                break;
-        default:
-                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-                return -EINVAL;
-        }
+        /* Pick the correct function code based on the key length */
+        fc = (key_len == 32) ? CPACF_KM_XTS_128 :
+             (key_len == 64) ? CPACF_KM_XTS_256 : 0;
+
+        /* Check if the function code is available */
+        xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+        if (!xts_ctx->fc)
+                return xts_fallback_setkey(tfm, in_key, key_len);
+
+        /* Split the XTS key into the two subkeys */
+        key_len = key_len / 2;
         xts_ctx->key_len = key_len;
+        memcpy(xts_ctx->key, in_key, key_len);
+        memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
         return 0;
 }
 
@@ -616,7 +540,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
                          struct s390_xts_ctx *xts_ctx,
                          struct blkcipher_walk *walk)
 {
-        unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+        unsigned int offset = xts_ctx->key_len & 0x10;
         int ret = blkcipher_walk_virt(desc, walk);
         unsigned int nbytes = walk->nbytes;
         unsigned int n;
@@ -634,11 +558,11 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
         memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
         memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
         memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
-        memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+        memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
         /* remove decipher modifier bit from 'func' and call PCC */
         cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
 
-        memcpy(xts_param.key, xts_ctx->key, 32);
+        memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
         memcpy(xts_param.init, pcc_param.xts, 16);
         do {
                 /* only use complete blocks */
@@ -662,7 +586,7 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
         struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
-        if (unlikely(xts_ctx->key_len == 48))
+        if (unlikely(!xts_ctx->fc))
                 return xts_fallback_encrypt(desc, dst, src, nbytes);
 
         blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -676,7 +600,7 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
         struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
-        if (unlikely(xts_ctx->key_len == 48))
+        if (unlikely(!xts_ctx->fc))
                 return xts_fallback_decrypt(desc, dst, src, nbytes);
 
         blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -735,20 +659,21 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                 unsigned int key_len)
 {
         struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+        unsigned long fc;
 
-        switch (key_len) {
-        case 16:
-                sctx->fc = CPACF_KMCTR_AES_128;
-                break;
-        case 24:
-                sctx->fc = CPACF_KMCTR_AES_192;
-                break;
-        case 32:
-                sctx->fc = CPACF_KMCTR_AES_256;
-                break;
-        }
+        /* Pick the correct function code based on the key length */
+        fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
+             (key_len == 24) ? CPACF_KMCTR_AES_192 :
+             (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
+
+        /* Check if the function code is available */
+        sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
+        if (!sctx->fc)
+                return setkey_fallback_blk(tfm, in_key, key_len);
 
-        return aes_set_key(tfm, in_key, key_len);
+        sctx->key_len = key_len;
+        memcpy(sctx->key, in_key, key_len);
+        return 0;
 }
 
 static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
@@ -832,6 +757,9 @@ static int ctr_aes_encrypt(struct blkcipher_desc *desc,
         struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
+        if (unlikely(!sctx->fc))
+                return fallback_blk_enc(desc, dst, src, nbytes);
+
         blkcipher_walk_init(&walk, dst, src, nbytes);
         return ctr_aes_crypt(desc, sctx->fc, sctx, &walk);
 }
@@ -843,6 +771,9 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
         struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
         struct blkcipher_walk walk;
 
+        if (unlikely(!sctx->fc))
+                return fallback_blk_dec(desc, dst, src, nbytes);
+
         blkcipher_walk_init(&walk, dst, src, nbytes);
         return ctr_aes_crypt(desc, sctx->fc | CPACF_DECRYPT, sctx, &walk);
 }
@@ -851,11 +782,14 @@ static struct crypto_alg ctr_aes_alg = {
         .cra_name               = "ctr(aes)",
         .cra_driver_name        = "ctr-aes-s390",
         .cra_priority           = 400,  /* combo: aes + ctr */
-        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
+                                  CRYPTO_ALG_NEED_FALLBACK,
         .cra_blocksize          = 1,
         .cra_ctxsize            = sizeof(struct s390_aes_ctx),
         .cra_type               = &crypto_blkcipher_type,
         .cra_module             = THIS_MODULE,
+        .cra_init               = fallback_init_blk,
+        .cra_exit               = fallback_exit_blk,
         .cra_u                  = {
                 .blkcipher = {
                         .min_keysize    = AES_MIN_KEY_SIZE,
@@ -893,43 +827,40 @@ static int __init aes_s390_init(void)
 {
         int ret;
 
-        if (cpacf_query(CPACF_KM, CPACF_KM_AES_128))
-                keylen_flag |= AES_KEYLEN_128;
-        if (cpacf_query(CPACF_KM, CPACF_KM_AES_192))
-                keylen_flag |= AES_KEYLEN_192;
-        if (cpacf_query(CPACF_KM, CPACF_KM_AES_256))
-                keylen_flag |= AES_KEYLEN_256;
-
-        if (!keylen_flag)
-                return -EOPNOTSUPP;
-
-        /* z9 109 and z9 BC/EC only support 128 bit key length */
-        if (keylen_flag == AES_KEYLEN_128)
-                pr_info("AES hardware acceleration is only available for"
-                        " 128-bit keys\n");
-
-        ret = aes_s390_register_alg(&aes_alg);
-        if (ret)
-                goto out_err;
+        /* Query available functions for KM, KMC and KMCTR */
+        cpacf_query(CPACF_KM, &km_functions);
+        cpacf_query(CPACF_KMC, &kmc_functions);
+        cpacf_query(CPACF_KMCTR, &kmctr_functions);
 
-        ret = aes_s390_register_alg(&ecb_aes_alg);
-        if (ret)
-                goto out_err;
+        if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
+            cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
+            cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
+                ret = aes_s390_register_alg(&aes_alg);
+                if (ret)
+                        goto out_err;
+                ret = aes_s390_register_alg(&ecb_aes_alg);
+                if (ret)
+                        goto out_err;
+        }
 
-        ret = aes_s390_register_alg(&cbc_aes_alg);
-        if (ret)
-                goto out_err;
+        if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
+            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
+            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
+                ret = aes_s390_register_alg(&cbc_aes_alg);
+                if (ret)
+                        goto out_err;
+        }
 
-        if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128) &&
-            cpacf_query(CPACF_KM, CPACF_KM_XTS_256)) {
+        if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
+            cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
                 ret = aes_s390_register_alg(&xts_aes_alg);
                 if (ret)
                         goto out_err;
         }
 
-        if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128) &&
-            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192) &&
-            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256)) {
+        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
+            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
+            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
                 ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                 if (!ctrblk) {
                         ret = -ENOMEM;
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index b77a546f1e76..965587eefc39 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -27,6 +27,8 @@
 static u8 *ctrblk;
 static DEFINE_SPINLOCK(ctrblk_lock);
 
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+
 struct s390_des_ctx {
         u8 iv[DES_BLOCK_SIZE];
         u8 key[DES3_KEY_SIZE];
@@ -36,12 +38,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
                       unsigned int key_len)
 {
         struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-        u32 *flags = &tfm->crt_flags;
         u32 tmp[DES_EXPKEY_WORDS];
 
         /* check for weak keys */
-        if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
-                *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+        if (!des_ekey(tmp, key) &&
+            (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                 return -EINVAL;
         }
 
@@ -238,13 +240,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
                        unsigned int key_len)
 {
         struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-        u32 *flags = &tfm->crt_flags;
 
         if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
             crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
                           DES_KEY_SIZE)) &&
-            (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
-                *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+            (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                 return -EINVAL;
         }
         memcpy(ctx->key, key, key_len);
@@ -554,39 +555,53 @@ static int __init des_s390_init(void)
 {
         int ret;
 
-        if (!cpacf_query(CPACF_KM, CPACF_KM_DEA) ||
-            !cpacf_query(CPACF_KM, CPACF_KM_TDEA_192))
-                return -EOPNOTSUPP;
-
-        ret = des_s390_register_alg(&des_alg);
-        if (ret)
-                goto out_err;
-        ret = des_s390_register_alg(&ecb_des_alg);
-        if (ret)
-                goto out_err;
-        ret = des_s390_register_alg(&cbc_des_alg);
-        if (ret)
-                goto out_err;
-        ret = des_s390_register_alg(&des3_alg);
-        if (ret)
-                goto out_err;
-        ret = des_s390_register_alg(&ecb_des3_alg);
-        if (ret)
-                goto out_err;
-        ret = des_s390_register_alg(&cbc_des3_alg);
-        if (ret)
-                goto out_err;
-
-        if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA) &&
-            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192)) {
+        /* Query available functions for KM, KMC and KMCTR */
+        cpacf_query(CPACF_KM, &km_functions);
+        cpacf_query(CPACF_KMC, &kmc_functions);
+        cpacf_query(CPACF_KMCTR, &kmctr_functions);
+
+        if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) {
+                ret = des_s390_register_alg(&des_alg);
+                if (ret)
+                        goto out_err;
+                ret = des_s390_register_alg(&ecb_des_alg);
+                if (ret)
+                        goto out_err;
+        }
+        if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
+                ret = des_s390_register_alg(&cbc_des_alg);
+                if (ret)
+                        goto out_err;
+        }
+        if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) {
+                ret = des_s390_register_alg(&des3_alg);
+                if (ret)
+                        goto out_err;
+                ret = des_s390_register_alg(&ecb_des3_alg);
+                if (ret)
+                        goto out_err;
+        }
+        if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
+                ret = des_s390_register_alg(&cbc_des3_alg);
+                if (ret)
+                        goto out_err;
+        }
+
+        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) ||
+            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
                 ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                 if (!ctrblk) {
                         ret = -ENOMEM;
                         goto out_err;
                 }
+        }
+
+        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
                 ret = des_s390_register_alg(&ctr_des_alg);
                 if (ret)
                         goto out_err;
+        }
+        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
                 ret = des_s390_register_alg(&ctr_des3_alg);
                 if (ret)
                         goto out_err;
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 8e87f5176799..564616d48d8b 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -136,7 +136,7 @@ static struct shash_alg ghash_alg = {
 
 static int __init ghash_mod_init(void)
 {
-        if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
+        if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
                 return -EOPNOTSUPP;
 
         return crypto_register_shash(&ghash_alg);
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index bbf2af74c549..79e3a1f6313a 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -757,13 +757,13 @@ static int __init prng_init(void)
         int ret;
 
         /* check if the CPU has a PRNG */
-        if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
+        if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
                 return -EOPNOTSUPP;
 
         /* choose prng mode */
         if (prng_mode != PRNG_MODE_TDES) {
                 /* check for MSA5 support for PPNO operations */
-                if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
+                if (!cpacf_query_func(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
                         if (prng_mode == PRNG_MODE_SHA512) {
                                 pr_err("The prng module cannot "
                                        "start in SHA-512 mode\n");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5fbf91bbb478..c7de53d8da75 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -91,7 +91,7 @@ static struct shash_alg alg = {
 
 static int __init sha1_s390_init(void)
 {
-        if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
+        if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_1))
                 return -EOPNOTSUPP;
         return crypto_register_shash(&alg);
 }
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 10aac0b11988..53c277999a28 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
 {
         int ret;
 
-        if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
+        if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
                 return -EOPNOTSUPP;
         ret = crypto_register_shash(&sha256_alg);
         if (ret < 0)
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index ea85757be407..2f4caa1ef123 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -133,7 +133,7 @@ static int __init init(void)
 {
         int ret;
 
-        if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
+        if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_512))
                 return -EOPNOTSUPP;
         if ((ret = crypto_register_shash(&sha512_alg)) < 0)
                 goto out;