author	Ard Biesheuvel <ardb@kernel.org>	2019-11-09 18:09:29 +0100
committer	Herbert Xu <herbert@gondor.apana.org.au>	2019-11-17 09:02:45 +0800
commit	be9fe620af63e76781d869f61d7e306bbe482415 (patch)
tree	367756a58438c77d5d7d9dff87f660e15e2bcb0b /drivers/crypto/ccp/ccp-crypto-aes-xts.c
parent	eee1d6fca0a02f5a089ad063812f98e717b8e7df (diff)
download	linux-be9fe620af63e76781d869f61d7e306bbe482415.tar.bz2
crypto: ccp - switch from ablkcipher to skcipher
Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface") dated 20 August 2015 introduced the new skcipher API, which is intended to replace both blkcipher and ablkcipher. While all consumers of the API were converted long ago, some producers of the ablkcipher API remain, forcing us to keep the ablkcipher support routines alive, along with the matching code to expose [a]blkciphers via the skcipher API.

So switch this driver to the skcipher API, allowing us to finally drop the ablkcipher code in the near future.

Reviewed-by: Gary R Hook <gary.hook@amd.com>
Tested-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
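For context, the consumer-facing side of the skcipher interface that this patch aligns the driver with looks roughly as follows. This is a minimal sketch, not code from this patch; example_xts_encrypt and its parameters are hypothetical, and error handling is trimmed to the essentials:

	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	/* Hypothetical helper: one-shot XTS-AES encryption through whatever
	 * provider (software or hardware such as CCP) wins on priority. */
	static int example_xts_encrypt(struct scatterlist *src,
				       struct scatterlist *dst,
				       unsigned int len, u8 *iv,
				       const u8 *key, unsigned int keylen)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_skcipher_setkey(tfm, key, keylen);
		if (ret)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		/* Run the request synchronously; async providers complete via
		 * crypto_req_done() and crypto_wait_req() blocks until then. */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return ret;
	}

The key point is that callers never see whether the work is done in software or offloaded: converting the remaining producers, as this patch does for CCP, is what lets the ablkcipher plumbing behind this interface be removed.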
Diffstat (limited to 'drivers/crypto/ccp/ccp-crypto-aes-xts.c')
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-aes-xts.c	| 94
1 file changed, 47 insertions(+), 47 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 8e4a531f4f70..04b2517df955 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -24,7 +24,7 @@ struct ccp_aes_xts_def {
const char *drv_name;
};
-static struct ccp_aes_xts_def aes_xts_algs[] = {
+static const struct ccp_aes_xts_def aes_xts_algs[] = {
{
.name = "xts(aes)",
.drv_name = "xts-aes-ccp",
@@ -61,26 +61,25 @@ static struct ccp_unit_size_map xts_unit_sizes[] = {
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
if (ret)
return ret;
- memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+ memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);
return 0;
}
-static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
- struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int ccpversion = ccp_version();
int ret;
- ret = xts_check_key(xfm, key, key_len);
+ ret = xts_verify_key(tfm, key, key_len);
if (ret)
return ret;
@@ -102,11 +101,12 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
-static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+static int ccp_aes_xts_crypt(struct skcipher_request *req,
unsigned int encrypt)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
unsigned int ccpversion = ccp_version();
unsigned int fallback = 0;
unsigned int unit;
@@ -116,7 +116,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!ctx->u.aes.key_len)
return -EINVAL;
- if (!req->info)
+ if (!req->iv)
return -EINVAL;
/* Check conditions under which the CCP can fulfill a request. The
@@ -127,7 +127,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
*/
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
- if (req->nbytes == xts_unit_sizes[unit].size) {
+ if (req->cryptlen == xts_unit_sizes[unit].size) {
unit_size = unit;
break;
}
@@ -155,14 +155,14 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
return ret;
}
- memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
@@ -177,7 +177,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.u.xts.iv = &rctx->iv_sg;
rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
rctx->cmd.u.xts.src = req->src;
- rctx->cmd.u.xts.src_len = req->nbytes;
+ rctx->cmd.u.xts.src_len = req->cryptlen;
rctx->cmd.u.xts.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
@@ -185,19 +185,19 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
return ret;
}
-static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_encrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 1);
}
-static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+static int ccp_aes_xts_decrypt(struct skcipher_request *req)
{
return ccp_aes_xts_crypt(req, 0);
}
-static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
@@ -212,14 +212,14 @@ static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
}
ctx->u.aes.tfm_skcipher = fallback_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
return 0;
}
-static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
@@ -227,8 +227,8 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
static int ccp_register_aes_xts_alg(struct list_head *head,
const struct ccp_aes_xts_def *def)
{
- struct ccp_crypto_ablkcipher_alg *ccp_alg;
- struct crypto_alg *alg;
+ struct ccp_crypto_skcipher_alg *ccp_alg;
+ struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
@@ -239,30 +239,30 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
alg = &ccp_alg->alg;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_blocksize = AES_BLOCK_SIZE;
- alg->cra_ctxsize = sizeof(struct ccp_ctx);
- alg->cra_priority = CCP_CRA_PRIORITY;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
- alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
- alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
- alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
- alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
- alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
- alg->cra_init = ccp_aes_xts_cra_init;
- alg->cra_exit = ccp_aes_xts_cra_exit;
- alg->cra_module = THIS_MODULE;
-
- ret = crypto_register_alg(alg);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->base.cra_blocksize = AES_BLOCK_SIZE;
+ alg->base.cra_ctxsize = sizeof(struct ccp_ctx);
+ alg->base.cra_priority = CCP_CRA_PRIORITY;
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->setkey = ccp_aes_xts_setkey;
+ alg->encrypt = ccp_aes_xts_encrypt;
+ alg->decrypt = ccp_aes_xts_decrypt;
+ alg->min_keysize = AES_MIN_KEY_SIZE * 2;
+ alg->max_keysize = AES_MAX_KEY_SIZE * 2;
+ alg->ivsize = AES_BLOCK_SIZE;
+ alg->init = ccp_aes_xts_init_tfm;
+ alg->exit = ccp_aes_xts_exit_tfm;
+
+ ret = crypto_register_skcipher(alg);
if (ret) {
- pr_err("%s ablkcipher algorithm registration error (%d)\n",
- alg->cra_name, ret);
+ pr_err("%s skcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
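For reference, the software-fallback path kept by this conversion (the subreq block in ccp_aes_xts_crypt above) follows the standard sync-skcipher pattern. A minimal sketch, assuming a fallback tfm previously allocated with crypto_alloc_sync_skcipher(); example_fallback_crypt is a hypothetical name, not the driver's:

	#include <crypto/skcipher.h>

	static int example_fallback_crypt(struct crypto_sync_skcipher *fallback,
					  struct skcipher_request *req, bool enc)
	{
		/* On-stack subrequest sized for any sync skcipher. */
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
		int ret;

		skcipher_request_set_sync_tfm(subreq, fallback);
		skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);
		ret = enc ? crypto_skcipher_encrypt(subreq)
			  : crypto_skcipher_decrypt(subreq);
		/* Wipe key-dependent state before the stack frame is reused. */
		skcipher_request_zero(subreq);
		return ret;
	}

Because the sync fallback runs to completion in the caller's context, the on-stack request needs no completion callback, but it must be zeroed before returning so no key material lingers on the stack.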