author    Herbert Xu <herbert@gondor.apana.org.au>  2022-12-02 17:20:49 +0800
committer Herbert Xu <herbert@gondor.apana.org.au>  2022-12-09 18:45:00 +0800
commit    99c6b20edfc031610240afca97ba9be5ec6f5750
tree      d915c78e08364358e435944bb5b994ffa354616c /drivers/crypto/ccp/ccp-crypto-aes-xts.c
parent    1c64a7e1f931821acadf964c5ddb0dc41abf9e20
crypto: ccp - Set DMA alignment explicitly
This driver has been implicitly relying on kmalloc alignment to be sufficient for DMA. This may no longer be the case with upcoming arm64 changes.

This patch changes it to explicitly request DMA alignment from the Crypto API.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
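For reference, the driver-side pattern applied throughout this patch looks roughly like the sketch below. This is a minimal illustration, not CCP code: the example_* names are hypothetical, and it only shows how the DMA-aware context accessor and crypto_dma_padding() fit together so that context memory handed to the DMA engine is explicitly aligned instead of relying on kmalloc alignment.

/*
 * Minimal sketch (hypothetical example_* names, other mandatory
 * skcipher_alg fields omitted for brevity): a driver whose tfm context
 * is used for DMA reserves crypto_dma_padding() in cra_ctxsize and
 * fetches the context through the DMA-aware accessor instead of
 * crypto_skcipher_ctx().
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>

struct example_ctx {
	u8 key[32];		/* buffer the hardware DMAs from */
	unsigned int key_len;
};

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	/* DMA-aligned pointer, not merely kmalloc-aligned */
	struct example_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (key_len > sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, key_len);
	ctx->key_len = key_len;
	return 0;
}

static struct skcipher_alg example_alg = {
	.setkey			= example_setkey,
	.base.cra_name		= "example(aes)",
	.base.cra_driver_name	= "example-aes-dma",
	.base.cra_blocksize	= 16,
	.base.cra_module	= THIS_MODULE,
	/* cra_ctxsize is filled in at registration time, see below */
};

static int example_register(void)
{
	/* leave room so crypto_skcipher_ctx_dma() can realign the ctx */
	example_alg.base.cra_ctxsize = sizeof(struct example_ctx) +
				       crypto_dma_padding();
	return crypto_register_skcipher(&example_alg);
}

The request-context side is analogous: crypto_skcipher_set_reqsize_dma() reserves the extra padding and skcipher_request_ctx_dma() returns the realigned pointer, which is what the hunks below switch to.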
Diffstat (limited to 'drivers/crypto/ccp/ccp-crypto-aes-xts.c')
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c  20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 6849261ca47d..93f735d6b02b 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -62,7 +62,7 @@ static struct ccp_unit_size_map xts_unit_sizes[] = {
 static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
 {
 	struct skcipher_request *req = skcipher_request_cast(async_req);
-	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
 
 	if (ret)
 		return ret;
@@ -75,7 +75,7 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
 static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			      unsigned int key_len)
 {
-	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
 	unsigned int ccpversion = ccp_version();
 	int ret;
@@ -105,8 +105,8 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req,
 			     unsigned int encrypt)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
+	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	unsigned int ccpversion = ccp_version();
 	unsigned int fallback = 0;
 	unsigned int unit;
@@ -196,7 +196,7 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req)
 static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
 	struct crypto_skcipher *fallback_tfm;
 
 	ctx->complete = ccp_aes_xts_complete;
@@ -210,15 +210,16 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
 	}
 	ctx->u.aes.tfm_skcipher = fallback_tfm;
 
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
-					 crypto_skcipher_reqsize(fallback_tfm));
+	crypto_skcipher_set_reqsize_dma(tfm,
+					sizeof(struct ccp_aes_req_ctx) +
+					crypto_skcipher_reqsize(fallback_tfm));
 
 	return 0;
 }
 
 static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
 
 	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
 }
@@ -246,7 +247,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
 			  CRYPTO_ALG_KERN_DRIVER_ONLY |
 			  CRYPTO_ALG_NEED_FALLBACK;
 	alg->base.cra_blocksize = AES_BLOCK_SIZE;
-	alg->base.cra_ctxsize = sizeof(struct ccp_ctx);
+	alg->base.cra_ctxsize = sizeof(struct ccp_ctx) +
+				crypto_dma_padding();
 	alg->base.cra_priority = CCP_CRA_PRIORITY;
 	alg->base.cra_module = THIS_MODULE;