author:    Romain Perier <romain.perier@free-electrons.com>  2016-06-21 10:08:34 +0200
committer: Herbert Xu <herbert@gondor.apana.org.au>  2016-06-23 18:14:03 +0800
commit:    bac8e805a30dc2a29c3fdde9d822e59c75e710d7
tree:      0eb0fdc0a856f601eab7f627ffe10e5312d90a3b /drivers/crypto/marvell/tdma.c
parent:    b99acf79a17bf402ea5759ad9d44eba3ed837a59
download:  linux-bac8e805a30dc2a29c3fdde9d822e59c75e710d7.tar.bz2
crypto: marvell - Copy IV vectors by DMA transfers for acipher requests
Add a TDMA descriptor at the end of the request for copying the output IV vector via a DMA transfer. This is a good way to offload as much processing as possible to the DMA and the crypto engine. It is also required for processing multiple cipher requests in chained mode, since otherwise the content of the IV vector would be overwritten by the last processed request.

Signed-off-by: Romain Perier <romain.perier@free-electrons.com>
Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
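For context, here is a minimal caller sketch of how a cipher request could append such an IV-copy descriptor while building its TDMA chain. It mirrors the companion cipher.c change, which the tdma.c-only diffstat below does not show; basereq, ivsize, err_free_tdma and CESA_SA_CRYPT_IV_SRAM_OFFSET are illustrative assumptions, not taken from this patch:

	/* Append a descriptor that DMAs the output IV out of the engine's
	 * SRAM into a dedicated dma_pool buffer once the cipher op is done.
	 */
	ret = mv_cesa_dma_add_iv_op(&basereq->chain,
				    CESA_SA_CRYPT_IV_SRAM_OFFSET,
				    ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;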
Diffstat (limited to 'drivers/crypto/marvell/tdma.c')
-rw-r--r--  drivers/crypto/marvell/tdma.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+), 0 deletions(-)
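Both hunks below hinge on the TDMA descriptor carrying a CPU-side pointer next to its hardware-visible fields. As a reading aid, here is a sketch of struct mv_cesa_tdma_desc as this patch appears to use it; the authoritative layout lives in cesa.h, and the field order and comments here are inferred from usage, not taken from this diff:

	struct mv_cesa_tdma_desc {
		__le32 byte_cnt;	/* transfer size, BIT(31) set as below */
		__le32 src;		/* DMA source address */
		__le32 dst;		/* DMA destination address */
		__le32 next_dma;	/* DMA address of the next descriptor */

		/* Software state, never read by the engine: */
		dma_addr_t cur_dma;
		struct mv_cesa_tdma_desc *next;
		union {
			struct mv_cesa_op_ctx *op;	/* CESA_TDMA_OP */
			void *data;			/* CESA_TDMA_IV */
		};
		u32 flags;
	};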
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 4d23bf95be9c..dc0733538606 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,6 +69,9 @@ void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
 		if (type == CESA_TDMA_OP)
 			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
 				      le32_to_cpu(tdma->src));
+		else if (type == CESA_TDMA_IV)
+			dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
+				      le32_to_cpu(tdma->dst));
 
 		tdma = tdma->next;
 		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -120,6 +123,32 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
 	return new_tdma;
 }
 
+int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+			  u32 size, u32 flags, gfp_t gfp_flags)
+{
+
+	struct mv_cesa_tdma_desc *tdma;
+	u8 *iv;
+	dma_addr_t dma_handle;
+
+	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
+	if (IS_ERR(tdma))
+		return PTR_ERR(tdma);
+
+	iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
+	if (!iv)
+		return -ENOMEM;
+
+	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
+	tdma->src = src;
+	tdma->dst = cpu_to_le32(dma_handle);
+	tdma->data = iv;
+
+	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
+	tdma->flags = flags | CESA_TDMA_IV;
+	return 0;
+}
+
 struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
 					  const struct mv_cesa_op_ctx *op_templ,
 					  bool skip_ctx,
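Two companion pieces from the same series fall outside this tdma.c-only diffstat; the sketches below are best guesses at what they look like, with the pool parameters and the iv_desc/ablkreq names being assumptions:

	/* Setup (cesa.c): a small managed dma_pool backing the IV buffers. */
	dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
	if (!dma->iv_pool)
		return -ENOMEM;

	/* Completion (cipher.c): once the chain has executed, the output IV
	 * sits in the pool buffer pointed to by the descriptor's data field
	 * and can be copied back into the ablkcipher request.
	 */
	memcpy(ablkreq->info, iv_desc->data, ivsize);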