author     Ayush Sawal <ayush.sawal@chelsio.com>    2020-06-10 02:54:31 +0530
committer  David S. Miller <davem@davemloft.net>    2020-06-10 17:05:02 -0700
commit     fb90a1c85d8f08c85d9fd5729bfdeb786119f219
tree       12e1766cd5899d61e1c149f98ed6f3aeff5f2e3f /drivers/crypto/chelsio
parent     934e36ec5e81b8bc079f3d7acd12beb16cad9531
Crypto/chcr: Calculate src and dst sg lengths separately for dma map
This patch calculates the src and dst sg lengths separately for DMA mapping
in the case of an AEAD operation. This fixes a panic caused by accessing a
zero-length sg.

Panic:
[ 138.173225] kernel BUG at drivers/iommu/intel-iommu.c:1184!

Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
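As a quick illustration of the new length bookkeeping, here is a minimal userspace sketch. It is not driver code: the helper name aead_sg_lens() and the example sizes are invented, and op_type != 0 is assumed to mean decryption (as with CHCR_DECRYPT_OP).

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Minimal restatement of the src/dst length math from the patch.
     * op_type != 0 is taken to mean decryption, 0 means encryption.
     */
    static void aead_sg_lens(int assoclen, int cryptlen, int authsize,
                             int op_type, bool in_place,
                             int *src_len, int *dst_len)
    {
            if (in_place) {
                    /* one list holds AAD + payload (+ tag appended on encrypt) */
                    *src_len = assoclen + cryptlen + (op_type ? 0 : authsize);
                    *dst_len = *src_len;
            } else {
                    /* src carries AAD + payload as given ... */
                    *src_len = assoclen + cryptlen;
                    /* ... dst shrinks by the tag on decrypt, grows by it on encrypt */
                    *dst_len = assoclen + cryptlen + (op_type ? -authsize : authsize);
            }
    }

    int main(void)
    {
            int src_len, dst_len;

            /* out-of-place decrypt of a tag-only request: 0-byte AAD,
             * 16-byte input that is entirely the 16-byte auth tag */
            aead_sg_lens(0, 16, 16, 1, false, &src_len, &dst_len);
            printf("src_len=%d dst_len=%d\n", src_len, dst_len);  /* 16 and 0 */
            return 0;
    }

With dst_len coming out as 0 for a request like this, the added !src_len || !dst_len check returns early instead of asking dma_map_sg() to map a destination list that contributes no bytes.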
Diffstat (limited to 'drivers/crypto/chelsio')
-rw-r--r--   drivers/crypto/chelsio/chcr_algo.c | 63
1 file changed, 45 insertions(+), 18 deletions(-)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index f26a7a15551a..f8b55137cf7d 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2590,11 +2590,22 @@ int chcr_aead_dma_map(struct device *dev,
         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
         unsigned int authsize = crypto_aead_authsize(tfm);
-        int dst_size;
+        int src_len, dst_len;
 
-        dst_size = req->assoclen + req->cryptlen + (op_type ?
-                                0 : authsize);
-        if (!req->cryptlen || !dst_size)
+        /* calculate and handle src and dst sg length separately
+         * for inplace and out-of place operations
+         */
+        if (req->src == req->dst) {
+                src_len = req->assoclen + req->cryptlen + (op_type ?
+                                                           0 : authsize);
+                dst_len = src_len;
+        } else {
+                src_len = req->assoclen + req->cryptlen;
+                dst_len = req->assoclen + req->cryptlen + (op_type ?
+                                                           -authsize : authsize);
+        }
+
+        if (!req->cryptlen || !src_len || !dst_len)
                 return 0;
         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
                                         DMA_BIDIRECTIONAL);
@@ -2606,20 +2617,23 @@ int chcr_aead_dma_map(struct device *dev,
         reqctx->b0_dma = 0;
         if (req->src == req->dst) {
                 error = dma_map_sg(dev, req->src,
-                                sg_nents_for_len(req->src, dst_size),
+                                sg_nents_for_len(req->src, src_len),
                                 DMA_BIDIRECTIONAL);
                 if (!error)
                         goto err;
         } else {
-                error = dma_map_sg(dev, req->src, sg_nents(req->src),
+                error = dma_map_sg(dev, req->src,
+                                   sg_nents_for_len(req->src, src_len),
                                    DMA_TO_DEVICE);
                 if (!error)
                         goto err;
-                error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+                error = dma_map_sg(dev, req->dst,
+                                   sg_nents_for_len(req->dst, dst_len),
                                    DMA_FROM_DEVICE);
                 if (!error) {
-                        dma_unmap_sg(dev, req->src, sg_nents(req->src),
-                                     DMA_TO_DEVICE);
+                        dma_unmap_sg(dev, req->src,
+                                     sg_nents_for_len(req->src, src_len),
+                                     DMA_TO_DEVICE);
                         goto err;
                 }
         }
@@ -2637,24 +2651,37 @@ void chcr_aead_dma_unmap(struct device *dev,
         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
         unsigned int authsize = crypto_aead_authsize(tfm);
-        int dst_size;
+        int src_len, dst_len;
 
-        dst_size = req->assoclen + req->cryptlen + (op_type ?
-                                0 : authsize);
-        if (!req->cryptlen || !dst_size)
+        /* calculate and handle src and dst sg length separately
+         * for inplace and out-of place operations
+         */
+        if (req->src == req->dst) {
+                src_len = req->assoclen + req->cryptlen + (op_type ?
+                                                           0 : authsize);
+                dst_len = src_len;
+        } else {
+                src_len = req->assoclen + req->cryptlen;
+                dst_len = req->assoclen + req->cryptlen + (op_type ?
+                                                           -authsize : authsize);
+        }
+
+        if (!req->cryptlen || !src_len || !dst_len)
                 return;
         dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
                          DMA_BIDIRECTIONAL);
         if (req->src == req->dst) {
                 dma_unmap_sg(dev, req->src,
-                             sg_nents_for_len(req->src, dst_size),
+                             sg_nents_for_len(req->src, src_len),
                              DMA_BIDIRECTIONAL);
         } else {
-                dma_unmap_sg(dev, req->src, sg_nents(req->src),
-                             DMA_TO_DEVICE);
-                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
-                             DMA_FROM_DEVICE);
+                dma_unmap_sg(dev, req->src,
+                             sg_nents_for_len(req->src, src_len),
+                             DMA_TO_DEVICE);
+                dma_unmap_sg(dev, req->dst,
+                             sg_nents_for_len(req->dst, dst_len),
+                             DMA_FROM_DEVICE);
         }
 }
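
A note on the second half of the change: the out-of-place paths also move from sg_nents() to sg_nents_for_len(), so only the scatterlist entries needed to cover src_len or dst_len bytes are mapped and unmapped. The toy userspace mock below (hypothetical types, not the kernel scatterlist API) shows the difference when a list ends in a zero-length entry:

    #include <stdio.h>

    /* toy stand-in for a scatterlist entry: just a byte count */
    struct toy_sg { unsigned int length; };

    /* mimic sg_nents(): count every entry in the list */
    static int toy_nents(const struct toy_sg *sg, int total)
    {
            (void)sg;
            return total;
    }

    /* mimic sg_nents_for_len(): count only the entries needed to cover len bytes */
    static int toy_nents_for_len(const struct toy_sg *sg, int total, unsigned int len)
    {
            int n = 0;

            for (int i = 0; i < total && len > 0; i++) {
                    n++;
                    len -= sg[i].length < len ? sg[i].length : len;
            }
            return n;
    }

    int main(void)
    {
            /* 16 data bytes followed by a zero-length tail entry */
            struct toy_sg dst[] = { { 16 }, { 0 } };

            printf("all entries: %d\n", toy_nents(dst, 2));                       /* 2 */
            printf("entries for 16 bytes: %d\n", toy_nents_for_len(dst, 2, 16));  /* 1 */
            return 0;
    }

Counting only the first entry here keeps the zero-length tail out of the DMA mapping path entirely, which is the behaviour the fix relies on once the lengths are computed per list.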