author    | Horia Geantă <horia.geanta@nxp.com>      | 2017-02-10 14:07:25 +0200
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2017-02-15 13:23:43 +0800
commit    | 944c3d4dca34403e802287a1e7e9d02c06dce0d5 (patch)
tree      | fb737fa746ba06a2a709a91d6de57c5e768074ff /drivers/crypto
parent    | 0355d23d4034f42b28db19520bc8865e26053404 (diff)
download  | linux-944c3d4dca34403e802287a1e7e9d02c06dce0d5.tar.bz2
crypto: caam - fix state buffer DMA (un)mapping
If we register the DMA API debug notification chain to receive platform bus
events:

    dma_debug_add_bus(&platform_bus_type);

we start receiving warnings after a simple test like "modprobe caam_jr &&
modprobe caamhash && modprobe -r caamhash && modprobe -r caam_jr":

    platform ffe301000.jr: DMA-API: device driver has pending DMA allocations while released from device [count=1938]

Details of one of the leaked entries:

    [device address=0x0000000173fda090] [size=63 bytes] [mapped with DMA_TO_DEVICE] [mapped as single]
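As a minimal sketch of how such a registration might be wired up (assuming an
arch- or platform-level initcall is an acceptable call site; the placement is
illustrative and not part of this patch):

    #include <linux/init.h>
    #include <linux/dma-debug.h>
    #include <linux/platform_device.h>

    /* Register the platform bus with the DMA-API debug core so that
     * pending-allocation checks run when a device is released from its
     * driver. Requires CONFIG_DMA_API_DEBUG=y; otherwise
     * dma_debug_add_bus() is a no-op stub.
     */
    static int __init platform_dma_debug_init(void)
    {
            dma_debug_add_bus(&platform_bus_type);
            return 0;
    }
    fs_initcall(platform_dma_debug_init);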
It turns out there are several issues with the handling of buf_dma (the
mapping of the buffer holding the previous chunk, which is smaller than the
hash block size):
-detection of a buf_dma mapping failure occurs too late, after a job
 descriptor using that value has already been submitted for execution (see
 the condensed sketch below)
-DMA mapping leak - unmapping is not performed in all places, e.g. in
 ahash_export or in most ahash_fin* callbacks (due to the current
 back-to-back implementation of buf_dma unmapping/mapping)
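Condensed from the try_buf_map_to_sec4_sg() helper removed below (variable
names are the old helper's), the first issue looks like this:

    /* A mapping failure is only noticed here, on the *next* request,
     * via the !dma_mapping_error() test - by which time a job
     * descriptor using buf_dma has already been submitted.
     */
    if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
            dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);

    if (buflen) {
            buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
            /* no dma_mapping_error() check at the point of mapping */
    } else {
            buf_dma = 0;
    }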
Fix these by:
-calling dma_mapping_error() on buf_dma right after the mapping and returning
 an error code if needed
-unmapping buf_dma during the "job done" (ahash_done_*) callbacks, as sketched
 below
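Condensed from the ahash_unmap() hunk below (state and current_buflen() are
the driver's existing helpers), the unmapping now sits on the common
completion path:

    /* Every ahash_done_* callback funnels through ahash_unmap(), which
     * now also releases the state buffer mapping, closing the leak.
     */
    if (state->buf_dma) {
            dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
                             DMA_TO_DEVICE);
            state->buf_dma = 0;
    }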
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- | drivers/crypto/caam/caamhash.c | 107
1 file changed, 52 insertions(+), 55 deletions(-)
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index b37d555a80d0..da4f94eab3da 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -194,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
 	return dst_dma;
 }
 
-/* Map current buffer in state and put it in link table */
-static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
-					    struct sec4_sg_entry *sec4_sg,
-					    u8 *buf, int buflen)
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_sec4_sg(struct device *jrdev,
+				     struct sec4_sg_entry *sec4_sg,
+				     struct caam_hash_state *state)
 {
-	dma_addr_t buf_dma;
+	int buflen = *current_buflen(state);
 
-	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
+	if (!buflen)
+		return 0;
 
-	return buf_dma;
-}
+	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+					DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, state->buf_dma)) {
+		dev_err(jrdev, "unable to map buf\n");
+		state->buf_dma = 0;
+		return -ENOMEM;
+	}
 
-/*
- * Only put buffer in link table if it contains data, which is possible,
- * since a buffer has previously been used, and needs to be unmapped,
- */
-static inline dma_addr_t
-try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
-		       u8 *buf, dma_addr_t buf_dma, int buflen,
-		       int last_buflen)
-{
-	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
-		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
-	if (buflen)
-		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
-	else
-		buf_dma = 0;
-
-	return buf_dma;
+	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
+
+	return 0;
 }
 
 /* Map state->caam_ctx, and add it to link table */
@@ -491,6 +482,8 @@ static inline void ahash_unmap(struct device *dev,
 			struct ahash_edesc *edesc,
 			struct ahash_request *req, int dst_len)
 {
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
 	if (edesc->src_nents)
 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
 	if (edesc->dst_dma)
@@ -499,6 +492,12 @@ static inline void ahash_unmap(struct device *dev,
 	if (edesc->sec4_sg_bytes)
 		dma_unmap_single(dev, edesc->sec4_sg_dma,
 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
+	if (state->buf_dma) {
+		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+				 DMA_TO_DEVICE);
+		state->buf_dma = 0;
+	}
 }
 
 static inline void ahash_unmap_ctx(struct device *dev,
@@ -557,8 +556,8 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 	struct ahash_edesc *edesc;
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-#ifdef DEBUG
 	struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
 	int digestsize = crypto_ahash_digestsize(ahash);
 
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
@@ -569,6 +568,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	switch_buf(state);
 	kfree(edesc);
 
 #ifdef DEBUG
@@ -625,8 +625,8 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	struct ahash_edesc *edesc;
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-#ifdef DEBUG
 	struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
 	int digestsize = crypto_ahash_digestsize(ahash);
 
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
@@ -637,6 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+	switch_buf(state);
 	kfree(edesc);
 
 #ifdef DEBUG
@@ -777,10 +778,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 		if (ret)
 			goto unmap_ctx;
 
-		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
-							edesc->sec4_sg + 1,
-							buf, state->buf_dma,
-							*buflen, last_buflen);
+		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+		if (ret)
+			goto unmap_ctx;
 
 		if (mapped_nents) {
 			sg_to_sec4_sg_last(req->src, mapped_nents,
@@ -795,8 +795,6 @@ static int ahash_update_ctx(struct ahash_request *req)
 				cpu_to_caam32(SEC4_SG_LEN_FIN);
 		}
 
-		switch_buf(state);
-
 		desc = edesc->hw_desc;
 
 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -853,9 +851,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
 	int buflen = *current_buflen(state);
-	int last_buflen = *alt_buflen(state);
 	u32 *desc;
 	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
@@ -882,9 +878,10 @@ static int ahash_final_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
-						buf, state->buf_dma, buflen,
-						last_buflen);
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+	if (ret)
+		goto unmap_ctx;
+
 	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
 		cpu_to_caam32(SEC4_SG_LEN_FIN);
 
@@ -931,9 +928,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
 	int buflen = *current_buflen(state);
-	int last_buflen = *alt_buflen(state);
 	u32 *desc;
 	int sec4_sg_src_index;
 	int src_nents, mapped_nents;
@@ -978,9 +973,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
-						buf, state->buf_dma, buflen,
-						last_buflen);
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+	if (ret)
+		goto unmap_ctx;
 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
 				  sec4_sg_src_index, ctx->ctx_len + buflen,
 				  req->nbytes);
@@ -1016,6 +1011,7 @@ static int ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
@@ -1025,6 +1021,8 @@ static int ahash_digest(struct ahash_request *req)
 	struct ahash_edesc *edesc;
 	int ret;
 
+	state->buf_dma = 0;
+
 	src_nents = sg_nents_for_len(req->src, req->nbytes);
 	if (src_nents < 0) {
 		dev_err(jrdev, "Invalid number of src SG.\n");
@@ -1210,8 +1208,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->dst_dma = 0;
 
-		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
-						    buf, *buflen);
+		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+		if (ret)
+			goto unmap_ctx;
+
 		sg_to_sec4_sg_last(req->src, mapped_nents,
 				   edesc->sec4_sg + 1, 0);
 
@@ -1221,8 +1221,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 						 *next_buflen, 0);
 		}
 
-		switch_buf(state);
-
 		desc = edesc->hw_desc;
 
 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1284,9 +1282,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
 	int buflen = *current_buflen(state);
-	int last_buflen = *alt_buflen(state);
 	u32 *desc;
 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
@@ -1328,9 +1324,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	edesc->src_nents = src_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 
-	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
-						state->buf_dma, buflen,
-						last_buflen);
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+	if (ret)
+		goto unmap;
 
 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
 				  req->nbytes);
@@ -1376,8 +1372,8 @@ static int ahash_update_first(struct ahash_request *req)
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	u8 *next_buf = current_buf(state);
-	int *next_buflen = current_buflen(state);
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state);
 	int to_hash;
 	u32 *desc;
 	int src_nents, mapped_nents;
@@ -1459,6 +1455,7 @@ static int ahash_update_first(struct ahash_request *req)
 		state->final = ahash_final_no_ctx;
 		scatterwalk_map_and_copy(next_buf, req->src, 0,
 					 req->nbytes, 0);
+		switch_buf(state);
 	}
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",