author    | Ryder Lee <ryder.lee@mediatek.com>       | 2017-01-20 13:41:13 +0800
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2017-01-23 22:50:32 +0800
commit    | 059b14947aaa2b5eb66465be5fa0ba816044ab01 (patch)
tree      | 32c863a64e6b72d637027e67ec2634d595cdf31a /drivers/crypto/mediatek/mtk-sha.c
parent    | 0abc271494d1209e097f2837e324ecd9c05716e5 (diff)
download  | linux-059b14947aaa2b5eb66465be5fa0ba816044ab01.tar.bz2
crypto: mediatek - fix typo and indentation
Dummy patch to fix typo and indentation.
Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/mediatek/mtk-sha.c')
-rw-r--r-- | drivers/crypto/mediatek/mtk-sha.c | 40
1 file changed, 17 insertions, 23 deletions
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 2536ebc53602..55e3805fba07 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -317,9 +317,9 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
  * Update input data length field of transform information and
  * map it to DMA region.
  */
-static int mtk_sha_info_map(struct mtk_cryp *cryp,
-			    struct mtk_sha_rec *sha,
-			    size_t len)
+static int mtk_sha_info_update(struct mtk_cryp *cryp,
+			       struct mtk_sha_rec *sha,
+			       size_t len)
 {
 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 	struct mtk_sha_info *info = &ctx->info;
@@ -338,7 +338,7 @@ static int mtk_sha_info_map(struct mtk_cryp *cryp,
 	ctx->digcnt += len;
 
 	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
-				    DMA_BIDIRECTIONAL);
+				     DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
 		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
 		return -EINVAL;
@@ -430,20 +430,15 @@ static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
 	struct mtk_desc *res = ring->res_base + ring->res_pos;
 	int err;
 
-	err = mtk_sha_info_map(cryp, sha, len);
+	err = mtk_sha_info_update(cryp, sha, len);
 	if (err)
 		return err;
 
 	/* Fill in the command/result descriptors */
-	res->hdr = MTK_DESC_FIRST |
-		   MTK_DESC_LAST |
-		   MTK_DESC_BUF_LEN(len);
-
+	res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
 	res->buf = cpu_to_le32(cryp->tmp_dma);
 
-	cmd->hdr = MTK_DESC_FIRST |
-		   MTK_DESC_LAST |
-		   MTK_DESC_BUF_LEN(len) |
+	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
 		   MTK_DESC_CT_LEN(ctx->ct_size);
 
 	cmd->buf = cpu_to_le32(addr);
@@ -477,7 +472,7 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
 	struct mtk_desc *res = ring->res_base + ring->res_pos;
 	int err;
 
-	err = mtk_sha_info_map(cryp, sha, len1 + len2);
+	err = mtk_sha_info_update(cryp, sha, len1 + len2);
 	if (err)
 		return err;
 
@@ -485,8 +480,7 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
 	res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST;
 	res->buf = cpu_to_le32(cryp->tmp_dma);
 
-	cmd->hdr = MTK_DESC_BUF_LEN(len1) |
-		   MTK_DESC_FIRST |
+	cmd->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST |
 		   MTK_DESC_CT_LEN(ctx->ct_size);
 	cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
 	cmd->ct = cpu_to_le32(ctx->ct_dma);
@@ -530,7 +524,7 @@ static int mtk_sha_dma_map(struct mtk_cryp *cryp,
 			   size_t count)
 {
 	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
-				      SHA_BUF_SIZE, DMA_TO_DEVICE);
+				       SHA_BUF_SIZE, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 		dev_err(cryp->dev, "dma map error\n");
 		return -EINVAL;
@@ -619,7 +613,7 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
 		mtk_sha_fill_padding(ctx, len);
 
 		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
-					      SHA_BUF_SIZE, DMA_TO_DEVICE);
+					       SHA_BUF_SIZE, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 			dev_err(cryp->dev, "dma map bytes error\n");
 			return -EINVAL;
@@ -658,8 +652,7 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
 
 static int mtk_sha_final_req(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
 {
-	struct ahash_request *req = sha->req;
-	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 	size_t count;
 
 	mtk_sha_fill_padding(ctx, 0);
@@ -690,7 +683,8 @@ static int mtk_sha_finish(struct ahash_request *req)
 }
 
 static void mtk_sha_finish_req(struct mtk_cryp *cryp,
-			       struct mtk_sha_rec *sha, int err)
+			       struct mtk_sha_rec *sha,
+			       int err)
 {
 	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
 		err = mtk_sha_finish(sha->req);
@@ -850,8 +844,8 @@ static int mtk_sha_digest(struct ahash_request *req)
 	return mtk_sha_init(req) ?: mtk_sha_finup(req);
 }
 
-static int mtk_sha_setkey(struct crypto_ahash *tfm,
-			  const unsigned char *key, u32 keylen)
+static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+			  u32 keylen)
 {
 	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 	struct mtk_sha_hmac_ctx *bctx = tctx->base;
@@ -863,7 +857,7 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm,
 
 	shash->tfm = bctx->shash;
 	shash->flags = crypto_shash_get_flags(bctx->shash) &
-		      CRYPTO_TFM_REQ_MAY_SLEEP;
+		       CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	if (keylen > bs) {
 		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
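For reference, the whitespace portion of the hunks above follows the usual kernel continuation-line convention: wrapped arguments are lined up under the opening parenthesis of the call or declaration. A minimal sketch of that style, reusing the same DMA mapping calls seen in the driver; the function name example_map and its dev/buf/out parameters are hypothetical placeholders, not part of this patch or of mtk-sha.c:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Illustration only: the wrapped DMA_TO_DEVICE argument sits directly
 * under the opening parenthesis of dma_map_single(), matching the
 * alignment applied by the hunks above.
 */
static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *out)
{
	*out = dma_map_single(dev, buf, len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, *out)))
		return -EINVAL;

	return 0;
}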