Diffstat (limited to 'drivers/crypto/mediatek/mtk-aes.c')
-rw-r--r-- | drivers/crypto/mediatek/mtk-aes.c | 421
1 file changed, 213 insertions, 208 deletions
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 3a47cdb8f0c8..9e845e866dec 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -19,13 +19,10 @@
 #define AES_BUF_ORDER		2
 #define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
 				& ~(AES_BLOCK_SIZE - 1))
+#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
+				AES_BLOCK_SIZE * 2)
+#define AES_MAX_CT_SIZE		6
 
-/* AES command token size */
-#define AES_CT_SIZE_ECB		2
-#define AES_CT_SIZE_CBC		3
-#define AES_CT_SIZE_CTR		3
-#define AES_CT_SIZE_GCM_OUT	5
-#define AES_CT_SIZE_GCM_IN	6
 #define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
 
 /* AES-CBC/ECB/CTR command token */
@@ -50,6 +47,8 @@
 #define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
 #define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
 #define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
+#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
+#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
 /* AES transform information word 1 fields */
 #define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
 #define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
@@ -59,10 +58,9 @@
 #define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
 #define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
 #define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
-#define AES_TFM_GHASH_DIG	cpu_to_le32(0x2 << 21)
-#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
 
 /* AES flags */
+#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
 #define AES_FLAGS_ECB		BIT(0)
 #define AES_FLAGS_CBC		BIT(1)
 #define AES_FLAGS_CTR		BIT(2)
@@ -70,19 +68,15 @@
 #define AES_FLAGS_ENCRYPT	BIT(4)
 #define AES_FLAGS_BUSY		BIT(5)
 
+#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
+
 /**
- * Command token(CT) is a set of hardware instructions that
- * are used to control engine's processing flow of AES.
- *
- * Transform information(TFM) is used to define AES state and
- * contains all keys and initial vectors.
- *
- * The engine requires CT and TFM to do:
- * - Commands decoding and control of the engine's data path.
- * - Coordinating hardware data fetch and store operations.
- * - Result token construction and output.
+ * mtk_aes_info - hardware information of AES
+ * @cmd: command token, hardware instruction
+ * @tfm: transform state of cipher algorithm.
+ * @state: contains keys and initial vectors.
  *
- * Memory map of GCM's TFM:
+ * Memory layout of GCM buffer:
  * /-----------\
  * | AES KEY   |  128/196/256 bits
  * |-----------|
@@ -90,14 +84,16 @@
  * |-----------|
  * | IVs       |  4 * 4 bytes
  * \-----------/
+ *
+ * The engine requires all these info to do:
+ * - Commands decoding and control of the engine's data path.
+ * - Coordinating hardware data fetch and store operations.
+ * - Result token construction and output.
  */
-struct mtk_aes_ct {
-	__le32 cmd[AES_CT_SIZE_GCM_IN];
-};
-
-struct mtk_aes_tfm {
-	__le32 ctrl[2];
-	__le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE * 2)];
+struct mtk_aes_info {
+	__le32 cmd[AES_MAX_CT_SIZE];
+	__le32 tfm[2];
+	__le32 state[AES_MAX_STATE_BUF_SIZE];
 };
 
 struct mtk_aes_reqctx {
@@ -107,11 +103,12 @@ struct mtk_aes_reqctx {
 struct mtk_aes_base_ctx {
 	struct mtk_cryp *cryp;
 	u32 keylen;
+	__le32 keymode;
+
 	mtk_aes_fn start;
 
-	struct mtk_aes_ct ct;
+	struct mtk_aes_info info;
 	dma_addr_t ct_dma;
-	struct mtk_aes_tfm tfm;
 	dma_addr_t tfm_dma;
 
 	__le32 ct_hdr;
@@ -248,6 +245,33 @@ static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
 	sg->length += dma->remainder;
 }
 
+static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
+{
+	int i;
+
+	for (i = 0; i < SIZE_IN_WORDS(size); i++)
+		dst[i] = cpu_to_le32(src[i]);
+}
+
+static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
+{
+	int i;
+
+	for (i = 0; i < SIZE_IN_WORDS(size); i++)
+		dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline int mtk_aes_complete(struct mtk_cryp *cryp,
+				   struct mtk_aes_rec *aes,
+				   int err)
+{
+	aes->flags &= ~AES_FLAGS_BUSY;
+	aes->areq->complete(aes->areq, err);
+	/* Handle new request */
+	tasklet_schedule(&aes->queue_task);
+	return err;
+}
+
 /*
  * Write descriptors for processing. This will configure the engine, load
  * the transform information and then start the packet processing.
@@ -262,7 +286,7 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 
 	/* Write command descriptors */
 	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
-		cmd = ring->cmd_base + ring->cmd_pos;
+		cmd = ring->cmd_next;
 		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
 		cmd->buf = cpu_to_le32(sg_dma_address(ssg));
 
@@ -274,25 +298,30 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
 		}
 
-		if (++ring->cmd_pos == MTK_DESC_NUM)
-			ring->cmd_pos = 0;
+		/* Shift ring buffer and check boundary */
+		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
+			ring->cmd_next = ring->cmd_base;
 	}
 	cmd->hdr |= MTK_DESC_LAST;
 
 	/* Prepare result descriptors */
 	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
-		res = ring->res_base + ring->res_pos;
+		res = ring->res_next;
 		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
 		res->buf = cpu_to_le32(sg_dma_address(dsg));
 
 		if (nents == 0)
 			res->hdr |= MTK_DESC_FIRST;
 
-		if (++ring->res_pos == MTK_DESC_NUM)
-			ring->res_pos = 0;
+		/* Shift ring buffer and check boundary */
+		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
+			ring->res_next = ring->res_base;
 	}
 	res->hdr |= MTK_DESC_LAST;
 
+	/* Pointer to current result descriptor */
+	ring->res_prev = res;
+
 	/* Prepare enough space for authenticated tag */
 	if (aes->flags & AES_FLAGS_GCM)
 		res->hdr += AES_BLOCK_SIZE;
@@ -313,9 +342,7 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 {
 	struct mtk_aes_base_ctx *ctx = aes->ctx;
 
-	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
-			 DMA_TO_DEVICE);
-	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
 			 DMA_TO_DEVICE);
 
 	if (aes->src.sg == aes->dst.sg) {
@@ -346,16 +373,14 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 {
 	struct mtk_aes_base_ctx *ctx = aes->ctx;
+	struct mtk_aes_info *info = &ctx->info;
 
-	ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
+	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
-		return -EINVAL;
+		goto exit;
 
-	ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
-				     DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma)))
-		goto tfm_map_err;
+	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
 
 	if (aes->src.sg == aes->dst.sg) {
 		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
@@ -382,13 +407,9 @@ static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 	return mtk_aes_xmit(cryp, aes);
 
 sg_map_err:
-	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
-			 DMA_TO_DEVICE);
-tfm_map_err:
-	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
-			 DMA_TO_DEVICE);
-
-	return -EINVAL;
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
+exit:
+	return mtk_aes_complete(cryp, aes, -EINVAL);
 }
 
 /* Initialize transform information of CBC/ECB/CTR mode */
@@ -397,50 +418,43 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
 {
 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
 	struct mtk_aes_base_ctx *ctx = aes->ctx;
+	struct mtk_aes_info *info = &ctx->info;
+	u32 cnt = 0;
 
 	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
-	ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
-	ctx->ct.cmd[1] = AES_CMD1;
+	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
+	info->cmd[cnt++] = AES_CMD1;
 
+	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
 	if (aes->flags & AES_FLAGS_ENCRYPT)
-		ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
+		info->tfm[0] |= AES_TFM_BASIC_OUT;
 	else
-		ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;
+		info->tfm[0] |= AES_TFM_BASIC_IN;
 
-	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
-		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
-	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
-		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
-	else
-		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
-
-	if (aes->flags & AES_FLAGS_CBC) {
-		const u32 *iv = (const u32 *)req->info;
-		u32 *iv_state = ctx->tfm.state + ctx->keylen;
-		int i;
-
-		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
-				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
-		ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;
-
-		for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
-			iv_state[i] = cpu_to_le32(iv[i]);
-
-		ctx->ct.cmd[2] = AES_CMD2;
-		ctx->ct_size = AES_CT_SIZE_CBC;
-	} else if (aes->flags & AES_FLAGS_ECB) {
-		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
-		ctx->tfm.ctrl[1] = AES_TFM_ECB;
-
-		ctx->ct_size = AES_CT_SIZE_ECB;
-	} else if (aes->flags & AES_FLAGS_CTR) {
-		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
-				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
-		ctx->tfm.ctrl[1] = AES_TFM_CTR_LOAD | AES_TFM_FULL_IV;
-
-		ctx->ct.cmd[2] = AES_CMD2;
-		ctx->ct_size = AES_CT_SIZE_CTR;
+	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
+	case AES_FLAGS_CBC:
+		info->tfm[1] = AES_TFM_CBC;
+		break;
+	case AES_FLAGS_ECB:
+		info->tfm[1] = AES_TFM_ECB;
+		goto ecb;
+	case AES_FLAGS_CTR:
+		info->tfm[1] = AES_TFM_CTR_LOAD;
+		goto ctr;
+
+	default:
+		/* Should not happen... */
+		return;
 	}
+
+	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
+			       AES_BLOCK_SIZE);
+ctr:
+	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
+	info->tfm[1] |= AES_TFM_FULL_IV;
+	info->cmd[cnt++] = AES_CMD2;
+ecb:
+	ctx->ct_size = cnt;
 }
 
 static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -465,7 +479,7 @@ static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
 		padlen = mtk_aes_padlen(len);
 
 		if (len + padlen > AES_BUF_SIZE)
-			return -ENOMEM;
+			return mtk_aes_complete(cryp, aes, -ENOMEM);
 
 		if (!src_aligned) {
 			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -525,13 +539,10 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
 	return ctx->start(cryp, aes);
 }
 
-static int mtk_aes_complete(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
+				     struct mtk_aes_rec *aes)
 {
-	aes->flags &= ~AES_FLAGS_BUSY;
-	aes->areq->complete(aes->areq, 0);
-
-	/* Handle new request */
-	return mtk_aes_handle_queue(cryp, aes->id, NULL);
+	return mtk_aes_complete(cryp, aes, 0);
 }
 
 static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
@@ -540,7 +551,7 @@ static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 
 	mtk_aes_set_mode(aes, rctx);
-	aes->resume = mtk_aes_complete;
+	aes->resume = mtk_aes_transfer_complete;
 
 	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
 }
@@ -557,15 +568,14 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
 	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
 	struct scatterlist *src, *dst;
-	int i;
-	u32 start, end, ctr, blocks, *iv_state;
+	u32 start, end, ctr, blocks;
 	size_t datalen;
 	bool fragmented = false;
 
 	/* Check for transfer completion. */
 	cctx->offset += aes->total;
 	if (cctx->offset >= req->nbytes)
-		return mtk_aes_complete(cryp, aes);
+		return mtk_aes_transfer_complete(cryp, aes);
 
 	/* Compute data length. */
 	datalen = req->nbytes - cctx->offset;
@@ -587,9 +597,8 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
 
 	/* Write IVs into transform state buffer. */
-	iv_state = ctx->tfm.state + ctx->keylen;
-	for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
-		iv_state[i] = cpu_to_le32(cctx->iv[i]);
+	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
+			       AES_BLOCK_SIZE);
 
 	if (unlikely(fragmented)) {
 		/*
@@ -599,7 +608,6 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 		cctx->iv[3] = cpu_to_be32(ctr);
 		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
 	}
-	aes->resume = mtk_aes_ctr_transfer;
 
 	return mtk_aes_dma(cryp, aes, src, dst, datalen);
 }
@@ -615,6 +623,7 @@ static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
 	cctx->offset = 0;
 	aes->total = 0;
+	aes->resume = mtk_aes_ctr_transfer;
 
 	return mtk_aes_ctr_transfer(cryp, aes);
 }
@@ -624,21 +633,25 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
 			  const u8 *key, u32 keylen)
 {
 	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	const u32 *aes_key = (const u32 *)key;
-	u32 *key_state = ctx->tfm.state;
-	int i;
 
-	if (keylen != AES_KEYSIZE_128 &&
-	    keylen != AES_KEYSIZE_192 &&
-	    keylen != AES_KEYSIZE_256) {
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->keymode = AES_TFM_128BITS;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->keymode = AES_TFM_192BITS;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->keymode = AES_TFM_256BITS;
+		break;
+
+	default:
 		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
 	ctx->keylen = SIZE_IN_WORDS(keylen);
-
-	for (i = 0; i < ctx->keylen; i++)
-		key_state[i] = cpu_to_le32(aes_key[i]);
+	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
 
 	return 0;
 }
@@ -789,6 +802,19 @@ mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
 	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
 }
 
+/*
+ * Engine will verify and compare tag automatically, so we just need
+ * to check returned status which stored in the result descriptor.
+ */
+static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
+				  struct mtk_aes_rec *aes)
+{
+	u32 status = cryp->ring[aes->id]->res_prev->ct;
+
+	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
+				-EBADMSG : 0);
+}
+
 /* Initialize transform information of GCM mode */
 static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
 				  struct mtk_aes_rec *aes,
@@ -797,45 +823,35 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
 	struct aead_request *req = aead_request_cast(aes->areq);
 	struct mtk_aes_base_ctx *ctx = aes->ctx;
 	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
-	const u32 *iv = (const u32 *)req->iv;
-	u32 *iv_state = ctx->tfm.state + ctx->keylen +
-			SIZE_IN_WORDS(AES_BLOCK_SIZE);
+	struct mtk_aes_info *info = &ctx->info;
 	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-	int i;
+	u32 cnt = 0;
 
 	ctx->ct_hdr = AES_CT_CTRL_HDR | len;
 
-	ctx->ct.cmd[0] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
-	ctx->ct.cmd[1] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
-	ctx->ct.cmd[2] = AES_GCM_CMD2;
-	ctx->ct.cmd[3] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
+	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
+	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
+	info->cmd[cnt++] = AES_GCM_CMD2;
+	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
 
 	if (aes->flags & AES_FLAGS_ENCRYPT) {
-		ctx->ct.cmd[4] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
-		ctx->ct_size = AES_CT_SIZE_GCM_OUT;
-		ctx->tfm.ctrl[0] = AES_TFM_GCM_OUT;
+		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
+		info->tfm[0] = AES_TFM_GCM_OUT;
 	} else {
-		ctx->ct.cmd[4] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
-		ctx->ct.cmd[5] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
-		ctx->ct_size = AES_CT_SIZE_GCM_IN;
-		ctx->tfm.ctrl[0] = AES_TFM_GCM_IN;
+		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
+		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
+		info->tfm[0] = AES_TFM_GCM_IN;
 	}
+	ctx->ct_size = cnt;
 
-	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
-		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
-	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
-		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
-	else
-		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
+	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
+			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
+			ctx->keymode;
+	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
+		       AES_TFM_ENC_HASH;
 
-	ctx->tfm.ctrl[0] |= AES_TFM_GHASH_DIG | AES_TFM_GHASH |
-			    AES_TFM_SIZE(ctx->keylen + SIZE_IN_WORDS(
-			    AES_BLOCK_SIZE + ivsize));
-	ctx->tfm.ctrl[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE |
-			   AES_TFM_3IV | AES_TFM_ENC_HASH;
-
-	for (i = 0; i < SIZE_IN_WORDS(ivsize); i++)
-		iv_state[i] = cpu_to_le32(iv[i]);
+	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
 }
 
 static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -856,7 +872,7 @@ static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
 
 	if (!src_aligned || !dst_aligned) {
 		if (aes->total > AES_BUF_SIZE)
-			return -ENOMEM;
+			return mtk_aes_complete(cryp, aes, -ENOMEM);
 
 		if (!src_aligned) {
 			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -892,6 +908,8 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 
 	if (aes->flags & AES_FLAGS_ENCRYPT) {
 		u32 tag[4];
+
+		aes->resume = mtk_aes_transfer_complete;
 		/* Compute total process length. */
 		aes->total = len + gctx->authsize;
 		/* Compute text length. */
@@ -899,10 +917,10 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 		/* Hardware will append authenticated tag to output buffer */
 		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
 	} else {
+		aes->resume = mtk_aes_gcm_tag_verify;
 		aes->total = len;
 		gctx->textlen = req->cryptlen - gctx->authsize;
 	}
-	aes->resume = mtk_aes_complete;
 
 	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
 }
@@ -915,7 +933,7 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
 	rctx->mode = AES_FLAGS_GCM | mode;
 	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
-			&req->base);
+				    &req->base);
 }
 
 static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
 {
@@ -949,24 +967,26 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 		struct scatterlist sg[1];
 		struct skcipher_request req;
 	} *data;
-	const u32 *aes_key;
-	u32 *key_state, *hash_state;
-	int err, i;
+	int err;
 
-	if (keylen != AES_KEYSIZE_256 &&
-	    keylen != AES_KEYSIZE_192 &&
-	    keylen != AES_KEYSIZE_128) {
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->keymode = AES_TFM_128BITS;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->keymode = AES_TFM_192BITS;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->keymode = AES_TFM_256BITS;
+		break;
+
+	default:
 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
-	key_state = ctx->tfm.state;
-	aes_key = (u32 *)key;
 	ctx->keylen = SIZE_IN_WORDS(keylen);
 
-	for (i = 0; i < ctx->keylen; i++)
-		ctx->tfm.state[i] = cpu_to_le32(aes_key[i]);
-
 	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
 	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
 	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
@@ -1001,10 +1021,11 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	if (err)
 		goto out;
 
-	hash_state = key_state + ctx->keylen;
-
-	for (i = 0; i < 4; i++)
-		hash_state[i] = cpu_to_be32(data->hash[i]);
+	/* Write key into state buffer */
+	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
+	/* Write key(H) into state buffer */
+	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
+			       AES_BLOCK_SIZE);
 out:
 	kzfree(data);
 	return err;
@@ -1092,58 +1113,36 @@ static struct aead_alg aes_gcm_alg = {
 	},
 };
 
-static void mtk_aes_enc_task(unsigned long data)
+static void mtk_aes_queue_task(unsigned long data)
 {
-	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
-	struct mtk_aes_rec *aes = cryp->aes[0];
+	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
 
-	mtk_aes_unmap(cryp, aes);
-	aes->resume(cryp, aes);
+	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
 }
 
-static void mtk_aes_dec_task(unsigned long data)
+static void mtk_aes_done_task(unsigned long data)
 {
-	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
-	struct mtk_aes_rec *aes = cryp->aes[1];
+	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
+	struct mtk_cryp *cryp = aes->cryp;
 
 	mtk_aes_unmap(cryp, aes);
 	aes->resume(cryp, aes);
 }
 
-static irqreturn_t mtk_aes_enc_irq(int irq, void *dev_id)
+static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
 {
-	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
-	struct mtk_aes_rec *aes = cryp->aes[0];
-	u32 val = mtk_aes_read(cryp, RDR_STAT(RING0));
+	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
+	struct mtk_cryp *cryp = aes->cryp;
+	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
 
-	mtk_aes_write(cryp, RDR_STAT(RING0), val);
+	mtk_aes_write(cryp, RDR_STAT(aes->id), val);
 
 	if (likely(AES_FLAGS_BUSY & aes->flags)) {
-		mtk_aes_write(cryp, RDR_PROC_COUNT(RING0), MTK_CNT_RST);
-		mtk_aes_write(cryp, RDR_THRESH(RING0),
+		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
+		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
 
-		tasklet_schedule(&aes->task);
-	} else {
-		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
-	}
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t mtk_aes_dec_irq(int irq, void *dev_id)
-{
-	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
-	struct mtk_aes_rec *aes = cryp->aes[1];
-	u32 val = mtk_aes_read(cryp, RDR_STAT(RING1));
-
-	mtk_aes_write(cryp, RDR_STAT(RING1), val);
-
-	if (likely(AES_FLAGS_BUSY & aes->flags)) {
-		mtk_aes_write(cryp, RDR_PROC_COUNT(RING1), MTK_CNT_RST);
-		mtk_aes_write(cryp, RDR_THRESH(RING1),
-			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
-		tasklet_schedule(&aes->task);
+		tasklet_schedule(&aes->done_task);
 	} else {
 		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
 	}
@@ -1171,14 +1170,20 @@ static int mtk_aes_record_init(struct mtk_cryp *cryp)
 		if (!aes[i]->buf)
 			goto err_cleanup;
 
-		aes[i]->id = i;
+		aes[i]->cryp = cryp;
 
 		spin_lock_init(&aes[i]->lock);
 		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
+
+		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
+			     (unsigned long)aes[i]);
+		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
+			     (unsigned long)aes[i]);
 	}
 
-	tasklet_init(&aes[0]->task, mtk_aes_enc_task, (unsigned long)cryp);
-	tasklet_init(&aes[1]->task, mtk_aes_dec_task, (unsigned long)cryp);
+	/* Link to ring0 and ring1 respectively */
+	aes[0]->id = MTK_RING0;
+	aes[1]->id = MTK_RING1;
 
 	return 0;
 
@@ -1196,7 +1201,9 @@ static void mtk_aes_record_free(struct mtk_cryp *cryp)
 	int i;
 
 	for (i = 0; i < MTK_REC_NUM; i++) {
-		tasklet_kill(&cryp->aes[i]->task);
+		tasklet_kill(&cryp->aes[i]->done_task);
+		tasklet_kill(&cryp->aes[i]->queue_task);
+
 		free_page((unsigned long)cryp->aes[i]->buf);
 		kfree(cryp->aes[i]);
 	}
@@ -1246,25 +1253,23 @@ int mtk_cipher_alg_register(struct mtk_cryp *cryp)
 	if (ret)
 		goto err_record;
 
-	/* Ring0 is use by encryption record */
-	ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_enc_irq,
-			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
+			       0, "mtk-aes", cryp->aes[0]);
 	if (ret) {
-		dev_err(cryp->dev, "unable to request AES encryption irq.\n");
+		dev_err(cryp->dev, "unable to request AES irq.\n");
 		goto err_res;
 	}
 
-	/* Ring1 is use by decryption record */
-	ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_dec_irq,
-			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
+	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
+			       0, "mtk-aes", cryp->aes[1]);
 	if (ret) {
-		dev_err(cryp->dev, "unable to request AES decryption irq.\n");
+		dev_err(cryp->dev, "unable to request AES irq.\n");
 		goto err_res;
 	}
 
 	/* Enable ring0 and ring1 interrupt */
-	mtk_aes_write(cryp, AIC_ENABLE_SET(RING0), MTK_IRQ_RDR0);
-	mtk_aes_write(cryp, AIC_ENABLE_SET(RING1), MTK_IRQ_RDR1);
+	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
+	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
 
 	spin_lock(&mtk_aes.lock);
 	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
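
Note on the buffer consolidation: the patch folds the separate command-token (mtk_aes_ct) and transform (mtk_aes_tfm) buffers into one mtk_aes_info block, so a single dma_map_single() covers the command token, the transform words and the key/IV state, and the transform address is derived as an offset (ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd)). The small user-space C sketch below only illustrates that layout; the uint32_t stand-in for __le32, the expanded macro values and the fake DMA address 0x1000 are assumptions for demonstration, not driver code.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Mirrors the layout introduced by the patch (sizes in 32-bit words). */
#define AES_MAX_CT_SIZE		6
#define AES_MAX_STATE_BUF_SIZE	((32 + 16 * 2) / 4)	/* key + block * 2, in words */

struct mtk_aes_info {
	uint32_t cmd[AES_MAX_CT_SIZE];			/* command token */
	uint32_t tfm[2];				/* transform information */
	uint32_t state[AES_MAX_STATE_BUF_SIZE];		/* key, hash key H, IVs */
};

int main(void)
{
	/* Pretend DMA address of the whole block ("ct_dma" in the driver). */
	unsigned long ct_dma = 0x1000;
	/* The transform address is just an offset into the same mapping. */
	unsigned long tfm_dma = ct_dma + sizeof(((struct mtk_aes_info *)0)->cmd);

	printf("info size   : %zu bytes (one mapping covers it)\n",
	       sizeof(struct mtk_aes_info));
	printf("cmd  offset : %zu\n", offsetof(struct mtk_aes_info, cmd));
	printf("tfm  offset : %zu -> tfm_dma = 0x%lx\n",
	       offsetof(struct mtk_aes_info, tfm), tfm_dma);
	printf("state offset: %zu\n", offsetof(struct mtk_aes_info, state));
	return 0;
}

Because the transform words and state now sit in the same mapping as the command token, the error path also needs only one dma_unmap_single(), which is exactly what the reworked mtk_aes_map()/mtk_aes_unmap() above do.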