author    | Gary R Hook <gary.hook@amd.com>          | 2017-03-15 13:21:01 -0500
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2017-03-24 22:02:55 +0800
commit    | 36cf515b9bbe298e1ce7384620f0d4ec45ad3328 (patch)
tree      | 4772df5332d0e077e6eb51660a064c632ce3ec18 /drivers/crypto
parent    | 990672d48515ce09c76fcf1ceccee48b0dd1942b (diff)
download  | linux-36cf515b9bbe298e1ce7384620f0d4ec45ad3328.tar.bz2
crypto: ccp - Enable support for AES GCM on v5 CCPs
A version 5 device provides the primitive commands
required for AES GCM. This patch adds support for
encryption and decryption.
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
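
Once this driver registers "gcm(aes)" (driver name gcm-aes-ccp), in-kernel users reach it through the generic AEAD API rather than calling the CCP code directly. The sketch below shows one way a caller could exercise it; it is not part of the patch. The example_* names are made up, error handling is trimmed, and the single in-place buffer layout (AAD, then plaintext, then 16 spare bytes for the tag) is an assumption of the example, not something the driver requires.

#include <crypto/aead.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_wait {
	struct completion done;
	int err;
};

static void example_aead_done(struct crypto_async_request *areq, int err)
{
	struct example_wait *wait = areq->data;

	if (err == -EINPROGRESS)	/* backlogged request has started; keep waiting */
		return;
	wait->err = err;
	complete(&wait->done);
}

/* Encrypt in place: buf = [AAD | plaintext | 16 bytes of room for the tag] */
static int example_gcm_encrypt(const u8 *key, unsigned int key_len,
			       u8 *iv, u8 *buf, unsigned int assoclen,
			       unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	struct example_wait wait;
	int ret;

	/* The crypto core picks gcm-aes-ccp when its priority wins on this system */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);	/* full 16-byte tag */
	if (ret)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	init_completion(&wait.done);
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_ad(req, assoclen);
	/* iv is the 12-byte GCM nonce; cryptlen counts only the plaintext */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  example_aead_done, &wait);

	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&wait.done);
		ret = wait.err;
	}

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return ret;
}

On kernels that provide them, DECLARE_CRYPTO_WAIT() and crypto_wait_req() can replace the hand-rolled completion above.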
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- | drivers/crypto/ccp/Makefile                |   1
-rw-r--r-- | drivers/crypto/ccp/ccp-crypto-aes-galois.c | 252
-rw-r--r-- | drivers/crypto/ccp/ccp-crypto-main.c       |  12
-rw-r--r-- | drivers/crypto/ccp/ccp-crypto.h            |  14
-rw-r--r-- | drivers/crypto/ccp/ccp-ops.c               | 252
5 files changed, 531 insertions, 0 deletions
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index d2044b793aa8..60919a3ec53b 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -12,5 +12,6 @@ ccp-crypto-objs := ccp-crypto-main.o \
 	   ccp-crypto-aes.o \
 	   ccp-crypto-aes-cmac.o \
 	   ccp-crypto-aes-xts.o \
+	   ccp-crypto-aes-galois.o \
 	   ccp-crypto-des3.o \
 	   ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
new file mode 100644
index 000000000000..38ee6f348ea9
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -0,0 +1,252 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/scatterwalk.h>
+#include <linux/delay.h>
+
+#include "ccp-crypto.h"
+
+#define AES_GCM_IVSIZE	12
+
+static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
+{
+	return ret;
+}
+
+static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+			      unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		ctx->u.aes.type = CCP_AES_TYPE_128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->u.aes.type = CCP_AES_TYPE_192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->u.aes.type = CCP_AES_TYPE_256;
+		break;
+	default:
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->u.aes.mode = CCP_AES_MODE_GCM;
+	ctx->u.aes.key_len = key_len;
+
+	memcpy(ctx->u.aes.key, key, key_len);
+	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+	return 0;
+}
+
+static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+				   unsigned int authsize)
+{
+	return 0;
+}
+
+static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+	struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
+	struct scatterlist *iv_sg = NULL;
+	unsigned int iv_len = 0;
+	int i;
+	int ret = 0;
+
+	if (!ctx->u.aes.key_len)
+		return -EINVAL;
+
+	if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
+		return -EINVAL;
+
+	if (!req->iv)
+		return -EINVAL;
+
+	/*
+	 * 5 parts:
+	 *   plaintext/ciphertext input
+	 *   AAD
+	 *   key
+	 *   IV
+	 *   Destination+tag buffer
+	 */
+
+	/* Prepare the IV: 12 bytes + an integer (counter) */
+	memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
+	for (i = 0; i < 3; i++)
+		rctx->iv[i + AES_GCM_IVSIZE] = 0;
+	rctx->iv[AES_BLOCK_SIZE - 1] = 1;
+
+	/* Set up a scatterlist for the IV */
+	iv_sg = &rctx->iv_sg;
+	iv_len = AES_BLOCK_SIZE;
+	sg_init_one(iv_sg, rctx->iv, iv_len);
+
+	/* The AAD + plaintext are concatenated in the src buffer */
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_AES;
+	rctx->cmd.u.aes.type = ctx->u.aes.type;
+	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+	rctx->cmd.u.aes.action = encrypt;
+	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+	rctx->cmd.u.aes.iv = iv_sg;
+	rctx->cmd.u.aes.iv_len = iv_len;
+	rctx->cmd.u.aes.src = req->src;
+	rctx->cmd.u.aes.src_len = req->cryptlen;
+	rctx->cmd.u.aes.aad_len = req->assoclen;
+
+	/* The cipher text + the tag are in the dst buffer */
+	rctx->cmd.u.aes.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_aes_gcm_encrypt(struct aead_request *req)
+{
+	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
+}
+
+static int ccp_aes_gcm_decrypt(struct aead_request *req)
+{
+	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
+}
+
+static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->complete = ccp_aes_gcm_complete;
+	ctx->u.aes.key_len = 0;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+
+	return 0;
+}
+
+static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct aead_alg ccp_aes_gcm_defaults = {
+	.setkey = ccp_aes_gcm_setkey,
+	.setauthsize = ccp_aes_gcm_setauthsize,
+	.encrypt = ccp_aes_gcm_encrypt,
+	.decrypt = ccp_aes_gcm_decrypt,
+	.init = ccp_aes_gcm_cra_init,
+	.ivsize = AES_GCM_IVSIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.base = {
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+			     CRYPTO_ALG_ASYNC |
+			     CRYPTO_ALG_KERN_DRIVER_ONLY |
+			     CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct ccp_ctx),
+		.cra_priority = CCP_CRA_PRIORITY,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_exit = ccp_aes_gcm_cra_exit,
+		.cra_module = THIS_MODULE,
+	},
+};
+
+struct ccp_aes_aead_def {
+	enum ccp_aes_mode mode;
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	struct aead_alg *alg_defaults;
+};
+
+static struct ccp_aes_aead_def aes_aead_algs[] = {
+	{
+		.mode = CCP_AES_MODE_GHASH,
+		.version = CCP_VERSION(5, 0),
+		.name = "gcm(aes)",
+		.driver_name = "gcm-aes-ccp",
+		.blocksize = 1,
+		.ivsize = AES_BLOCK_SIZE,
+		.alg_defaults = &ccp_aes_gcm_defaults,
+	},
+};
+
+static int ccp_register_aes_aead(struct list_head *head,
+				 const struct ccp_aes_aead_def *def)
+{
+	struct ccp_crypto_aead *ccp_aead;
+	struct aead_alg *alg;
+	int ret;
+
+	ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
+	if (!ccp_aead)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_aead->entry);
+
+	ccp_aead->mode = def->mode;
+
+	/* Copy the defaults and override as necessary */
+	alg = &ccp_aead->alg;
+	*alg = *def->alg_defaults;
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->driver_name);
+	alg->base.cra_blocksize = def->blocksize;
+	alg->base.cra_ablkcipher.ivsize = def->ivsize;
+
+	ret = crypto_register_aead(alg);
+	if (ret) {
+		pr_err("%s ablkcipher algorithm registration error (%d)\n",
+		       alg->base.cra_name, ret);
+		kfree(ccp_aead);
+		return ret;
+	}
+
+	list_add(&ccp_aead->entry, head);
+
+	return 0;
+}
+
+int ccp_register_aes_aeads(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
+		if (aes_aead_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 3f1e36d7a8bf..8dccbddabef1 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -40,6 +40,7 @@ MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
+static LIST_HEAD(aead_algs);
 
 /* For any tfm, requests for that tfm must be returned on the order
  * received. With multiple queues available, the CCP can process more
@@ -339,6 +340,10 @@ static int ccp_register_algs(void)
 		ret = ccp_register_aes_xts_algs(&cipher_algs);
 		if (ret)
 			return ret;
+
+		ret = ccp_register_aes_aeads(&aead_algs);
+		if (ret)
+			return ret;
 	}
 
 	if (!des3_disable) {
@@ -360,6 +365,7 @@ static void ccp_unregister_algs(void)
 {
 	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
 	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+	struct ccp_crypto_aead *aead_alg, *aead_tmp;
 
 	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
 		crypto_unregister_ahash(&ahash_alg->alg);
@@ -372,6 +378,12 @@ static void ccp_unregister_algs(void)
 		list_del(&ablk_alg->entry);
 		kfree(ablk_alg);
 	}
+
+	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
+		crypto_unregister_aead(&aead_alg->alg);
+		list_del(&aead_alg->entry);
+		kfree(aead_alg);
+	}
 }
 
 static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 8c8bd3ff9f48..dd5bf15f06e5 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -19,6 +19,8 @@
 #include <linux/ccp.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/internal/aead.h>
+#include <crypto/aead.h>
 #include <crypto/ctr.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
@@ -35,6 +37,14 @@ struct ccp_crypto_ablkcipher_alg {
 	struct crypto_alg alg;
 };
 
+struct ccp_crypto_aead {
+	struct list_head entry;
+
+	u32 mode;
+
+	struct aead_alg alg;
+};
+
 struct ccp_crypto_ahash_alg {
 	struct list_head entry;
 
@@ -97,6 +107,9 @@ struct ccp_aes_req_ctx {
 	struct scatterlist iv_sg;
 	u8 iv[AES_BLOCK_SIZE];
 
+	struct scatterlist tag_sg;
+	u8 tag[AES_BLOCK_SIZE];
+
 	/* Fields used for RFC3686 requests */
 	u8 *rfc3686_info;
 	u8 rfc3686_iv[AES_BLOCK_SIZE];
@@ -233,6 +246,7 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
 
 int ccp_register_aes_algs(struct list_head *head);
 int ccp_register_aes_cmac_algs(struct list_head *head);
 int ccp_register_aes_xts_algs(struct list_head *head);
+int ccp_register_aes_aeads(struct list_head *head);
 int ccp_register_sha_algs(struct list_head *head);
 int ccp_register_des3_algs(struct list_head *head);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 0de961a22ab4..c0dfdacbdff5 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -601,6 +601,255 @@ e_key:
 	return ret;
 }
 
+static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+			       struct ccp_cmd *cmd)
+{
+	struct ccp_aes_engine *aes = &cmd->u.aes;
+	struct ccp_dm_workarea key, ctx, final_wa, tag;
+	struct ccp_data src, dst;
+	struct ccp_data aad;
+	struct ccp_op op;
+
+	unsigned long long *final;
+	unsigned int dm_offset;
+	unsigned int ilen;
+	bool in_place = true; /* Default value */
+	int ret;
+
+	struct scatterlist *p_inp, sg_inp[2];
+	struct scatterlist *p_tag, sg_tag[2];
+	struct scatterlist *p_outp, sg_outp[2];
+	struct scatterlist *p_aad;
+
+	if (!aes->iv)
+		return -EINVAL;
+
+	if (!((aes->key_len == AES_KEYSIZE_128) ||
+	      (aes->key_len == AES_KEYSIZE_192) ||
+	      (aes->key_len == AES_KEYSIZE_256)))
+		return -EINVAL;
+
+	if (!aes->key) /* Gotta have a key SGL */
+		return -EINVAL;
+
+	/* First, decompose the source buffer into AAD & PT,
+	 * and the destination buffer into AAD, CT & tag, or
+	 * the input into CT & tag.
+	 * It is expected that the input and output SGs will
+	 * be valid, even if the AAD and input lengths are 0.
+	 */
+	p_aad = aes->src;
+	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
+	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
+	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+		ilen = aes->src_len;
+		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+	} else {
+		/* Input length for decryption includes tag */
+		ilen = aes->src_len - AES_BLOCK_SIZE;
+		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+	}
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	op.init = 1;
+	op.u.aes.type = aes->type;
+
+	/* Copy the key to the LSB */
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	dm_offset = CCP_SB_BYTES - aes->key_len;
+	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/* Copy the context (IV) to the LSB.
+	 * There is an assumption here that the IV is 96 bits in length, plus
+	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
+	 */
+	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_key;
+
+	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_ctx;
+	}
+
+	op.init = 1;
+	if (aes->aad_len > 0) {
+		/* Step 1: Run a GHASH over the Additional Authenticated Data */
+		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+				    AES_BLOCK_SIZE,
+				    DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
+
+		op.u.aes.mode = CCP_AES_MODE_GHASH;
+		op.u.aes.action = CCP_AES_GHASHAAD;
+
+		while (aad.sg_wa.bytes_left) {
+			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+
+			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_aad;
+			}
+
+			ccp_process_data(&aad, NULL, &op);
+			op.init = 0;
+		}
+	}
+
+	op.u.aes.mode = CCP_AES_MODE_GCTR;
+	op.u.aes.action = aes->action;
+
+	if (ilen > 0) {
+		/* Step 2: Run a GCTR over the plaintext */
+		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
+
+		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+				    AES_BLOCK_SIZE,
+				    in_place ? DMA_BIDIRECTIONAL
+					     : DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
+
+		if (in_place) {
+			dst = src;
+		} else {
+			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
+			if (ret)
+				goto e_src;
+		}
+
+		op.soc = 0;
+		op.eom = 0;
+		op.init = 1;
+		while (src.sg_wa.bytes_left) {
+			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+			if (!src.sg_wa.bytes_left) {
+				unsigned int nbytes = aes->src_len
+						      % AES_BLOCK_SIZE;
+
+				if (nbytes) {
+					op.eom = 1;
+					op.u.aes.size = (nbytes * 8) - 1;
+				}
+			}
+
+			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_dst;
+			}
+
+			ccp_process_data(&src, &dst, &op);
+			op.init = 0;
+		}
+	}
+
+	/* Step 3: Update the IV portion of the context with the original IV */
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	/* Step 4: Concatenate the lengths of the AAD and source, and
+	 * hash that 16 byte buffer.
+	 */
+	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_dst;
+	final = (unsigned long long *) final_wa.address;
+	final[0] = cpu_to_be64(aes->aad_len * 8);
+	final[1] = cpu_to_be64(ilen * 8);
+
+	op.u.aes.mode = CCP_AES_MODE_GHASH;
+	op.u.aes.action = CCP_AES_GHASHFINAL;
+	op.src.type = CCP_MEMTYPE_SYSTEM;
+	op.src.u.dma.address = final_wa.dma.address;
+	op.src.u.dma.length = AES_BLOCK_SIZE;
+	op.dst.type = CCP_MEMTYPE_SYSTEM;
+	op.dst.u.dma.address = final_wa.dma.address;
+	op.dst.u.dma.length = AES_BLOCK_SIZE;
+	op.eom = 1;
+	op.u.aes.size = 0;
+	ret = cmd_q->ccp->vdata->perform->aes(&op);
+	if (ret)
+		goto e_dst;
+
+	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+		/* Put the ciphered tag after the ciphertext. */
+		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+	} else {
+		/* Does this ciphered tag match the input? */
+		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+					   DMA_BIDIRECTIONAL);
+		if (ret)
+			goto e_tag;
+		ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+
+		ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
+		ccp_dm_free(&tag);
+	}
+
+e_tag:
+	ccp_dm_free(&final_wa);
+
+e_dst:
+	if (aes->src_len && !in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	if (aes->src_len)
+		ccp_free_data(&src, cmd_q);
+
+e_aad:
+	if (aes->aad_len)
+		ccp_free_data(&aad, cmd_q);
+
+e_ctx:
+	ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
 static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
@@ -614,6 +863,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (aes->mode == CCP_AES_MODE_CMAC)
 		return ccp_run_aes_cmac_cmd(cmd_q, cmd);
 
+	if (aes->mode == CCP_AES_MODE_GCM)
+		return ccp_run_aes_gcm_cmd(cmd_q, cmd);
+
 	if (!((aes->key_len == AES_KEYSIZE_128) ||
 	      (aes->key_len == AES_KEYSIZE_192) ||
 	      (aes->key_len == AES_KEYSIZE_256)))
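
For reference, the Step 1/2/4 numbering inside ccp_run_aes_gcm_cmd() lines up with the GCM composition of NIST SP 800-38D. With H = CIPH_K(0^128) and, for the 96-bit IV case this driver assumes, J0 = IV || 0^31 || 1 (the counter block that ccp_aes_gcm_crypt() assembles in rctx->iv), the mode computes:

    C = GCTR_K(inc32(J0), P)
    S = GHASH_H(A || 0^v || C || 0^u || [len(A)]_64 || [len(C)]_64)
    T = MSB_t(GCTR_K(J0, S))

Step 1 feeds the padded AAD into GHASH, Step 2 is the GCTR pass over the plaintext (or ciphertext), and Step 4 hashes the 16-byte length block staged in final_wa, after which the engine leaves the ciphered tag in final_wa to be copied after the ciphertext on encryption or compared with memcmp() against the received tag on decryption.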