author     Linus Torvalds <torvalds@linux-foundation.org>  2018-12-27 13:53:32 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-27 13:53:32 -0800
commit     b71acb0e372160167bf6d5500b88b30b52ccef6e (patch)
tree       218e4b2752336ae38ffed12b67e89ed7995db931 /drivers/crypto
parent     e0c38a4d1f196a4b17d2eba36afff8f656a4f1de (diff)
parent     c79b411eaa7257204f89c30651c45cea22278769 (diff)
download   linux-b71acb0e372160167bf6d5500b88b30b52ccef6e.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - Add 1472-byte test to tcrypt for IPsec
   - Reintroduced crypto stats interface with numerous changes
   - Support incremental algorithm dumps

  Algorithms:
   - Add xchacha12/20
   - Add nhpoly1305
   - Add adiantum
   - Add streebog hash
   - Mark cts(cbc(aes)) as FIPS allowed

  Drivers:
   - Improve performance of arm64/chacha20
   - Improve performance of x86/chacha20
   - Add NEON-accelerated nhpoly1305
   - Add SSE2 accelerated nhpoly1305
   - Add AVX2 accelerated nhpoly1305
   - Add support for 192/256-bit keys in gcmaes AVX
   - Add SG support in gcmaes AVX
   - ESN for inline IPsec tx in chcr
   - Add support for CryptoCell 703 in ccree
   - Add support for CryptoCell 713 in ccree
   - Add SM4 support in ccree
   - Add SM3 support in ccree
   - Add support for chacha20 in caam/qi2
   - Add support for chacha20 + poly1305 in caam/jr
   - Add support for chacha20 + poly1305 in caam/qi2
   - Add AEAD cipher support in cavium/nitrox"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (130 commits)
  crypto: skcipher - remove remnants of internal IV generators
  crypto: cavium/nitrox - Fix build with !CONFIG_DEBUG_FS
  crypto: salsa20-generic - don't unnecessarily use atomic walk
  crypto: skcipher - add might_sleep() to skcipher_walk_virt()
  crypto: x86/chacha - avoid sleeping under kernel_fpu_begin()
  crypto: cavium/nitrox - Added AEAD cipher support
  crypto: mxc-scc - fix build warnings on ARM64
  crypto: api - document missing stats member
  crypto: user - remove unused dump functions
  crypto: chelsio - Fix wrong error counter increments
  crypto: chelsio - Reset counters on cxgb4 Detach
  crypto: chelsio - Handle PCI shutdown event
  crypto: chelsio - cleanup:send addr as value in function argument
  crypto: chelsio - Use same value for both channel in single WR
  crypto: chelsio - Swap location of AAD and IV sent in WR
  crypto: chelsio - remove set but not used variable 'kctx_len'
  crypto: ux500 - Use proper enum in hash_set_dma_transfer
  crypto: ux500 - Use proper enum in cryp_set_dma_transfer
  crypto: aesni - Add scatter/gather avx stubs, and use them in C
  crypto: aesni - Introduce partial block macro
  ..
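
As a usage sketch (an illustration, not code from this merge): the new ChaCha20-Poly1305 offloads register under the template name visible in the diff below, so a kernel caller reaches them through the ordinary AEAD API. Assuming a 32-byte key and the fixed 16-byte Poly1305 tag:

	#include <linux/err.h>
	#include <linux/types.h>
	#include <crypto/aead.h>

	/* Sketch: allocate whichever rfc7539(chacha20,poly1305) implementation
	 * (generic or one of the drivers below) the crypto API selects. */
	static int chachapoly_smoke_test(const u8 *key)
	{
		struct crypto_aead *tfm;
		int err;

		tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_aead_setkey(tfm, key, 32);	/* CHACHA_KEY_SIZE */
		if (!err)
			err = crypto_aead_setauthsize(tfm, 16); /* POLY1305_DIGEST_SIZE */

		crypto_free_aead(tfm);
		return err;
	}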
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 3
-rw-r--r--  drivers/crypto/bcm/cipher.c | 9
-rw-r--r--  drivers/crypto/caam/caamalg.c | 266
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 139
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 5
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 37
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 156
-rw-r--r--  drivers/crypto/caam/caamhash.c | 20
-rw-r--r--  drivers/crypto/caam/caampkc.c | 10
-rw-r--r--  drivers/crypto/caam/caamrng.c | 10
-rw-r--r--  drivers/crypto/caam/compat.h | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 28
-rw-r--r--  drivers/crypto/caam/desc.h | 28
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 7
-rw-r--r--  drivers/crypto/caam/regs.h | 74
-rw-r--r--  drivers/crypto/cavium/nitrox/Makefile | 5
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 364
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_algs.c | 456
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_common.h | 6
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_csr.h | 12
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_debugfs.c | 48
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_debugfs.h | 21
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_dev.h | 74
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.c | 114
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.h | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_isr.c | 92
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_isr.h | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_lib.c | 22
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_mbx.c | 204
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_mbx.h | 9
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_req.h | 326
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 302
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 498
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_sriov.c | 94
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 4
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 35
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 104
-rw-r--r--  drivers/crypto/ccree/cc_crypto_ctx.h | 4
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 50
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 15
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 189
-rw-r--r--  drivers/crypto/ccree/cc_hw_queue_defs.h | 30
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 418
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 195
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 44
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 10
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 183
-rw-r--r--  drivers/crypto/geode-aes.c | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 8
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 5
-rw-r--r--  drivers/crypto/mxc-scc.c | 12
-rw-r--r--  drivers/crypto/mxs-dcp.c | 28
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 1
-rw-r--r--  drivers/crypto/omap-aes.c | 3
-rw-r--r--  drivers/crypto/omap-des.c | 1
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 3
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 1
-rw-r--r--  drivers/crypto/qce/sha.c | 1
-rw-r--r--  drivers/crypto/sahara.c | 1
-rw-r--r--  drivers/crypto/talitos.c | 1
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 4
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 2
65 files changed, 3518 insertions(+), 1290 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index caa98a7fe392..d80751d48cf1 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -762,10 +762,12 @@ config CRYPTO_DEV_CCREE
select CRYPTO_ECB
select CRYPTO_CTR
select CRYPTO_XTS
+ select CRYPTO_SM4
+ select CRYPTO_SM3
help
Say 'Y' to enable a driver for the REE interface of the Arm
TrustZone CryptoCell family of processors. Currently the
- CryptoCell 712, 710 and 630 are supported.
+ CryptoCell 713, 703, 712, 710 and 630 are supported.
Choose this if you wish to use hardware acceleration of
cryptographic operations on the system REE.
If unsure say Y.
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index f5c07498ea4f..4092c2aad8e2 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -520,8 +520,7 @@ static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
uint8_t src[16] = { 0 };
int rc = 0;
- aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(aes_tfm)) {
rc = PTR_ERR(aes_tfm);
pr_warn("could not load aes cipher driver: %d\n", rc);
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2d1f1db9f807..c9393ffb70ed 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3868,7 +3868,6 @@ static struct iproc_alg_s driver_algs[] = {
.cra_driver_name = "ctr-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ablkcipher = {
- /* .geniv = "chainiv", */
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -4605,7 +4604,6 @@ static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
crypto->cra_priority = cipher_pri;
crypto->cra_alignmask = 0;
crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
- INIT_LIST_HEAD(&crypto->cra_list);
crypto->cra_init = ablkcipher_cra_init;
crypto->cra_exit = generic_cra_exit;
@@ -4652,12 +4650,16 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
- hash->setkey = ahash_setkey;
hash->init = ahash_init;
hash->update = ahash_update;
hash->final = ahash_final;
hash->finup = ahash_finup;
hash->digest = ahash_digest;
+ if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
+ ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
+ (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
+ hash->setkey = ahash_setkey;
+ }
} else {
hash->setkey = ahash_hmac_setkey;
hash->init = ahash_hmac_init;
@@ -4687,7 +4689,6 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_priority = aead_pri;
aead->base.cra_alignmask = 0;
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
- INIT_LIST_HEAD(&aead->base.cra_list);
aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
/* setkey set in alg initialization */
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 869f092432de..92e593e2069a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -72,6 +72,8 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
+#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
+
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -513,6 +515,61 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 *desc;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, true, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), ctx->dir);
+
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, false, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), ctx->dir);
+
+ return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+
+ ctx->authsize = authsize;
+ return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+ if (keylen != CHACHA_KEY_SIZE + saltlen) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.key_virt = key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+}
+
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
@@ -1031,6 +1088,40 @@ static void init_gcm_job(struct aead_request *req,
/* End of blank commands */
}
+static void init_chachapoly_job(struct aead_request *req,
+ struct aead_edesc *edesc, bool all_contig,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int assoclen = req->assoclen;
+ u32 *desc = edesc->hw_desc;
+ u32 ctx_iv_off = 4;
+
+ init_aead_job(req, edesc, all_contig, encrypt);
+
+ if (ivsize != CHACHAPOLY_IV_SIZE) {
+ /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
+ ctx_iv_off += 4;
+
+ /*
+ * The associated data comes already with the IV but we need
+ * to skip it when we authenticate or encrypt...
+ */
+ assoclen -= ivsize;
+ }
+
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
+
+ /*
+ * For IPsec load the IV further in the same register.
+ * For RFC7539 simply load the 12 bytes nonce in a single operation
+ */
+ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ctx_iv_off << LDST_OFFSET_SHIFT);
+}
+
static void init_authenc_job(struct aead_request *req,
struct aead_edesc *edesc,
bool all_contig, bool encrypt)
@@ -1289,6 +1380,72 @@ static int gcm_encrypt(struct aead_request *req)
return ret;
}
+static int chachapoly_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret;
+
+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+ true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ desc = edesc->hw_desc;
+
+ init_chachapoly_job(req, edesc, all_contig, true);
+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int chachapoly_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret;
+
+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+ false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ desc = edesc->hw_desc;
+
+ init_chachapoly_job(req, edesc, all_contig, false);
+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
static int ipsec_gcm_encrypt(struct aead_request *req)
{
if (req->assoclen < 8)
@@ -3002,6 +3159,50 @@ static struct caam_aead_alg driver_aeads[] = {
.geniv = true,
},
},
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
+ "caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = chachapoly_encrypt,
+ .decrypt = chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539esp-chacha20-"
+ "poly1305-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = chachapoly_encrypt,
+ .decrypt = chachapoly_decrypt,
+ .ivsize = 8,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
};
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
@@ -3135,7 +3336,7 @@ static int __init caam_algapi_init(void)
struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
@@ -3168,14 +3369,38 @@ static int __init caam_algapi_init(void)
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ if (priv->era < 10) {
+ u32 cha_vid, cha_inst;
+
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+ CHA_ID_LS_DES_SHIFT;
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ ccha_inst = 0;
+ ptha_inst = 0;
+ } else {
+ u32 aesa, mdha;
+
+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ aes_inst = aesa & CHA_VER_NUM_MASK;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
+ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ }
/* If MD is present, limit digest size based on LP256 */
- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -3196,10 +3421,10 @@ static int __init caam_algapi_init(void)
* Check support for AES modes not available
* on LP devices.
*/
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_XTS)
- continue;
+ if (aes_vid == CHA_VER_VID_AES_LP &&
+ (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
caam_skcipher_alg_init(t_alg);
@@ -3232,21 +3457,28 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
+ continue;
+
+ /* Skip POLY1305 algorithms if not supported by device */
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
+ continue;
+
/*
* Check support for AES algorithms not available
* on LP devices.
*/
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if (alg_aai == OP_ALG_AAI_GCM)
- continue;
+ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+ continue;
/*
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
- if (c2_alg_sel &&
- (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
- continue;
+ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ (!md_inst || t_alg->aead.maxauthsize > md_limit))
+ continue;
caam_aead_alg_init(t_alg);
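
For the ESP variant registered above, chachapoly_setkey() derives saltlen = CHACHAPOLY_IV_SIZE - ivsize = 12 - 8 = 4, so the key material must arrive as the ChaCha20 key with the 4-byte salt appended. A sketch of that layout (chacha_key, salt and tfm are placeholders):

	/* Key material layout expected by chachapoly_setkey() for
	 * rfc7539esp(chacha20,poly1305): 32-byte key, then 4-byte salt.
	 * The driver keeps cdata.keylen = CHACHA_KEY_SIZE and loads the
	 * trailing salt into CONTEXT1 separately. */
	u8 keymat[CHACHA_KEY_SIZE + 4];

	memcpy(keymat, chacha_key, CHACHA_KEY_SIZE);
	memcpy(keymat + CHACHA_KEY_SIZE, salt, 4);
	err = crypto_aead_setkey(tfm, keymat, sizeof(keymat));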
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 1a6f0da14106..7db1640d3577 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1213,6 +1213,139 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+/**
+ * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
+ * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
+ * descriptor (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
+ * OP_ALG_AAI_AEAD.
+ * @adata: pointer to authentication transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
+ * OP_ALG_AAI_AEAD.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @encap: true if encapsulation, false if decapsulation
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool encap,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd, *wait_cmd;
+ u32 nfifo;
+ const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* For IPsec load the salt from keymat in the context register */
+ if (is_ipsec)
+ append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
+ LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
+ 4 << LDST_OFFSET_SHIFT);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 and 1 operations: Poly & ChaCha */
+ if (encap) {
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+ } else {
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ }
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+ u32 ctx1_iv_off = is_ipsec ? 8 : 4;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ 4 << LDST_OFFSET_SHIFT);
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ctx1_iv_off << LDST_OFFSET_SHIFT);
+ }
+
+ /*
+ * MAGIC with NFIFO
+ * Read associated data from the input and send them to class1 and
+ * class2 alignment blocks. From class1 send data to output fifo and
+ * then write it to memory since we don't need to encrypt AD.
+ */
+ nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
+ NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
+ append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
+ FIFOLD_CLASS_CLASS1 | LDST_VLF);
+ append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
+ MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* IPsec - copy IV at the output */
+ if (is_ipsec)
+ append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
+ 0x2 << 25);
+
+ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
+ JUMP_COND_NOP | JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, wait_cmd);
+
+ if (encap) {
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+ CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ } else {
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0,
+ CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
+ CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV for verification */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ }
+
+ print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+}
+EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
+
/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
static inline void skcipher_append_src_dst(u32 *desc)
{
@@ -1228,7 +1361,8 @@ static inline void skcipher_append_src_dst(u32 *desc)
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ * - OP_ALG_ALGSEL_CHACHA20
* @ivsize: initialization vector size
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
@@ -1293,7 +1427,8 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ * - OP_ALG_ALGSEL_CHACHA20
* @ivsize: initialization vector size
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
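
Tying the kernel-doc above to a concrete call: a hedged sketch of a cnstr_shdsc_chachapoly() call site, mirroring the chachapoly_set_sh_desc() callers this series adds (desc is assumed to point at an adequately sized descriptor buffer):

	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD,
		.key_virt = key,	/* ChaCha20 key (+ salt for ESP) */
		.keylen = CHACHA_KEY_SIZE,
	};
	struct alginfo adata = {
		.algtype = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD,
	};

	/* Encapsulation descriptor for the job-ring (non-QI) interface. */
	cnstr_shdsc_chachapoly(desc, &cdata, &adata, CHACHAPOLY_IV_SIZE,
			       POLY1305_DIGEST_SIZE, true, false);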
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 1315c8f6f951..d5ca42ff961a 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -96,6 +96,11 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);
+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool encap,
+ const bool is_qi);
+
void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
const u32 ctx1_iv_off);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 23c9fc4975f8..c0d55310aade 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -2462,7 +2462,7 @@ static int __init caam_qi_algapi_init(void)
struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
@@ -2497,14 +2497,34 @@ static int __init caam_qi_algapi_init(void)
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ if (priv->era < 10) {
+ u32 cha_vid, cha_inst;
+
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+ CHA_ID_LS_DES_SHIFT;
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ } else {
+ u32 aesa, mdha;
+
+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ aes_inst = aesa & CHA_VER_NUM_MASK;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ }
/* If MD is present, limit digest size based on LP256 */
- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -2556,8 +2576,7 @@ static int __init caam_qi_algapi_init(void)
* Check support for AES algorithms not available
* on LP devices.
*/
- if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
- (alg_aai == OP_ALG_AAI_GCM))
+ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
continue;
/*
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 7d8ac0222fa3..425d5d974613 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -462,7 +462,15 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- edesc->assoclen = cpu_to_caam32(req->assoclen);
+ if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
+ /*
+ * The associated data comes already with the IV but we need
+ * to skip it when we authenticate or encrypt...
+ */
+ edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
+ else
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->assoclen_dma)) {
@@ -532,6 +540,68 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return edesc;
}
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, true, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, false, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+
+ ctx->authsize = authsize;
+ return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+ if (keylen != CHACHA_KEY_SIZE + saltlen) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.key_virt = key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+}
+
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -816,7 +886,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
+ OP_ALG_AAI_CTR_MOD128) &&
+ ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
+ OP_ALG_ALGSEL_CHACHA20);
const bool is_rfc3686 = alg->caam.rfc3686;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
@@ -1494,7 +1566,23 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
},
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
- }
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "chacha20",
+ .cra_driver_name = "chacha20-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
+ },
};
static struct caam_aead_alg driver_aeads[] = {
@@ -2611,6 +2699,50 @@ static struct caam_aead_alg driver_aeads[] = {
{
.aead = {
.base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
+ "caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539esp-chacha20-"
+ "poly1305-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 8,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
+ {
+ .aead = {
+ .base = {
.cra_name = "authenc(hmac(sha512),"
"rfc3686(ctr(aes)))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -4908,6 +5040,11 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
alg_sel == OP_ALG_ALGSEL_AES)
continue;
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
+ !priv->sec_attr.ccha_acc_num)
+ continue;
+
t_alg->caam.dev = dev;
caam_skcipher_alg_init(t_alg);
@@ -4940,11 +5077,22 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
c1_alg_sel == OP_ALG_ALGSEL_AES)
continue;
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
+ !priv->sec_attr.ccha_acc_num)
+ continue;
+
+ /* Skip POLY1305 algorithms if not supported by device */
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
+ !priv->sec_attr.ptha_acc_num)
+ continue;
+
/*
* Skip algorithms requiring message digests
* if MD not supported by device.
*/
- if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ !priv->sec_attr.md_acc_num)
continue;
t_alg->caam.dev = dev;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 46924affa0bd..81712aa5d0f2 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,6 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -1801,7 +1802,7 @@ static int __init caam_algapi_hash_init(void)
int i = 0, err = 0;
struct caam_drv_private *priv;
unsigned int md_limit = SHA512_DIGEST_SIZE;
- u32 cha_inst, cha_vid;
+ u32 md_inst, md_vid;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -1831,18 +1832,27 @@ static int __init caam_algapi_hash_init(void)
* Register crypto algorithms the device supports. First, identify
* presence and attributes of MD block.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ if (priv->era < 10) {
+ md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ } else {
+ u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ }
/*
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
- if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+ if (!md_inst)
return -ENODEV;
/* Limit digest size based on LP256 */
- if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+ if (md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
INIT_LIST_HEAD(&hash_list);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 4fc209cbbeab..77ab28a2811a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,6 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -1017,7 +1018,7 @@ static int __init caam_pkc_init(void)
struct platform_device *pdev;
struct device *ctrldev;
struct caam_drv_private *priv;
- u32 cha_inst, pk_inst;
+ u32 pk_inst;
int err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -1045,8 +1046,11 @@ static int __init caam_pkc_init(void)
return -ENODEV;
/* Determine public key hardware accelerator presence. */
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+ if (priv->era < 10)
+ pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+ else
+ pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
/* Do not register algorithms if PKHA is not present. */
if (!pk_inst)
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 4318b0aa6fb9..a387c8d49a62 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,6 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -309,6 +310,7 @@ static int __init caam_rng_init(void)
struct platform_device *pdev;
struct device *ctrldev;
struct caam_drv_private *priv;
+ u32 rng_inst;
int err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -336,7 +338,13 @@ static int __init caam_rng_init(void)
return -ENODEV;
/* Check for an instantiated RNG before registration */
- if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+ if (priv->era < 10)
+ rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+
+ if (!rng_inst)
return -ENODEV;
dev = caam_jr_alloc();
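
The same Era-10 split recurs across caamalg, caamalg_qi, caamhash, caampkc and caamrng: before Era 10 the instantiation counts live in packed perfmon bitfields, from Era 10 on each CHA has its own version register. A hypothetical helper (not part of this series) capturing the RNG case above:

	/* Hypothetical consolidation of the pattern above. */
	static u32 caam_rng_inst(struct caam_drv_private *priv)
	{
		if (priv->era < 10)
			return (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
				CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;

		return rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
	}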
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 9604ff7a335e..87d9efe4c7aa 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -36,6 +36,8 @@
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 3fc793193821..16bbc72f041a 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,6 +3,7 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#include <linux/device.h>
@@ -106,7 +107,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
struct caam_deco __iomem *deco = ctrlpriv->deco;
unsigned int timeout = 100000;
- u32 deco_dbg_reg, flags;
+ u32 deco_dbg_reg, deco_state, flags;
int i;
@@ -149,13 +150,22 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
timeout = 10000000;
do {
deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+
+ if (ctrlpriv->era < 10)
+ deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
+ DESC_DBG_DECO_STAT_SHIFT;
+ else
+ deco_state = (rd_reg32(&deco->dbg_exec) &
+ DESC_DER_DECO_STAT_MASK) >>
+ DESC_DER_DECO_STAT_SHIFT;
+
/*
* If an error occurred in the descriptor, then
* the DECO status field will be set to 0x0D
*/
- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
- DESC_DBG_DECO_STAT_HOST_ERR)
+ if (deco_state == DECO_STAT_HOST_ERR)
break;
+
cpu_relax();
} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
@@ -491,7 +501,7 @@ static int caam_probe(struct platform_device *pdev)
struct caam_perfmon *perfmon;
#endif
u32 scfgr, comp_params;
- u32 cha_vid_ls;
+ u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
@@ -733,15 +743,19 @@ static int caam_probe(struct platform_device *pdev)
goto caam_remove;
}
- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+ if (ctrlpriv->era < 10)
+ rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+ CHA_VER_VID_SHIFT;
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
* In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!ctrlpriv->mc_en &&
- (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!ctrlpriv->mc_en && rng_vid >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index f76ff160a02c..ec10230178c5 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,6 +4,7 @@
* Definitions to support CAAM descriptor instruction generation
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#ifndef DESC_H
@@ -242,6 +243,7 @@
#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
/* Offset in source/destination */
@@ -284,6 +286,12 @@
#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+/* Special Length definitions when dst=sm, nfifo-{sm,m} */
+#define LDLEN_MATH0 0
+#define LDLEN_MATH1 1
+#define LDLEN_MATH2 2
+#define LDLEN_MATH3 3
+
/*
* FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
* Command Constructs
@@ -408,6 +416,7 @@
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
@@ -1133,6 +1142,12 @@
#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
+/* version register fields */
+#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
+#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
+#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
+#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
+
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
@@ -1152,6 +1167,8 @@
#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_AAI_SHIFT 4
#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
@@ -1199,6 +1216,11 @@
#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+/* Chacha20 AAI set */
+#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
+
/* hmac/smac AAI set */
#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
@@ -1387,6 +1409,7 @@
#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
#define MOVE_DEST_SHIFT 16
#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
@@ -1413,6 +1436,10 @@
#define MOVELEN_MRSEL_SHIFT 0
#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
/*
* MATH Command Constructs
@@ -1589,6 +1616,7 @@
#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index d4256fa4a1d6..2980b8ef1fb1 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * const desc, u32 options) \
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
+APPEND_CMD_RET(move_len, MOVE_LEN)
static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
@@ -327,7 +328,11 @@ static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
- append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+ if (options & LDST_LEN_MASK) \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options); \
+ else \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options | \
+ sizeof(type)); \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
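
With this change, append_load_imm_u32() only ORs in sizeof(u32) when the caller's options leave the LDST length field clear; options that already carry a length (here the LDLEN_MATH* encodings added in desc.h) pass through untouched. The two shapes, as an assumed illustration based on the new chachapoly descriptor:

	/* Old/default shape: length comes from sizeof(u32). */
	append_load_imm_u32(desc, val, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO);

	/* New shape: LDLEN_MATH3 occupies LDST_LEN_MASK, so the command
	 * keeps it and the data size is taken from MATH3 at run time. */
	append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);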
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 457815f965c0..3cd0822ea819 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -3,6 +3,7 @@
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#ifndef REGS_H
@@ -211,6 +212,47 @@ struct jr_outentry {
u32 jrstatus; /* Status for completed descriptor */
} __packed;
+/* Version registers (Era 10+) e80-eff */
+struct version_regs {
+ u32 crca; /* CRCA_VERSION */
+ u32 afha; /* AFHA_VERSION */
+ u32 kfha; /* KFHA_VERSION */
+ u32 pkha; /* PKHA_VERSION */
+ u32 aesa; /* AESA_VERSION */
+ u32 mdha; /* MDHA_VERSION */
+ u32 desa; /* DESA_VERSION */
+ u32 snw8a; /* SNW8A_VERSION */
+ u32 snw9a; /* SNW9A_VERSION */
+ u32 zuce; /* ZUCE_VERSION */
+ u32 zuca; /* ZUCA_VERSION */
+ u32 ccha; /* CCHA_VERSION */
+ u32 ptha; /* PTHA_VERSION */
+ u32 rng; /* RNG_VERSION */
+ u32 trng; /* TRNG_VERSION */
+ u32 aaha; /* AAHA_VERSION */
+ u32 rsvd[10];
+ u32 sr; /* SR_VERSION */
+ u32 dma; /* DMA_VERSION */
+ u32 ai; /* AI_VERSION */
+ u32 qi; /* QI_VERSION */
+ u32 jr; /* JR_VERSION */
+ u32 deco; /* DECO_VERSION */
+};
+
+/* Version registers bitfields */
+
+/* Number of CHAs instantiated */
+#define CHA_VER_NUM_MASK 0xffull
+/* CHA Miscellaneous Information */
+#define CHA_VER_MISC_SHIFT 8
+#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
+/* CHA Revision Number */
+#define CHA_VER_REV_SHIFT 16
+#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
+/* CHA Version ID */
+#define CHA_VER_VID_SHIFT 24
+#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
+
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
@@ -223,15 +265,13 @@ struct jr_outentry {
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
/*
- * CHA version IDs / instantiation bitfields
+ * CHA version IDs / instantiation bitfields (< Era 10)
* Defined for use with the cha_id fields in perfmon, but the same shift/mask
* selectors can be used to pull out the number of instantiated blocks within
* cha_num fields in perfmon because the locations are the same.
*/
#define CHA_ID_LS_AES_SHIFT 0
#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
#define CHA_ID_LS_DES_SHIFT 4
#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
@@ -241,9 +281,6 @@ struct jr_outentry {
#define CHA_ID_LS_MD_SHIFT 12
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
#define CHA_ID_LS_RNG_SHIFT 16
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -269,6 +306,13 @@ struct jr_outentry {
#define CHA_ID_MS_JR_SHIFT 28
#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
+/* Specific CHA version IDs */
+#define CHA_VER_VID_AES_LP 0x3ull
+#define CHA_VER_VID_AES_HP 0x4ull
+#define CHA_VER_VID_MD_LP256 0x0ull
+#define CHA_VER_VID_MD_LP512 0x1ull
+#define CHA_VER_VID_MD_HP 0x2ull
+
struct sec_vid {
u16 ip_id;
u8 maj_rev;
@@ -479,8 +523,10 @@ struct caam_ctrl {
struct rng4tst r4tst[2];
};
- u32 rsvd9[448];
+ u32 rsvd9[416];
+ /* Version registers - introduced with era 10 e80-eff */
+ struct version_regs vreg;
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
@@ -570,8 +616,10 @@ struct caam_job_ring {
u32 rsvd11;
u32 jrcommand; /* JRCRx - JobR command */
- u32 rsvd12[932];
+ u32 rsvd12[900];
+ /* Version registers - introduced with era 10 e80-eff */
+ struct version_regs vreg;
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
@@ -878,13 +926,19 @@ struct caam_deco {
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
u32 rscvd30[193];
-#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
#define DESC_DBG_DECO_STAT_VALID 0x80000000
#define DESC_DBG_DECO_STAT_MASK 0x00F00000
+#define DESC_DBG_DECO_STAT_SHIFT 20
u32 desc_dbg; /* DxDDR - DECO Debug Register */
- u32 rsvd31[126];
+ u32 rsvd31[13];
+#define DESC_DER_DECO_STAT_MASK 0x000F0000
+#define DESC_DER_DECO_STAT_SHIFT 16
+ u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
+ u32 rsvd32[112];
};
+#define DECO_STAT_HOST_ERR 0xD
+
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
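
For reference, decoding one of the new Era 10+ version registers with the CHA_VER_* bitfields above (a sketch; CCHA chosen arbitrarily, priv as in the other caam files):

	u32 ccha = rd_reg32(&priv->ctrl->vreg.ccha);
	u32 ccha_inst = ccha & CHA_VER_NUM_MASK;	/* CHAs instantiated */
	u32 ccha_vid = (ccha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;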
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index e12954791673..f83991aaf820 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -6,7 +6,10 @@ n5pf-objs := nitrox_main.o \
nitrox_lib.o \
nitrox_hal.o \
nitrox_reqmgr.o \
- nitrox_algs.o
+ nitrox_algs.o \
+ nitrox_mbx.o \
+ nitrox_skcipher.o \
+ nitrox_aead.o
n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
new file mode 100644
index 000000000000..4f43eacd2557
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/crypto.h>
+#include <linux/rtnetlink.h>
+
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_req.h"
+
+#define GCM_AES_SALT_SIZE 4
+
+/**
+ * struct nitrox_crypt_params - Params to set nitrox crypto request.
+ * @cryptlen: Encryption/Decryption data length
+ * @authlen: Assoc data length + Cryptlen
+ * @srclen: Input buffer length
+ * @dstlen: Output buffer length
+ * @iv: IV data
+ * @ivsize: IV data length
+ * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
+ */
+struct nitrox_crypt_params {
+ unsigned int cryptlen;
+ unsigned int authlen;
+ unsigned int srclen;
+ unsigned int dstlen;
+ u8 *iv;
+ int ivsize;
+ u8 ctrl_arg;
+};
+
+union gph_p3 {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 iv_offset : 8;
+ u16 auth_offset : 8;
+#else
+ u16 auth_offset : 8;
+ u16 iv_offset : 8;
+#endif
+ };
+ u16 param;
+};
+
+static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ int aes_keylen;
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct flexi_crypto_context *fctx;
+ union fc_ctx_flags flags;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* fill crypto context */
+ fctx = nctx->u.fctx;
+ flags.f = be64_to_cpu(fctx->flags.f);
+ flags.w0.aes_keylen = aes_keylen;
+ fctx->flags.f = cpu_to_be64(flags.f);
+
+ /* copy enc key to context */
+ memset(&fctx->crypto, 0, sizeof(fctx->crypto));
+ memcpy(fctx->crypto.u.key, key, keylen);
+
+ return 0;
+}
+
+static int nitrox_aead_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ union fc_ctx_flags flags;
+
+ flags.f = be64_to_cpu(fctx->flags.f);
+ flags.w0.mac_len = authsize;
+ fctx->flags.f = cpu_to_be64(flags.f);
+
+ aead->authsize = authsize;
+
+ return 0;
+}
+
+static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
+ int buflen)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ int nents = sg_nents_for_len(areq->src, buflen) + 1;
+ int ret;
+
+ if (nents < 0)
+ return nents;
+
+ /* Allocate buffer to hold IV and input scatterlist array */
+ ret = alloc_src_req_buf(nkreq, nents, ivsize);
+ if (ret)
+ return ret;
+
+ nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
+ nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
+
+ return 0;
+}
+
+static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ int nents = sg_nents_for_len(areq->dst, buflen) + 3;
+ int ret;
+
+ if (nents < 0)
+ return nents;
+
+ /* Allocate buffer to hold ORH, COMPLETION and output scatterlist
+ * array
+ */
+ ret = alloc_dst_req_buf(nkreq, nents);
+ if (ret)
+ return ret;
+
+ nitrox_creq_set_orh(nkreq);
+ nitrox_creq_set_comp(nkreq);
+ nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
+
+ return 0;
+}
+
+static void free_src_sglist(struct aead_request *areq)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+
+ kfree(nkreq->src);
+}
+
+static void free_dst_sglist(struct aead_request *areq)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+
+ kfree(nkreq->dst);
+}
+
+static int nitrox_set_creq(struct aead_request *areq,
+ struct nitrox_crypt_params *params)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ union gph_p3 param3;
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ int ret;
+
+ creq->flags = areq->base.flags;
+ creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ creq->ctrl.value = 0;
+ creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+ creq->ctrl.s.arg = params->ctrl_arg;
+
+ creq->gph.param0 = cpu_to_be16(params->cryptlen);
+ creq->gph.param1 = cpu_to_be16(params->authlen);
+ creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
+ param3.iv_offset = 0;
+ param3.auth_offset = params->ivsize;
+ creq->gph.param3 = cpu_to_be16(param3.param);
+
+ creq->ctx_handle = nctx->u.ctx_handle;
+ creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
+
+ ret = alloc_src_sglist(areq, params->iv, params->ivsize,
+ params->srclen);
+ if (ret)
+ return ret;
+
+ ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
+ if (ret) {
+ free_src_sglist(areq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nitrox_aead_callback(void *arg, int err)
+{
+ struct aead_request *areq = arg;
+
+ free_src_sglist(areq);
+ free_dst_sglist(areq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int nitrox_aes_gcm_enc(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ struct nitrox_crypt_params params;
+ int ret;
+
+ memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
+
+ memset(&params, 0, sizeof(params));
+ params.cryptlen = areq->cryptlen;
+ params.authlen = areq->assoclen + params.cryptlen;
+ params.srclen = params.authlen;
+ params.dstlen = params.srclen + aead->authsize;
+ params.iv = &areq->iv[GCM_AES_SALT_SIZE];
+ params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ params.ctrl_arg = ENCRYPT;
+ ret = nitrox_set_creq(areq, &params);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback,
+ areq);
+}
+
+static int nitrox_aes_gcm_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ struct nitrox_crypt_params params;
+ int ret;
+
+ memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
+
+ memset(&params, 0, sizeof(params));
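+ /* on decrypt the input carries the auth tag, so the cipher length
+ * and the output are authsize bytes shorter than the input
+ */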
+ params.cryptlen = areq->cryptlen - aead->authsize;
+ params.authlen = areq->assoclen + params.cryptlen;
+ params.srclen = areq->cryptlen + areq->assoclen;
+ params.dstlen = params.srclen - aead->authsize;
+ params.iv = &areq->iv[GCM_AES_SALT_SIZE];
+ params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ params.ctrl_arg = DECRYPT;
+ ret = nitrox_set_creq(areq, &params);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback,
+ areq);
+}
+
+static int nitrox_aead_init(struct crypto_aead *aead)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct crypto_ctx_hdr *chdr;
+
+ /* get the first device */
+ nctx->ndev = nitrox_get_first_device();
+ if (!nctx->ndev)
+ return -ENODEV;
+
+ /* allocate nitrox crypto context */
+ chdr = crypto_alloc_context(nctx->ndev);
+ if (!chdr) {
+ nitrox_put_device(nctx->ndev);
+ return -ENOMEM;
+ }
+ nctx->chdr = chdr;
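+ /* the context handle points past the DMA pool metadata
+ * (struct ctx_hdr) to the flexi crypto context itself
+ */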
+ nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
+ sizeof(struct ctx_hdr));
+ nctx->u.fctx->flags.f = 0;
+
+ return 0;
+}
+
+static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+{
+ int ret;
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ union fc_ctx_flags *flags;
+
+ ret = nitrox_aead_init(aead);
+ if (ret)
+ return ret;
+
+ flags = &nctx->u.fctx->flags;
+ flags->w0.cipher_type = CIPHER_AES_GCM;
+ flags->w0.hash_type = AUTH_NULL;
+ flags->w0.iv_source = IV_FROM_DPTR;
+ /* ask microcode to calculate ipad/opad */
+ flags->w0.auth_input_type = 1;
+ flags->f = be64_to_cpu(flags->f);
+
+ crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
+ sizeof(struct nitrox_kcrypt_request));
+
+ return 0;
+}
+
+static void nitrox_aead_exit(struct crypto_aead *aead)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+
+ /* free the nitrox crypto context */
+ if (nctx->u.ctx_handle) {
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+
+ memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys));
+ memzero_explicit(&fctx->auth, sizeof(struct auth_keys));
+ crypto_free_context((void *)nctx->chdr);
+ }
+ nitrox_put_device(nctx->ndev);
+
+ nctx->u.ctx_handle = 0;
+ nctx->ndev = NULL;
+}
+
+static struct aead_alg nitrox_aeads[] = { {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "n5_aes_gcm",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .setkey = nitrox_aes_gcm_setkey,
+ .setauthsize = nitrox_aead_setauthsize,
+ .encrypt = nitrox_aes_gcm_enc,
+ .decrypt = nitrox_aes_gcm_dec,
+ .init = nitrox_aes_gcm_init,
+ .exit = nitrox_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+} };
+
+int nitrox_register_aeads(void)
+{
+ return crypto_register_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads));
+}
+
+void nitrox_unregister_aeads(void)
+{
+ crypto_unregister_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads));
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
index 2ae6124e5da6..d646ae5f29b0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
@@ -1,458 +1,24 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-
-#include <crypto/aes.h>
-#include <crypto/skcipher.h>
-#include <crypto/ctr.h>
-#include <crypto/des.h>
-#include <crypto/xts.h>
-
-#include "nitrox_dev.h"
#include "nitrox_common.h"
-#include "nitrox_req.h"
-
-#define PRIO 4001
-
-struct nitrox_cipher {
- const char *name;
- enum flexi_cipher value;
-};
-
-/**
- * supported cipher list
- */
-static const struct nitrox_cipher flexi_cipher_table[] = {
- { "null", CIPHER_NULL },
- { "cbc(des3_ede)", CIPHER_3DES_CBC },
- { "ecb(des3_ede)", CIPHER_3DES_ECB },
- { "cbc(aes)", CIPHER_AES_CBC },
- { "ecb(aes)", CIPHER_AES_ECB },
- { "cfb(aes)", CIPHER_AES_CFB },
- { "rfc3686(ctr(aes))", CIPHER_AES_CTR },
- { "xts(aes)", CIPHER_AES_XTS },
- { "cts(cbc(aes))", CIPHER_AES_CBC_CTS },
- { NULL, CIPHER_INVALID }
-};
-
-static enum flexi_cipher flexi_cipher_type(const char *name)
-{
- const struct nitrox_cipher *cipher = flexi_cipher_table;
-
- while (cipher->name) {
- if (!strcmp(cipher->name, name))
- break;
- cipher++;
- }
- return cipher->value;
-}
-
-static int flexi_aes_keylen(int keylen)
-{
- int aes_keylen;
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- aes_keylen = 1;
- break;
- case AES_KEYSIZE_192:
- aes_keylen = 2;
- break;
- case AES_KEYSIZE_256:
- aes_keylen = 3;
- break;
- default:
- aes_keylen = -EINVAL;
- break;
- }
- return aes_keylen;
-}
-
-static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
-{
- struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
- void *fctx;
-
- /* get the first device */
- nctx->ndev = nitrox_get_first_device();
- if (!nctx->ndev)
- return -ENODEV;
-
- /* allocate nitrox crypto context */
- fctx = crypto_alloc_context(nctx->ndev);
- if (!fctx) {
- nitrox_put_device(nctx->ndev);
- return -ENOMEM;
- }
- nctx->u.ctx_handle = (uintptr_t)fctx;
- crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
- sizeof(struct nitrox_kcrypt_request));
- return 0;
-}
-
-static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
-{
- struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
-
- /* free the nitrox crypto context */
- if (nctx->u.ctx_handle) {
- struct flexi_crypto_context *fctx = nctx->u.fctx;
-
- memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
- memset(&fctx->auth, 0, sizeof(struct auth_keys));
- crypto_free_context((void *)fctx);
- }
- nitrox_put_device(nctx->ndev);
-
- nctx->u.ctx_handle = 0;
- nctx->ndev = NULL;
-}
-static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher,
- int aes_keylen, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
- struct flexi_crypto_context *fctx;
- enum flexi_cipher cipher_type;
- const char *name;
-
- name = crypto_tfm_alg_name(tfm);
- cipher_type = flexi_cipher_type(name);
- if (unlikely(cipher_type == CIPHER_INVALID)) {
- pr_err("unsupported cipher: %s\n", name);
- return -EINVAL;
- }
-
- /* fill crypto context */
- fctx = nctx->u.fctx;
- fctx->flags = 0;
- fctx->w0.cipher_type = cipher_type;
- fctx->w0.aes_keylen = aes_keylen;
- fctx->w0.iv_source = IV_FROM_DPTR;
- fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0);
- /* copy the key to context */
- memcpy(fctx->crypto.u.key, key, keylen);
-
- return 0;
-}
-
-static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int keylen)
+int nitrox_crypto_register(void)
{
- int aes_keylen;
+ int err;
- aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
-}
+ err = nitrox_register_skciphers();
+ if (err)
+ return err;
-static void nitrox_skcipher_callback(struct skcipher_request *skreq,
- int err)
-{
+ err = nitrox_register_aeads();
if (err) {
- pr_err_ratelimited("request failed status 0x%0x\n", err);
- err = -EINVAL;
- }
- skcipher_request_complete(skreq, err);
-}
-
-static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
- struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
- int ivsize = crypto_skcipher_ivsize(cipher);
- struct se_crypto_request *creq;
-
- creq = &nkreq->creq;
- creq->flags = skreq->base.flags;
- creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
-
- /* fill the request */
- creq->ctrl.value = 0;
- creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
- creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT);
- /* param0: length of the data to be encrypted */
- creq->gph.param0 = cpu_to_be16(skreq->cryptlen);
- creq->gph.param1 = 0;
- /* param2: encryption data offset */
- creq->gph.param2 = cpu_to_be16(ivsize);
- creq->gph.param3 = 0;
-
- creq->ctx_handle = nctx->u.ctx_handle;
- creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
-
- /* copy the iv */
- memcpy(creq->iv, skreq->iv, ivsize);
- creq->ivsize = ivsize;
- creq->src = skreq->src;
- creq->dst = skreq->dst;
-
- nkreq->nctx = nctx;
- nkreq->skreq = skreq;
-
- /* send the crypto request */
- return nitrox_process_se_request(nctx->ndev, creq,
- nitrox_skcipher_callback, skreq);
-}
-
-static int nitrox_aes_encrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, true);
-}
-
-static int nitrox_aes_decrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, false);
-}
-
-static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return nitrox_skcipher_setkey(cipher, 0, key, keylen);
-}
-
-static int nitrox_3des_encrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, true);
-}
-
-static int nitrox_3des_decrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, false);
-}
-
-static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
- struct flexi_crypto_context *fctx;
- int aes_keylen, ret;
-
- ret = xts_check_key(tfm, key, keylen);
- if (ret)
- return ret;
-
- keylen /= 2;
-
- aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ nitrox_unregister_skciphers();
+ return err;
}
- fctx = nctx->u.fctx;
- /* copy KEY2 */
- memcpy(fctx->auth.u.key2, (key + keylen), keylen);
-
- return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
-}
-
-static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
- struct flexi_crypto_context *fctx;
- int aes_keylen;
-
- if (keylen < CTR_RFC3686_NONCE_SIZE)
- return -EINVAL;
-
- fctx = nctx->u.fctx;
-
- memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE),
- CTR_RFC3686_NONCE_SIZE);
-
- keylen -= CTR_RFC3686_NONCE_SIZE;
-
- aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
-}
-
-static struct skcipher_alg nitrox_skciphers[] = { {
- .base = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "n5_cbc(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "n5_ecb(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "n5_cfb(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "n5_xts(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_xts_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "n5_rfc3686(ctr(aes))",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
- .setkey = nitrox_aes_ctr_rfc3686_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
-}, {
- .base = {
- .cra_name = "cts(cbc(aes))",
- .cra_driver_name = "n5_cts(cbc(aes))",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "n5_cbc(des3_ede)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = nitrox_3des_setkey,
- .encrypt = nitrox_3des_encrypt,
- .decrypt = nitrox_3des_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "n5_ecb(des3_ede)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = nitrox_3des_setkey,
- .encrypt = nitrox_3des_encrypt,
- .decrypt = nitrox_3des_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}
-
-};
-
-int nitrox_crypto_register(void)
-{
- return crypto_register_skciphers(nitrox_skciphers,
- ARRAY_SIZE(nitrox_skciphers));
+ return 0;
}
void nitrox_crypto_unregister(void)
{
- crypto_unregister_skciphers(nitrox_skciphers,
- ARRAY_SIZE(nitrox_skciphers));
+ nitrox_unregister_aeads();
+ nitrox_unregister_skciphers();
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 863143a8336b..e4be69d7e6e5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -7,6 +7,10 @@
int nitrox_crypto_register(void);
void nitrox_crypto_unregister(void);
+int nitrox_register_aeads(void);
+void nitrox_unregister_aeads(void);
+int nitrox_register_skciphers(void);
+void nitrox_unregister_skciphers(void);
void *crypto_alloc_context(struct nitrox_device *ndev);
void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
@@ -19,7 +23,7 @@ void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
- struct skcipher_request *skreq);
+ void *cb_arg);
void backlog_qflush_work(struct work_struct *work);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 1ad27b1a87c5..a2a452642b38 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -54,7 +54,13 @@
#define NPS_STATS_PKT_DMA_WR_CNT 0x1000190
/* NPS packet registers */
-#define NPS_PKT_INT 0x1040018
+#define NPS_PKT_INT 0x1040018
+#define NPS_PKT_MBOX_INT_LO 0x1040020
+#define NPS_PKT_MBOX_INT_LO_ENA_W1C 0x1040030
+#define NPS_PKT_MBOX_INT_LO_ENA_W1S 0x1040038
+#define NPS_PKT_MBOX_INT_HI 0x1040040
+#define NPS_PKT_MBOX_INT_HI_ENA_W1C 0x1040050
+#define NPS_PKT_MBOX_INT_HI_ENA_W1S 0x1040058
#define NPS_PKT_IN_RERR_HI 0x1040108
#define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120
#define NPS_PKT_IN_RERR_LO 0x1040128
@@ -74,6 +80,10 @@
#define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240
#define NPS_PKT_SLC_ERR_TYPE 0x1040248
#define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260
+/* Mailbox PF->VF PF Accessible Data registers */
+#define NPS_PKT_MBOX_PF_VF_PFDATAX(_i) (0x1040800 + ((_i) * 0x8))
+#define NPS_PKT_MBOX_VF_PF_PFDATAX(_i) (0x1040C00 + ((_i) * 0x8))
+
#define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000))
#define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000))
#define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000))
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
index 5f3cd5fafe04..0196b992280f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -13,18 +13,7 @@ static int firmware_show(struct seq_file *s, void *v)
return 0;
}
-static int firmware_open(struct inode *inode, struct file *file)
-{
- return single_open(file, firmware_show, inode->i_private);
-}
-
-static const struct file_operations firmware_fops = {
- .owner = THIS_MODULE,
- .open = firmware_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(firmware);
static int device_show(struct seq_file *s, void *v)
{
@@ -41,18 +30,7 @@ static int device_show(struct seq_file *s, void *v)
return 0;
}
-static int nitrox_open(struct inode *inode, struct file *file)
-{
- return single_open(file, device_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device);
static int stats_show(struct seq_file *s, void *v)
{
@@ -69,18 +47,7 @@ static int stats_show(struct seq_file *s, void *v)
return 0;
}
-static int nitrox_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stats_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_stats_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(stats);
void nitrox_debugfs_exit(struct nitrox_device *ndev)
{
@@ -97,13 +64,16 @@ int nitrox_debugfs_init(struct nitrox_device *ndev)
return -ENOMEM;
ndev->debugfs_dir = dir;
- f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ f = debugfs_create_file("firmware", 0400, dir, ndev,
+ &firmware_fops);
if (!f)
goto err;
- f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
+ f = debugfs_create_file("device", 0400, dir, ndev,
+ &device_fops);
if (!f)
goto err;
- f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
+ f = debugfs_create_file("stats", 0400, dir, ndev,
+ &stats_fops);
if (!f)
goto err;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
new file mode 100644
index 000000000000..a8d85ffa619c
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __NITROX_DEBUGFS_H
+#define __NITROX_DEBUGFS_H
+
+#include "nitrox_dev.h"
+
+#ifdef CONFIG_DEBUG_FS
+int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_exit(struct nitrox_device *ndev);
+#else
+static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ return 0;
+}
+
+static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+}
+#endif /* !CONFIG_DEBUG_FS */
+
+#endif /* __NITROX_DEBUGFS_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 283e252385fb..0338877b828f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -8,6 +8,8 @@
#include <linux/if.h>
#define VERSION_LEN 32
+/* Maximum queues in PF mode */
+#define MAX_PF_QUEUES 64
/**
* struct nitrox_cmdq - NITROX command queue
@@ -103,6 +105,61 @@ struct nitrox_q_vector {
};
};
+/**
+ * union mbox_msg - Mailbox message data
+ * @type: message type
+ * @opcode: message opcode
+ * @data: message data
+ */
+union mbox_msg {
+ u64 value;
+ struct {
+ u64 type: 2;
+ u64 opcode: 6;
+ u64 data: 58;
+ };
+ struct {
+ u64 type: 2;
+ u64 opcode: 6;
+ u64 chipid: 8;
+ u64 vfid: 8;
+ } id;
+};
+
+/**
+ * struct nitrox_vfdev - NITROX VF device instance in PF
+ * @state: VF device state
+ * @vfno: VF number
+ * @nr_queues: number of queues enabled in VF
+ * @ring: ring to communicate with VF
+ * @msg: Mailbox message data from VF
+ * @mbx_resp: mailbox response counter
+ */
+struct nitrox_vfdev {
+ atomic_t state;
+ int vfno;
+ int nr_queues;
+ int ring;
+ union mbox_msg msg;
+ atomic64_t mbx_resp;
+};
+
+/**
+ * struct nitrox_iov - SR-IOV information
+ * @num_vfs: number of VF(s) enabled
+ * @max_vf_queues: Maximum number of queues allowed for VF
+ * @vfdev: VF(s) devices
+ * @pf2vf_wq: workqueue for PF2VF communication
+ * @msix: MSI-X entry for PF in SR-IOV case
+ */
+struct nitrox_iov {
+ int num_vfs;
+ int max_vf_queues;
+ struct nitrox_vfdev *vfdev;
+ struct workqueue_struct *pf2vf_wq;
+ struct msix_entry msix;
+};
+
/*
* NITROX Device states
*/
@@ -150,6 +207,9 @@ enum vf_mode {
* @ctx_pool: DMA pool for crypto context
* @pkt_inq: Packet input rings
* @qvec: MSI-X queue vectors information
+ * @iov: SR-IOV information
+ * @num_vecs: number of MSI-X vectors
+ * @stats: request statistics
* @hw: hardware information
* @debugfs_dir: debugfs directory
*/
@@ -168,13 +228,13 @@ struct nitrox_device {
int node;
u16 qlen;
u16 nr_queues;
- int num_vfs;
enum vf_mode mode;
struct dma_pool *ctx_pool;
struct nitrox_cmdq *pkt_inq;
struct nitrox_q_vector *qvec;
+ struct nitrox_iov iov;
int num_vecs;
struct nitrox_stats stats;
@@ -213,17 +273,9 @@ static inline bool nitrox_ready(struct nitrox_device *ndev)
return atomic_read(&ndev->state) == __NDEV_READY;
}
-#ifdef CONFIG_DEBUG_FS
-int nitrox_debugfs_init(struct nitrox_device *ndev);
-void nitrox_debugfs_exit(struct nitrox_device *ndev);
-#else
-static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev)
{
- return 0;
+ return atomic_read(&vfdev->state) == __NDEV_READY;
}
-static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{ }
-#endif
-
#endif /* __NITROX_DEV_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index a9b82387cf53..c08d9f33a3b1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -5,10 +5,11 @@
#include "nitrox_csr.h"
#define PLL_REF_CLK 50
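+/* bound the CSR polling loops below instead of spinning forever */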
+#define MAX_CSR_RETRIES 10
/**
* emu_enable_cores - Enable EMU cluster cores.
- * @ndev: N5 device
+ * @ndev: NITROX device
*/
static void emu_enable_cores(struct nitrox_device *ndev)
{
@@ -33,7 +34,7 @@ static void emu_enable_cores(struct nitrox_device *ndev)
/**
* nitrox_config_emu_unit - configure EMU unit.
- * @ndev: N5 device
+ * @ndev: NITROX device
*/
void nitrox_config_emu_unit(struct nitrox_device *ndev)
{
@@ -63,29 +64,26 @@ void nitrox_config_emu_unit(struct nitrox_device *ndev)
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
union nps_pkt_in_instr_ctl pkt_in_ctl;
- union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
union nps_pkt_in_done_cnts pkt_in_cnts;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
+ /* step 1: disable the ring, clear enable bit */
offset = NPS_PKT_IN_INSTR_CTLX(ring);
- /* disable the ring */
pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
pkt_in_ctl.s.enb = 0;
nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
- usleep_range(100, 150);
- /* wait to clear [ENB] */
+ /* step 2: wait to clear [ENB] */
+ usleep_range(100, 150);
do {
pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
- } while (pkt_in_ctl.s.enb);
-
- /* clear off door bell counts */
- offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
- pkt_in_dbell.value = 0;
- pkt_in_dbell.s.dbell = 0xffffffff;
- nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
+ if (!pkt_in_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
- /* clear done counts */
+ /* step 3: clear done counts */
offset = NPS_PKT_IN_DONE_CNTSX(ring);
pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
@@ -95,6 +93,7 @@ static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
union nps_pkt_in_instr_ctl pkt_in_ctl;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
/* 64-byte instruction size */
@@ -107,12 +106,15 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
/* wait for set [ENB] */
do {
pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
- } while (!pkt_in_ctl.s.enb);
+ if (pkt_in_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
}
/**
* nitrox_config_pkt_input_rings - configure Packet Input Rings
- * @ndev: N5 device
+ * @ndev: NITROX device
*/
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
@@ -121,11 +123,14 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
union nps_pkt_in_instr_rsize pkt_in_rsize;
+ union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
u64 offset;
reset_pkt_input_ring(ndev, i);
- /* configure ring base address 16-byte aligned,
+ /*
+ * step 4:
+ * configure ring base address 16-byte aligned,
* size and interrupt threshold.
*/
offset = NPS_PKT_IN_INSTR_BADDRX(i);
@@ -141,6 +146,13 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
offset = NPS_PKT_IN_INT_LEVELSX(i);
nitrox_write_csr(ndev, offset, 0xffffffff);
+ /* step 5: clear off door bell counts */
+ offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
+ pkt_in_dbell.value = 0;
+ pkt_in_dbell.s.dbell = 0xffffffff;
+ nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
+
+ /* enable the ring */
enable_pkt_input_ring(ndev, i);
}
}
@@ -149,21 +161,26 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
union nps_pkt_slc_ctl pkt_slc_ctl;
union nps_pkt_slc_cnts pkt_slc_cnts;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
- /* disable slc port */
+ /* step 1: disable slc port */
offset = NPS_PKT_SLC_CTLX(port);
pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
pkt_slc_ctl.s.enb = 0;
nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
- usleep_range(100, 150);
+ /* step 2 */
+ usleep_range(100, 150);
/* wait to clear [ENB] */
do {
pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
- } while (pkt_slc_ctl.s.enb);
+ if (!pkt_slc_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
- /* clear slc counters */
+ /* step 3: clear slc counters */
offset = NPS_PKT_SLC_CNTSX(port);
pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
@@ -173,12 +190,12 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
union nps_pkt_slc_ctl pkt_slc_ctl;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
offset = NPS_PKT_SLC_CTLX(port);
pkt_slc_ctl.value = 0;
pkt_slc_ctl.s.enb = 1;
-
/*
* 8 trailing 0x00 bytes will be added
* to the end of the outgoing packet.
@@ -191,23 +208,27 @@ void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
/* wait to set [ENB] */
do {
pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
- } while (!pkt_slc_ctl.s.enb);
+ if (pkt_slc_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
}
-static void config_single_pkt_solicit_port(struct nitrox_device *ndev,
- int port)
+static void config_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
union nps_pkt_slc_int_levels pkt_slc_int;
u64 offset;
reset_pkt_solicit_port(ndev, port);
+ /* step 4: configure interrupt levels */
offset = NPS_PKT_SLC_INT_LEVELSX(port);
pkt_slc_int.value = 0;
/* time interrupt threshold */
pkt_slc_int.s.timet = 0x3fffff;
nitrox_write_csr(ndev, offset, pkt_slc_int.value);
+ /* enable the solicit port */
enable_pkt_solicit_port(ndev, port);
}
@@ -216,12 +237,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
int i;
for (i = 0; i < ndev->nr_queues; i++)
- config_single_pkt_solicit_port(ndev, i);
+ config_pkt_solicit_port(ndev, i);
}
/**
* enable_nps_interrupts - enable NPS interrupts
- * @ndev: N5 device.
+ * @ndev: NITROX device.
*
* This includes NPS core, packet in and slc interrupts.
*/
@@ -284,8 +305,8 @@ void nitrox_config_pom_unit(struct nitrox_device *ndev)
}
/**
- * nitrox_config_rand_unit - enable N5 random number unit
- * @ndev: N5 device
+ * nitrox_config_rand_unit - enable NITROX random number unit
+ * @ndev: NITROX device
*/
void nitrox_config_rand_unit(struct nitrox_device *ndev)
{
@@ -361,6 +382,7 @@ void invalidate_lbc(struct nitrox_device *ndev)
{
union lbc_inval_ctl lbc_ctl;
union lbc_inval_status lbc_stat;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
/* invalidate LBC */
@@ -370,10 +392,12 @@ void invalidate_lbc(struct nitrox_device *ndev)
nitrox_write_csr(ndev, offset, lbc_ctl.value);
offset = LBC_INVAL_STATUS;
-
do {
lbc_stat.value = nitrox_read_csr(ndev, offset);
- } while (!lbc_stat.s.done);
+ if (lbc_stat.s.done)
+ break;
+ udelay(50);
+ } while (max_retries--);
}
void nitrox_config_lbc_unit(struct nitrox_device *ndev)
@@ -467,3 +491,31 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
/* copy partname */
strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
}
+
+void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
+{
+ u64 value = ~0ULL;
+ u64 reg_addr;
+
+ /* Mailbox interrupt low enable set register */
+ reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S;
+ nitrox_write_csr(ndev, reg_addr, value);
+
+ /* Mailbox interrupt high enable set register */
+ reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S;
+ nitrox_write_csr(ndev, reg_addr, value);
+}
+
+void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
+{
+ u64 value = ~0ULL;
+ u64 reg_addr;
+
+ /* Mailbox interrupt low enable clear register */
+ reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C;
+ nitrox_write_csr(ndev, reg_addr, value);
+
+ /* Mailbox interrupt high enable clear register */
+ reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C;
+ nitrox_write_csr(ndev, reg_addr, value);
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
index 489ee64c119e..d6606418ba38 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -19,5 +19,7 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
void nitrox_get_hwinfo(struct nitrox_device *ndev);
+void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev);
+void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev);
#endif /* __NITROX_HAL_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index 88a77b8fb3fb..3dec570a190a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -7,12 +7,14 @@
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
+#include "nitrox_mbx.h"
/**
* One vector for each type of ring
* - NPS packet ring, AQMQ ring and ZQMQ ring
*/
#define NR_RING_VECTORS 3
+#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
@@ -219,7 +221,8 @@ static void nps_core_int_tasklet(unsigned long data)
*/
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
- struct nitrox_device *ndev = data;
+ struct nitrox_q_vector *qvec = data;
+ struct nitrox_device *ndev = qvec->ndev;
union nps_core_int_active core_int;
core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
@@ -245,6 +248,10 @@ static irqreturn_t nps_core_int_isr(int irq, void *data)
if (core_int.s.bmi)
clear_bmi_err_intr(ndev);
+ /* Mailbox interrupt */
+ if (core_int.s.mbox)
+ nitrox_pf2vf_mbox_handler(ndev);
+
/* If more work callback the ISR, set resend */
core_int.s.resend = 1;
nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
@@ -275,6 +282,7 @@ void nitrox_unregister_interrupts(struct nitrox_device *ndev)
qvec->valid = false;
}
kfree(ndev->qvec);
+ ndev->qvec = NULL;
pci_free_irq_vectors(pdev);
}
@@ -321,6 +329,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
if (qvec->ring >= ndev->nr_queues)
break;
+ qvec->cmdq = &ndev->pkt_inq[qvec->ring];
snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
/* get the vector number */
vec = pci_irq_vector(pdev, i);
@@ -335,13 +344,13 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
(unsigned long)qvec);
- qvec->cmdq = &ndev->pkt_inq[qvec->ring];
qvec->valid = true;
}
/* request irqs for non ring vectors */
i = NON_RING_MSIX_BASE;
qvec = &ndev->qvec[i];
+ qvec->ndev = ndev;
snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
/* get the vector number */
@@ -356,7 +365,6 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
(unsigned long)qvec);
- qvec->ndev = ndev;
qvec->valid = true;
return 0;
@@ -365,3 +373,81 @@ irq_fail:
nitrox_unregister_interrupts(ndev);
return ret;
}
+
+void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ int i;
+
+ for (i = 0; i < ndev->num_vecs; i++) {
+ struct nitrox_q_vector *qvec;
+ int vec;
+
+ qvec = ndev->qvec + i;
+ if (!qvec->valid)
+ continue;
+
+ vec = ndev->iov.msix.vector;
+ irq_set_affinity_hint(vec, NULL);
+ free_irq(vec, qvec);
+
+ tasklet_disable(&qvec->resp_tasklet);
+ tasklet_kill(&qvec->resp_tasklet);
+ qvec->valid = false;
+ }
+ kfree(ndev->qvec);
+ ndev->qvec = NULL;
+ pci_disable_msix(pdev);
+}
+
+int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ struct nitrox_q_vector *qvec;
+ int vec, cpu;
+ int ret;
+
+ /*
+ * Only the non-ring vector, i.e. entry 192, is available
+ * to the PF in SR-IOV mode.
+ */
+ ndev->iov.msix.entry = NON_RING_MSIX_BASE;
+ ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
+ if (ret) {
+ dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
+ NON_RING_MSIX_BASE);
+ return ret;
+ }
+
+ qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
+ if (!qvec) {
+ pci_disable_msix(pdev);
+ return -ENOMEM;
+ }
+ qvec->ndev = ndev;
+
+ ndev->qvec = qvec;
+ ndev->num_vecs = NR_NON_RING_VECTORS;
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
+ NON_RING_MSIX_BASE);
+
+ vec = ndev->iov.msix.vector;
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
+ NON_RING_MSIX_BASE);
+ goto iov_irq_fail;
+ }
+ cpu = num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
+ (unsigned long)qvec);
+ qvec->valid = true;
+
+ return 0;
+
+iov_irq_fail:
+ nitrox_sriov_unregister_interrupts(ndev);
+ return ret;
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
index 63418a6cc52c..1062c9336c1f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h
@@ -6,5 +6,7 @@
int nitrox_register_interrupts(struct nitrox_device *ndev);
void nitrox_unregister_interrupts(struct nitrox_device *ndev);
+int nitrox_sriov_register_interupts(struct nitrox_device *ndev);
+void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev);
#endif /* __NITROX_ISR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 2260efa42308..9138bae12521 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
void *crypto_alloc_context(struct nitrox_device *ndev)
{
struct ctx_hdr *ctx;
+ struct crypto_ctx_hdr *chdr;
void *vaddr;
dma_addr_t dma;
+ chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
+ if (!chdr)
+ return NULL;
+
vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
- if (!vaddr)
+ if (!vaddr) {
+ kfree(chdr);
return NULL;
+ }
/* fill meta data */
ctx = vaddr;
@@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
ctx->dma = dma;
ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
- return ((u8 *)vaddr + sizeof(struct ctx_hdr));
+ chdr->pool = ndev->ctx_pool;
+ chdr->dma = dma;
+ chdr->vaddr = vaddr;
+
+ return chdr;
}
/**
@@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
*/
void crypto_free_context(void *ctx)
{
- struct ctx_hdr *ctxp;
+ struct crypto_ctx_hdr *ctxp;
if (!ctx)
return;
- ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
- dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
+ ctxp = ctx;
+ dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
+ kfree(ctxp);
}
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 6595c95af9f1..014e9863c20e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -1,6 +1,5 @@
#include <linux/aer.h>
#include <linux/delay.h>
-#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -13,9 +12,9 @@
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
+#include "nitrox_debugfs.h"
#define CNN55XX_DEV_ID 0x12
-#define MAX_PF_QUEUES 64
#define UCODE_HLEN 48
#define SE_GROUP 0
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
new file mode 100644
index 000000000000..02ee95064841
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/workqueue.h>
+
+#include "nitrox_csr.h"
+#include "nitrox_hal.h"
+#include "nitrox_dev.h"
+
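+/* each VF owns max_vf_queues consecutive rings */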
+#define RING_TO_VFNO(_x, _y) ((_x) / (_y))
+
+/**
+ * enum mbx_msg_type - Mailbox message types
+ */
+enum mbx_msg_type {
+ MBX_MSG_TYPE_NOP,
+ MBX_MSG_TYPE_REQ,
+ MBX_MSG_TYPE_ACK,
+ MBX_MSG_TYPE_NACK,
+};
+
+/**
+ * enum mbx_msg_opcode - Mailbox message opcodes
+ */
+enum mbx_msg_opcode {
+ MSG_OP_VF_MODE = 1,
+ MSG_OP_VF_UP,
+ MSG_OP_VF_DOWN,
+ MSG_OP_CHIPID_VFID,
+};
+
+struct pf2vf_work {
+ struct nitrox_vfdev *vfdev;
+ struct nitrox_device *ndev;
+ struct work_struct pf2vf_resp;
+};
+
+static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
+{
+ u64 reg_addr;
+
+ reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
+ return nitrox_read_csr(ndev, reg_addr);
+}
+
+static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
+ int ring)
+{
+ u64 reg_addr;
+
+ reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
+ nitrox_write_csr(ndev, reg_addr, value);
+}
+
+static void pf2vf_send_response(struct nitrox_device *ndev,
+ struct nitrox_vfdev *vfdev)
+{
+ union mbox_msg msg;
+
+ msg.value = vfdev->msg.value;
+
+ switch (vfdev->msg.opcode) {
+ case MSG_OP_VF_MODE:
+ msg.data = ndev->mode;
+ break;
+ case MSG_OP_VF_UP:
+ vfdev->nr_queues = vfdev->msg.data;
+ atomic_set(&vfdev->state, __NDEV_READY);
+ break;
+ case MSG_OP_CHIPID_VFID:
+ msg.id.chipid = ndev->idx;
+ msg.id.vfid = vfdev->vfno;
+ break;
+ case MSG_OP_VF_DOWN:
+ vfdev->nr_queues = 0;
+ atomic_set(&vfdev->state, __NDEV_NOT_READY);
+ break;
+ default:
+ msg.type = MBX_MSG_TYPE_NOP;
+ break;
+ }
+
+ if (msg.type == MBX_MSG_TYPE_NOP)
+ return;
+
+ /* send ACK to VF */
+ msg.type = MBX_MSG_TYPE_ACK;
+ pf2vf_write_mbox(ndev, msg.value, vfdev->ring);
+
+ vfdev->msg.value = 0;
+ atomic64_inc(&vfdev->mbx_resp);
+}
+
+static void pf2vf_resp_handler(struct work_struct *work)
+{
+ struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
+ pf2vf_resp);
+ struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
+ struct nitrox_device *ndev = pf2vf_resp->ndev;
+
+ switch (vfdev->msg.type) {
+ case MBX_MSG_TYPE_REQ:
+ /* process the request from VF */
+ pf2vf_send_response(ndev, vfdev);
+ break;
+ case MBX_MSG_TYPE_ACK:
+ case MBX_MSG_TYPE_NACK:
+ break;
+ }
+
+ kfree(pf2vf_resp);
+}
+
+void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
+{
+ struct nitrox_vfdev *vfdev;
+ struct pf2vf_work *pfwork;
+ u64 value, reg_addr;
+ u32 i;
+ int vfno;
+
+ /* loop for VF(0..63) */
+ reg_addr = NPS_PKT_MBOX_INT_LO;
+ value = nitrox_read_csr(ndev, reg_addr);
+ for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ /* get the vfno from ring */
+ vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
+ vfdev = ndev->iov.vfdev + vfno;
+ vfdev->ring = i;
+ /* fill the vf mailbox data */
+ vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
+ pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
+ if (!pfwork)
+ continue;
+
+ pfwork->vfdev = vfdev;
+ pfwork->ndev = ndev;
+ INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
+ queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
+ /* clear the corresponding vf bit */
+ nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
+ }
+
+ /* loop for VF(64..127) */
+ reg_addr = NPS_PKT_MBOX_INT_HI;
+ value = nitrox_read_csr(ndev, reg_addr);
+ for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ /* get the vfno from ring */
+ vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
+ vfdev = ndev->iov.vfdev + vfno;
+ vfdev->ring = (i + 64);
+ /* fill the vf mailbox data */
+ vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
+
+ pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
+ if (!pfwork)
+ continue;
+
+ pfwork->vfdev = vfdev;
+ pfwork->ndev = ndev;
+ INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
+ queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
+ /* clear the corresponding vf bit */
+ nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
+ }
+}
+
+int nitrox_mbox_init(struct nitrox_device *ndev)
+{
+ struct nitrox_vfdev *vfdev;
+ int i;
+
+ ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
+ sizeof(struct nitrox_vfdev), GFP_KERNEL);
+ if (!ndev->iov.vfdev)
+ return -ENOMEM;
+
+ for (i = 0; i < ndev->iov.num_vfs; i++) {
+ vfdev = ndev->iov.vfdev + i;
+ vfdev->vfno = i;
+ }
+
+ /* allocate pf2vf response workqueue */
+ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
+ if (!ndev->iov.pf2vf_wq) {
+ kfree(ndev->iov.vfdev);
+ return -ENOMEM;
+ }
+ /* enable pf2vf mailbox interrupts */
+ enable_pf2vf_mbox_interrupts(ndev);
+
+ return 0;
+}
+
+void nitrox_mbox_cleanup(struct nitrox_device *ndev)
+{
+ /* disable pf2vf mailbox interrupts */
+ disable_pf2vf_mbox_interrupts(ndev);
+ /* destroy workqueue */
+ if (ndev->iov.pf2vf_wq)
+ destroy_workqueue(ndev->iov.pf2vf_wq);
+
+ kfree(ndev->iov.vfdev);
+ ndev->iov.pf2vf_wq = NULL;
+ ndev->iov.vfdev = NULL;
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
new file mode 100644
index 000000000000..5008399775a9
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __NITROX_MBX_H
+#define __NITROX_MBX_H
+
+int nitrox_mbox_init(struct nitrox_device *ndev);
+void nitrox_mbox_cleanup(struct nitrox_device *ndev);
+void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev);
+
+#endif /* __NITROX_MBX_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index d091b6f5f5dd..76c0f0be7233 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -7,6 +7,9 @@
#include "nitrox_dev.h"
+#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
+#define PRIO 4001
+
/**
* struct gphdr - General purpose Header
* @param0: first parameter.
@@ -46,13 +49,6 @@ union se_req_ctrl {
} s;
};
-struct nitrox_sglist {
- u16 len;
- u16 raz0;
- u32 raz1;
- dma_addr_t dma;
-};
-
#define MAX_IV_LEN 16
/**
@@ -62,8 +58,10 @@ struct nitrox_sglist {
* @ctx_handle: Crypto context handle.
* @gph: GP Header
* @ctrl: Request Information.
- * @in: Input sglist
- * @out: Output sglist
+ * @orh: ORH address
+ * @comp: completion address
+ * @src: Input sglist
+ * @dst: Output sglist
*/
struct se_crypto_request {
u8 opcode;
@@ -73,9 +71,8 @@ struct se_crypto_request {
struct gphdr gph;
union se_req_ctrl ctrl;
-
- u8 iv[MAX_IV_LEN];
- u16 ivsize;
+ u64 *orh;
+ u64 *comp;
struct scatterlist *src;
struct scatterlist *dst;
@@ -110,6 +107,18 @@ enum flexi_cipher {
CIPHER_INVALID
};
+enum flexi_auth {
+ AUTH_NULL = 0,
+ AUTH_MD5,
+ AUTH_SHA1,
+ AUTH_SHA2_SHA224,
+ AUTH_SHA2_SHA256,
+ AUTH_SHA2_SHA384,
+ AUTH_SHA2_SHA512,
+ AUTH_GMAC,
+ AUTH_INVALID
+};
+
/**
* struct crypto_keys - Crypto keys
* @key: Encryption key or KEY1 for AES-XTS
@@ -136,6 +145,32 @@ struct auth_keys {
u8 opad[64];
};
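+/*
+ * union fc_ctx_flags - first word of the flexi crypto context; the
+ * device reads it as a big-endian 64-bit value
+ */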
+union fc_ctx_flags {
+ __be64 f;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cipher_type : 4;
+ u64 reserved_59 : 1;
+ u64 aes_keylen : 2;
+ u64 iv_source : 1;
+ u64 hash_type : 4;
+ u64 reserved_49_51 : 3;
+ u64 auth_input_type: 1;
+ u64 mac_len : 8;
+ u64 reserved_0_39 : 40;
+#else
+ u64 reserved_0_39 : 40;
+ u64 mac_len : 8;
+ u64 auth_input_type: 1;
+ u64 reserved_49_51 : 3;
+ u64 hash_type : 4;
+ u64 iv_source : 1;
+ u64 aes_keylen : 2;
+ u64 reserved_59 : 1;
+ u64 cipher_type : 4;
+#endif
+ } w0;
+};
/**
* struct flexi_crypto_context - Crypto context
* @cipher_type: Encryption cipher type
@@ -150,49 +185,30 @@ struct auth_keys {
* @auth: Authentication keys
*/
struct flexi_crypto_context {
- union {
- __be64 flags;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 cipher_type : 4;
- u64 reserved_59 : 1;
- u64 aes_keylen : 2;
- u64 iv_source : 1;
- u64 hash_type : 4;
- u64 reserved_49_51 : 3;
- u64 auth_input_type: 1;
- u64 mac_len : 8;
- u64 reserved_0_39 : 40;
-#else
- u64 reserved_0_39 : 40;
- u64 mac_len : 8;
- u64 auth_input_type: 1;
- u64 reserved_49_51 : 3;
- u64 hash_type : 4;
- u64 iv_source : 1;
- u64 aes_keylen : 2;
- u64 reserved_59 : 1;
- u64 cipher_type : 4;
-#endif
- } w0;
- };
-
+ union fc_ctx_flags flags;
struct crypto_keys crypto;
struct auth_keys auth;
};
+struct crypto_ctx_hdr {
+ struct dma_pool *pool;
+ dma_addr_t dma;
+ void *vaddr;
+};
+
struct nitrox_crypto_ctx {
struct nitrox_device *ndev;
union {
u64 ctx_handle;
struct flexi_crypto_context *fctx;
} u;
+ struct crypto_ctx_hdr *chdr;
};
struct nitrox_kcrypt_request {
struct se_crypto_request creq;
- struct nitrox_crypto_ctx *nctx;
- struct skcipher_request *skreq;
+ u8 *src;
+ u8 *dst;
};
/**
@@ -369,26 +385,19 @@ struct nitrox_sgcomp {
/*
* struct nitrox_sgtable - SG list information
- * @map_cnt: Number of buffers mapped
- * @nr_comp: Number of sglist components
+ * @sgmap_cnt: Number of buffers mapped
* @total_bytes: Total bytes in sglist.
- * @len: Total sglist components length.
- * @dma: DMA address of sglist component.
- * @dir: DMA direction.
- * @buf: crypto request buffer.
- * @sglist: SG list of input/output buffers.
+ * @sgcomp_len: Total sglist components length.
+ * @sgcomp_dma: DMA address of sglist component.
+ * @sg: crypto request buffer.
* @sgcomp: sglist component for NITROX.
*/
struct nitrox_sgtable {
- u8 map_bufs_cnt;
- u8 nr_sgcomp;
+ u8 sgmap_cnt;
u16 total_bytes;
- u32 len;
- dma_addr_t dma;
- enum dma_data_direction dir;
-
- struct scatterlist *buf;
- struct nitrox_sglist *sglist;
+ u32 sgcomp_len;
+ dma_addr_t sgcomp_dma;
+ struct scatterlist *sg;
struct nitrox_sgcomp *sgcomp;
};
@@ -398,13 +407,11 @@ struct nitrox_sgtable {
#define COMP_HLEN 8
struct resp_hdr {
- u64 orh;
- dma_addr_t orh_dma;
- u64 completion;
- dma_addr_t completion_dma;
+ u64 *orh;
+ u64 *completion;
};
-typedef void (*completion_t)(struct skcipher_request *skreq, int err);
+typedef void (*completion_t)(void *arg, int err);
/**
* struct nitrox_softreq - Represents the NIROX Request.
@@ -427,7 +434,6 @@ struct nitrox_softreq {
u32 flags;
gfp_t gfp;
atomic_t status;
- bool inplace;
struct nitrox_device *ndev;
struct nitrox_cmdq *cmdq;
@@ -440,7 +446,201 @@ struct nitrox_softreq {
unsigned long tstamp;
completion_t callback;
- struct skcipher_request *skreq;
+ void *cb_arg;
};
+static inline int flexi_aes_keylen(int keylen)
+{
+ int aes_keylen;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ aes_keylen = 1;
+ break;
+ case AES_KEYSIZE_192:
+ aes_keylen = 2;
+ break;
+ case AES_KEYSIZE_256:
+ aes_keylen = 3;
+ break;
+ default:
+ aes_keylen = -EINVAL;
+ break;
+ }
+ return aes_keylen;
+}
+
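+/**
+ * alloc_req_buf - allocate a request buffer
+ * @nents: number of scatterlist entries required
+ * @extralen: extra bytes placed ahead of the entries (IV, or
+ * ORH + COMPLETION words)
+ * @gfp: allocation flags
+ */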
+static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp)
+{
+ size_t size;
+
+ size = sizeof(struct scatterlist) * nents;
+ size += extralen;
+
+ return kzalloc(size, gfp);
+}
+
+/**
+ * create_single_sg - Point SG entry to the data
+ * @sg: Destination SG list
+ * @buf: Data
+ * @buflen: Data length
+ *
+ * Returns next free entry in the destination SG list
+ **/
+static inline struct scatterlist *create_single_sg(struct scatterlist *sg,
+ void *buf, int buflen)
+{
+ sg_set_buf(sg, buf, buflen);
+ sg++;
+ return sg;
+}
+
+/**
+ * create_multi_sg - Create multiple sg entries with buflen data length from
+ * source sglist
+ * @to_sg: Destination SG list
+ * @from_sg: Source SG list
+ * @buflen: Data length
+ *
+ * Returns next free entry in the destination SG list
+ **/
+static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
+ struct scatterlist *from_sg,
+ int buflen)
+{
+ struct scatterlist *sg = to_sg;
+ unsigned int sglen;
+
+ for (; buflen; buflen -= sglen) {
+ sglen = from_sg->length;
+ if (sglen > buflen)
+ sglen = buflen;
+
+ sg_set_buf(sg, sg_virt(from_sg), sglen);
+ from_sg = sg_next(from_sg);
+ sg++;
+ }
+
+ return sg;
+}
+
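+/* seed the ORH and completion words with PENDING_SIG; the response
+ * handler polls them to detect command completion
+ */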
+static inline void set_orh_value(u64 *orh)
+{
+ WRITE_ONCE(*orh, PENDING_SIG);
+}
+
+static inline void set_comp_value(u64 *comp)
+{
+ WRITE_ONCE(*comp, PENDING_SIG);
+}
+
+static inline int alloc_src_req_buf(struct nitrox_kcrypt_request *nkreq,
+ int nents, int ivsize)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp);
+ if (!nkreq->src)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void nitrox_creq_copy_iv(char *dst, char *src, int size)
+{
+ memcpy(dst, src, size);
+}
+
+static inline struct scatterlist *nitrox_creq_src_sg(char *iv, int ivsize)
+{
+ return (struct scatterlist *)(iv + ivsize);
+}
+
+static inline void nitrox_creq_set_src_sg(struct nitrox_kcrypt_request *nkreq,
+ int nents, int ivsize,
+ struct scatterlist *src, int buflen)
+{
+ char *iv = nkreq->src;
+ struct scatterlist *sg;
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ creq->src = nitrox_creq_src_sg(iv, ivsize);
+ sg = creq->src;
+ sg_init_table(sg, nents);
+
+ /* Input format:
+ * +----+----------------+
+ * | IV | SRC sg entries |
+ * +----+----------------+
+ */
+
+ /* IV */
+ sg = create_single_sg(sg, iv, ivsize);
+ /* SRC entries */
+ create_multi_sg(sg, src, buflen);
+}
+
+static inline int alloc_dst_req_buf(struct nitrox_kcrypt_request *nkreq,
+ int nents)
+{
+ int extralen = ORH_HLEN + COMP_HLEN;
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp);
+ if (!nkreq->dst)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void nitrox_creq_set_orh(struct nitrox_kcrypt_request *nkreq)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ creq->orh = (u64 *)(nkreq->dst);
+ set_orh_value(creq->orh);
+}
+
+static inline void nitrox_creq_set_comp(struct nitrox_kcrypt_request *nkreq)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ creq->comp = (u64 *)(nkreq->dst + ORH_HLEN);
+ set_comp_value(creq->comp);
+}
+
+static inline struct scatterlist *nitrox_creq_dst_sg(char *dst)
+{
+ return (struct scatterlist *)(dst + ORH_HLEN + COMP_HLEN);
+}
+
+static inline void nitrox_creq_set_dst_sg(struct nitrox_kcrypt_request *nkreq,
+ int nents, int ivsize,
+ struct scatterlist *dst, int buflen)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct scatterlist *sg;
+ char *iv = nkreq->src;
+
+ creq->dst = nitrox_creq_dst_sg(nkreq->dst);
+ sg = creq->dst;
+ sg_init_table(sg, nents);
+
+ /* Output format:
+ * +-----+----+----------------+-----------------+
+ * | ORH | IV | DST sg entries | COMPLETION Bytes|
+ * +-----+----+----------------+-----------------+
+ */
+
+ /* ORH */
+ sg = create_single_sg(sg, creq->orh, ORH_HLEN);
+ /* IV */
+ sg = create_single_sg(sg, iv, ivsize);
+ /* DST entries */
+ sg = create_multi_sg(sg, dst, buflen);
+ /* COMPLETION Bytes */
+ create_single_sg(sg, creq->comp, COMP_HLEN);
+}
+
#endif /* __NITROX_REQ_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 3987cd84c033..e34e4df8fd24 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -13,7 +13,6 @@
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
-#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define REQ_NOT_POSTED 1
#define REQ_BACKLOG 2
@@ -52,58 +51,26 @@ static inline int incr_index(int index, int count, int max)
return index;
}
-/**
- * dma_free_sglist - unmap and free the sg lists.
- * @ndev: N5 device
- * @sgtbl: SG table
- */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
struct nitrox_device *ndev = sr->ndev;
struct device *dev = DEV(ndev);
- struct nitrox_sglist *sglist;
-
- /* unmap in sgbuf */
- sglist = sr->in.sglist;
- if (!sglist)
- goto out_unmap;
-
- /* unmap iv */
- dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
- /* unmpa src sglist */
- dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
- /* unamp gather component */
- dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
- kfree(sr->in.sglist);
+
+ dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
+ DMA_TO_DEVICE);
kfree(sr->in.sgcomp);
- sr->in.sglist = NULL;
- sr->in.buf = NULL;
- sr->in.map_bufs_cnt = 0;
-
-out_unmap:
- /* unmap out sgbuf */
- sglist = sr->out.sglist;
- if (!sglist)
- return;
-
- /* unmap orh */
- dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
-
- /* unmap dst sglist */
- if (!sr->inplace) {
- dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
- sr->out.dir);
- }
- /* unmap completion */
- dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
+ sr->in.sg = NULL;
+ sr->in.sgmap_cnt = 0;
- /* unmap scatter component */
- dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
- kfree(sr->out.sglist);
+ dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
+ DMA_TO_DEVICE);
kfree(sr->out.sgcomp);
- sr->out.sglist = NULL;
- sr->out.buf = NULL;
- sr->out.map_bufs_cnt = 0;
+ sr->out.sg = NULL;
+ sr->out.sgmap_cnt = 0;
}
static void softreq_destroy(struct nitrox_softreq *sr)
@@ -116,7 +83,7 @@ static void softreq_destroy(struct nitrox_softreq *sr)
* create_sg_component - create SG components for N5 device.
* @sr: Request structure
* @sgtbl: SG table
- * @nr_comp: total number of components required
+ * @map_nents: number of dma mapped entries
*
* Component structure
*
@@ -140,7 +107,7 @@ static int create_sg_component(struct nitrox_softreq *sr,
{
struct nitrox_device *ndev = sr->ndev;
struct nitrox_sgcomp *sgcomp;
- struct nitrox_sglist *sglist;
+ struct scatterlist *sg;
dma_addr_t dma;
size_t sz_comp;
int i, j, nr_sgcomp;
@@ -154,17 +121,15 @@ static int create_sg_component(struct nitrox_softreq *sr,
return -ENOMEM;
sgtbl->sgcomp = sgcomp;
- sgtbl->nr_sgcomp = nr_sgcomp;
- sglist = sgtbl->sglist;
+ sg = sgtbl->sg;
/* populate device sg component */
for (i = 0; i < nr_sgcomp; i++) {
- for (j = 0; j < 4; j++) {
- sgcomp->len[j] = cpu_to_be16(sglist->len);
- sgcomp->dma[j] = cpu_to_be64(sglist->dma);
- sglist++;
+ for (j = 0; j < 4 && sg; j++) {
+ sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
+ sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
+ sg = sg_next(sg);
}
- sgcomp++;
}
/* map the device sg component */
dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
@@ -174,8 +139,8 @@ static int create_sg_component(struct nitrox_softreq *sr,
return -ENOMEM;
}
- sgtbl->dma = dma;
- sgtbl->len = sz_comp;
+ sgtbl->sgcomp_dma = dma;
+ sgtbl->sgcomp_len = sz_comp;
return 0;
}
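
create_sg_component() packs four DMA-mapped entries per hardware component and relies on kzalloc plus the NULL-terminated sg walk to leave unused slots zeroed. A minimal sketch of the sizing arithmetic, with an example count (the struct comes from nitrox_req.h; the gfp choice is the caller's):

#include <linux/kernel.h>
#include <linux/slab.h>
#include "nitrox_req.h"

/* Sketch: 6 mapped entries -> roundup(6, 4) / 4 = 2 components;
 * the last two slots of the second component stay zero-filled.
 */
static struct nitrox_sgcomp *sketch_alloc_sgcomp(int map_nents,
						 size_t *sz, gfp_t gfp)
{
	int nr_sgcomp = roundup(map_nents, 4) / 4;

	*sz = nr_sgcomp * sizeof(struct nitrox_sgcomp);
	return kzalloc(*sz, gfp);	/* later mapped DMA_TO_DEVICE */
}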
@@ -193,66 +158,27 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
{
struct device *dev = DEV(sr->ndev);
struct scatterlist *sg = req->src;
- struct nitrox_sglist *glist;
int i, nents, ret = 0;
- dma_addr_t dma;
- size_t sz;
- nents = sg_nents(req->src);
+ nents = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
- /* creater gather list IV and src entries */
- sz = roundup((1 + nents), 4) * sizeof(*glist);
- glist = kzalloc(sz, sr->gfp);
- if (!glist)
- return -ENOMEM;
+ for_each_sg(req->src, sg, nents, i)
+ sr->in.total_bytes += sg_dma_len(sg);
- sr->in.sglist = glist;
- /* map IV */
- dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, dma)) {
- ret = -EINVAL;
- goto iv_map_err;
- }
-
- sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
- /* map src entries */
- nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
- if (!nents) {
- ret = -EINVAL;
- goto src_map_err;
- }
- sr->in.buf = req->src;
-
- /* store the mappings */
- glist->len = req->ivsize;
- glist->dma = dma;
- glist++;
- sr->in.total_bytes += req->ivsize;
-
- for_each_sg(req->src, sg, nents, i) {
- glist->len = sg_dma_len(sg);
- glist->dma = sg_dma_address(sg);
- sr->in.total_bytes += glist->len;
- glist++;
- }
- /* roundup map count to align with entires in sg component */
- sr->in.map_bufs_cnt = (1 + nents);
-
- /* create NITROX gather component */
- ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
+ sr->in.sg = req->src;
+ sr->in.sgmap_cnt = nents;
+ ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
if (ret)
goto incomp_err;
return 0;
incomp_err:
- dma_unmap_sg(dev, req->src, nents, sr->in.dir);
- sr->in.map_bufs_cnt = 0;
-src_map_err:
- dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
-iv_map_err:
- kfree(sr->in.sglist);
- sr->in.sglist = NULL;
+ dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+ sr->in.sgmap_cnt = 0;
return ret;
}
@@ -260,104 +186,25 @@ static int dma_map_outbufs(struct nitrox_softreq *sr,
struct se_crypto_request *req)
{
struct device *dev = DEV(sr->ndev);
- struct nitrox_sglist *glist = sr->in.sglist;
- struct nitrox_sglist *slist;
- struct scatterlist *sg;
- int i, nents, map_bufs_cnt, ret = 0;
- size_t sz;
-
- nents = sg_nents(req->dst);
-
- /* create scatter list ORH, IV, dst entries and Completion header */
- sz = roundup((3 + nents), 4) * sizeof(*slist);
- slist = kzalloc(sz, sr->gfp);
- if (!slist)
- return -ENOMEM;
-
- sr->out.sglist = slist;
- sr->out.dir = DMA_BIDIRECTIONAL;
- /* map ORH */
- sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
- sr->out.dir);
- if (dma_mapping_error(dev, sr->resp.orh_dma)) {
- ret = -EINVAL;
- goto orh_map_err;
- }
+ int nents, ret = 0;
- /* map completion */
- sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
- COMP_HLEN, sr->out.dir);
- if (dma_mapping_error(dev, sr->resp.completion_dma)) {
- ret = -EINVAL;
- goto compl_map_err;
- }
+ nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
- sr->inplace = (req->src == req->dst) ? true : false;
- /* out place */
- if (!sr->inplace) {
- nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
- if (!nents) {
- ret = -EINVAL;
- goto dst_map_err;
- }
- }
- sr->out.buf = req->dst;
-
- /* store the mappings */
- /* orh */
- slist->len = ORH_HLEN;
- slist->dma = sr->resp.orh_dma;
- slist++;
-
- /* copy the glist mappings */
- if (sr->inplace) {
- nents = sr->in.map_bufs_cnt - 1;
- map_bufs_cnt = sr->in.map_bufs_cnt;
- while (map_bufs_cnt--) {
- slist->len = glist->len;
- slist->dma = glist->dma;
- slist++;
- glist++;
- }
- } else {
- /* copy iv mapping */
- slist->len = glist->len;
- slist->dma = glist->dma;
- slist++;
- /* copy remaining maps */
- for_each_sg(req->dst, sg, nents, i) {
- slist->len = sg_dma_len(sg);
- slist->dma = sg_dma_address(sg);
- slist++;
- }
- }
-
- /* completion */
- slist->len = COMP_HLEN;
- slist->dma = sr->resp.completion_dma;
-
- sr->out.map_bufs_cnt = (3 + nents);
-
- ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
+ sr->out.sg = req->dst;
+ sr->out.sgmap_cnt = nents;
+ ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
if (ret)
goto outcomp_map_err;
return 0;
outcomp_map_err:
- if (!sr->inplace)
- dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
- sr->out.map_bufs_cnt = 0;
- sr->out.buf = NULL;
-dst_map_err:
- dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
- sr->resp.completion_dma = 0;
-compl_map_err:
- dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
- sr->resp.orh_dma = 0;
-orh_map_err:
- kfree(sr->out.sglist);
- sr->out.sglist = NULL;
+ dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+ sr->out.sgmap_cnt = 0;
+ sr->out.sg = NULL;
return ret;
}
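
Both map routines now share one shape: dma_map_sg() the caller's list, record sg and sgmap_cnt, build the component array, and unwind on failure. One hedged aside: Documentation/DMA-API-HOWTO.txt asks that dma_unmap_sg() receive the nents originally passed to dma_map_sg(), not the (possibly smaller) count it returned, so a strictly by-the-book sketch keeps both counts:

/* Sketch of the map/unwind pattern, preserving the original nents
 * for the unmap call as the DMA-API documentation requests.
 */
static int sketch_map_in(struct device *dev, struct nitrox_softreq *sr,
			 struct se_crypto_request *req)
{
	int orig_nents = sg_nents(req->src);
	int nents, ret;

	nents = dma_map_sg(dev, req->src, orig_nents, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	ret = create_sg_component(sr, &sr->in, nents);
	if (ret)
		dma_unmap_sg(dev, req->src, orig_nents, DMA_BIDIRECTIONAL);
	return ret;
}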
@@ -422,6 +269,8 @@ static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
smp_mb__after_atomic();
return true;
}
+ /* sync with other cpus */
+ smp_mb__after_atomic();
return false;
}
@@ -477,8 +326,6 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
spin_lock_bh(&cmdq->backlog_qlock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
- struct skcipher_request *skreq;
-
/* submit until space available */
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
ret = -ENOSPC;
@@ -490,12 +337,8 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* sync with other cpus */
smp_mb__after_atomic();
- skreq = sr->skreq;
/* post the command */
post_se_instr(sr, cmdq);
-
- /* backlog requests are posted, wakeup with -EINPROGRESS */
- skcipher_request_complete(skreq, -EINPROGRESS);
}
spin_unlock_bh(&cmdq->backlog_qlock);
@@ -518,7 +361,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
}
/* add to backlog list */
backlog_list_add(sr, cmdq);
- return -EBUSY;
+ return -EINPROGRESS;
}
post_se_instr(sr, cmdq);
@@ -535,7 +378,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t callback,
- struct skcipher_request *skreq)
+ void *cb_arg)
{
struct nitrox_softreq *sr;
dma_addr_t ctx_handle = 0;
@@ -552,12 +395,12 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
sr->flags = req->flags;
sr->gfp = req->gfp;
sr->callback = callback;
- sr->skreq = skreq;
+ sr->cb_arg = cb_arg;
atomic_set(&sr->status, REQ_NOT_POSTED);
- WRITE_ONCE(sr->resp.orh, PENDING_SIG);
- WRITE_ONCE(sr->resp.completion, PENDING_SIG);
+ sr->resp.orh = req->orh;
+ sr->resp.completion = req->comp;
ret = softreq_map_iobuf(sr, req);
if (ret) {
@@ -598,13 +441,13 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* fill the packet instruction */
/* word 0 */
- sr->instr.dptr0 = cpu_to_be64(sr->in.dma);
+ sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);
/* word 1 */
sr->instr.ih.value = 0;
sr->instr.ih.s.g = 1;
- sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
- sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
+ sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);
@@ -626,11 +469,11 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* word 4 */
sr->instr.slc.value[0] = 0;
- sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);
/* word 5 */
- sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);
+ sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);
/*
* No conversion for front data,
@@ -664,6 +507,24 @@ void backlog_qflush_work(struct work_struct *work)
post_backlog_cmds(cmdq);
}
+static bool sr_completed(struct nitrox_softreq *sr)
+{
+ u64 orh = READ_ONCE(*sr->resp.orh);
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
+
+ if ((orh != PENDING_SIG) && (orh & 0xff))
+ return true;
+
+ while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("comp not done\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
/**
 * process_response_list - process completed requests
* @ndev: N5 device
@@ -675,8 +536,6 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
struct nitrox_softreq *sr;
- struct skcipher_request *skreq;
- completion_t callback;
int req_completed = 0, err = 0, budget;
/* check all pending requests */
@@ -691,13 +550,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
break;
/* check orh and completion bytes updates */
- if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
+ if (!sr_completed(sr)) {
/* request not completed, check for timeout */
if (!cmd_timeout(sr->tstamp, ndev->timeout))
break;
dev_err_ratelimited(DEV(ndev),
"Request timeout, orh 0x%016llx\n",
- READ_ONCE(sr->resp.orh));
+ READ_ONCE(*sr->resp.orh));
}
atomic_dec(&cmdq->pending_count);
atomic64_inc(&ndev->stats.completed);
@@ -706,15 +565,12 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
/* remove from response list */
response_list_del(sr, cmdq);
- callback = sr->callback;
- skreq = sr->skreq;
-
/* ORH error code */
- err = READ_ONCE(sr->resp.orh) & 0xff;
+ err = READ_ONCE(*sr->resp.orh) & 0xff;
softreq_destroy(sr);
- if (callback)
- callback(skreq, err);
+ if (sr->callback)
+ sr->callback(sr->cb_arg, err);
req_completed++;
}
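
With the response words now owned by the submitter (req->orh/req->comp) and pre-seeded to PENDING_SIG, completion detection reduces to watching for the sentinel to be overwritten; the low byte of the 64-bit ORH then carries the hardware status. A hedged sketch of decoding it (-EIO is an illustrative mapping only; nitrox_skcipher_callback() below folds any nonzero status into -EINVAL):

/* Sketch: decode the ORH word of a posted request. */
static int sketch_orh_status(struct nitrox_softreq *sr)
{
	u64 orh = READ_ONCE(*sr->resp.orh);

	if (orh == PENDING_SIG)
		return -EBUSY;		/* device has not written the ORH */

	return (orh & 0xff) ? -EIO : 0;	/* low byte = hardware status */
}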
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
new file mode 100644
index 000000000000..d4935d6cefdd
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_req.h"
+
+struct nitrox_cipher {
+ const char *name;
+ enum flexi_cipher value;
+};
+
+/* supported cipher list */
+static const struct nitrox_cipher flexi_cipher_table[] = {
+ { "null", CIPHER_NULL },
+ { "cbc(des3_ede)", CIPHER_3DES_CBC },
+ { "ecb(des3_ede)", CIPHER_3DES_ECB },
+ { "cbc(aes)", CIPHER_AES_CBC },
+ { "ecb(aes)", CIPHER_AES_ECB },
+ { "cfb(aes)", CIPHER_AES_CFB },
+ { "rfc3686(ctr(aes))", CIPHER_AES_CTR },
+ { "xts(aes)", CIPHER_AES_XTS },
+ { "cts(cbc(aes))", CIPHER_AES_CBC_CTS },
+ { NULL, CIPHER_INVALID }
+};
+
+static enum flexi_cipher flexi_cipher_type(const char *name)
+{
+ const struct nitrox_cipher *cipher = flexi_cipher_table;
+
+ while (cipher->name) {
+ if (!strcmp(cipher->name, name))
+ break;
+ cipher++;
+ }
+ return cipher->value;
+}
+
+static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+ struct crypto_ctx_hdr *chdr;
+
+ /* get the first device */
+ nctx->ndev = nitrox_get_first_device();
+ if (!nctx->ndev)
+ return -ENODEV;
+
+ /* allocate nitrox crypto context */
+ chdr = crypto_alloc_context(nctx->ndev);
+ if (!chdr) {
+ nitrox_put_device(nctx->ndev);
+ return -ENOMEM;
+ }
+ nctx->chdr = chdr;
+ nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
+ sizeof(struct ctx_hdr));
+ crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
+ sizeof(struct nitrox_kcrypt_request));
+ return 0;
+}
+
+static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+
+ /* free the nitrox crypto context */
+ if (nctx->u.ctx_handle) {
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+
+ memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys));
+ memzero_explicit(&fctx->auth, sizeof(struct auth_keys));
+ crypto_free_context((void *)nctx->chdr);
+ }
+ nitrox_put_device(nctx->ndev);
+
+ nctx->u.ctx_handle = 0;
+ nctx->ndev = NULL;
+}
+
+static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher,
+ int aes_keylen, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct flexi_crypto_context *fctx;
+ union fc_ctx_flags *flags;
+ enum flexi_cipher cipher_type;
+ const char *name;
+
+ name = crypto_tfm_alg_name(tfm);
+ cipher_type = flexi_cipher_type(name);
+ if (unlikely(cipher_type == CIPHER_INVALID)) {
+ pr_err("unsupported cipher: %s\n", name);
+ return -EINVAL;
+ }
+
+ /* fill crypto context */
+ fctx = nctx->u.fctx;
+ flags = &fctx->flags;
+ flags->f = 0;
+ flags->w0.cipher_type = cipher_type;
+ flags->w0.aes_keylen = aes_keylen;
+ flags->w0.iv_source = IV_FROM_DPTR;
+ flags->f = cpu_to_be64(*(u64 *)&flags->w0);
+ /* copy the key to context */
+ memcpy(fctx->crypto.u.key, key, keylen);
+
+ return 0;
+}
+
+static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ int aes_keylen;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ int nents = sg_nents(skreq->src) + 1;
+ int ret;
+
+ /* Allocate buffer to hold IV and input scatterlist array */
+ ret = alloc_src_req_buf(nkreq, nents, ivsize);
+ if (ret)
+ return ret;
+
+ nitrox_creq_copy_iv(nkreq->src, skreq->iv, ivsize);
+ nitrox_creq_set_src_sg(nkreq, nents, ivsize, skreq->src,
+ skreq->cryptlen);
+
+ return 0;
+}
+
+static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ int nents = sg_nents(skreq->dst) + 3;
+ int ret;
+
+ /* Allocate buffer to hold ORH, COMPLETION and output scatterlist
+ * array
+ */
+ ret = alloc_dst_req_buf(nkreq, nents);
+ if (ret)
+ return ret;
+
+ nitrox_creq_set_orh(nkreq);
+ nitrox_creq_set_comp(nkreq);
+ nitrox_creq_set_dst_sg(nkreq, nents, ivsize, skreq->dst,
+ skreq->cryptlen);
+
+ return 0;
+}
+
+static void free_src_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->src);
+}
+
+static void free_dst_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->dst);
+}
+
+static void nitrox_skcipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+
+ free_src_sglist(skreq);
+ free_dst_sglist(skreq);
+ if (err) {
+ pr_err_ratelimited("request failed, status 0x%x\n", err);
+ err = -EINVAL;
+ }
+
+ skcipher_request_complete(skreq, err);
+}
+
+static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ struct se_crypto_request *creq;
+ int ret;
+
+ creq = &nkreq->creq;
+ creq->flags = skreq->base.flags;
+ creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ /* fill the request */
+ creq->ctrl.value = 0;
+ creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+ creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT);
+ /* param0: length of the data to be encrypted */
+ creq->gph.param0 = cpu_to_be16(skreq->cryptlen);
+ creq->gph.param1 = 0;
+ /* param2: encryption data offset */
+ creq->gph.param2 = cpu_to_be16(ivsize);
+ creq->gph.param3 = 0;
+
+ creq->ctx_handle = nctx->u.ctx_handle;
+ creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
+
+ ret = alloc_src_sglist(skreq, ivsize);
+ if (ret)
+ return ret;
+
+ ret = alloc_dst_sglist(skreq, ivsize);
+ if (ret) {
+ free_src_sglist(skreq);
+ return ret;
+ }
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_skcipher_callback, skreq);
+}
+
+static int nitrox_aes_encrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_aes_decrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+}
+
+static int nitrox_3des_encrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_3des_decrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct flexi_crypto_context *fctx;
+ int aes_keylen, ret;
+
+ ret = xts_check_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ keylen /= 2;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ fctx = nctx->u.fctx;
+ /* copy KEY2 */
+ memcpy(fctx->auth.u.key2, (key + keylen), keylen);
+
+ return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct flexi_crypto_context *fctx;
+ int aes_keylen;
+
+ if (keylen < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ fctx = nctx->u.fctx;
+
+ memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE),
+ CTR_RFC3686_NONCE_SIZE);
+
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static struct skcipher_alg nitrox_skciphers[] = { {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "n5_cbc(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "n5_ecb(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "n5_cfb(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "n5_xts(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_xts_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "n5_rfc3686(ctr(aes))",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+ .setkey = nitrox_aes_ctr_rfc3686_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+}, {
+ .base = {
+ .cra_name = "cts(cbc(aes))",
+ .cra_driver_name = "n5_cts(cbc(aes))",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "n5_cbc(des3_ede)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = nitrox_3des_setkey,
+ .encrypt = nitrox_3des_encrypt,
+ .decrypt = nitrox_3des_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "n5_ecb(des3_ede)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = nitrox_3des_setkey,
+ .encrypt = nitrox_3des_encrypt,
+ .decrypt = nitrox_3des_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}
+};
+
+int nitrox_register_skciphers(void)
+{
+ return crypto_register_skciphers(nitrox_skciphers,
+ ARRAY_SIZE(nitrox_skciphers));
+}
+
+void nitrox_unregister_skciphers(void)
+{
+ crypto_unregister_skciphers(nitrox_skciphers,
+ ARRAY_SIZE(nitrox_skciphers));
+}
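
Once registered, these algorithms are reached through the ordinary skcipher API; nothing NITROX-specific leaks to callers. A minimal, hedged usage sketch (AES-256-CBC, one in-place operation; len must be a multiple of AES_BLOCK_SIZE, and "n5_cbc(aes)" is only selected if it wins on cra_priority):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int sketch_cbc_encrypt(u8 *buf, unsigned int len,
			      const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_MAX_KEY_SIZE);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return err;
}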
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
index 30c0aa874583..bf439d8256ba 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -6,7 +6,12 @@
#include "nitrox_hal.h"
#include "nitrox_common.h"
#include "nitrox_isr.h"
+#include "nitrox_mbx.h"
+/**
+ * num_vfs_valid - validate VF count
+ * @num_vfs: number of VFs to enable
+ */
static inline bool num_vfs_valid(int num_vfs)
{
bool valid = false;
@@ -48,7 +53,32 @@ static inline enum vf_mode num_vfs_to_mode(int num_vfs)
return mode;
}
-static void pf_sriov_cleanup(struct nitrox_device *ndev)
+static inline int vf_mode_to_nr_queues(enum vf_mode mode)
+{
+ int nr_queues = 0;
+
+ switch (mode) {
+ case __NDEV_MODE_PF:
+ nr_queues = MAX_PF_QUEUES;
+ break;
+ case __NDEV_MODE_VF16:
+ nr_queues = 8;
+ break;
+ case __NDEV_MODE_VF32:
+ nr_queues = 4;
+ break;
+ case __NDEV_MODE_VF64:
+ nr_queues = 2;
+ break;
+ case __NDEV_MODE_VF128:
+ nr_queues = 1;
+ break;
+ }
+
+ return nr_queues;
+}
+
+static void nitrox_pf_cleanup(struct nitrox_device *ndev)
{
/* PF has no queues in SR-IOV mode */
atomic_set(&ndev->state, __NDEV_NOT_READY);
@@ -60,7 +90,11 @@ static void pf_sriov_cleanup(struct nitrox_device *ndev)
nitrox_common_sw_cleanup(ndev);
}
-static int pf_sriov_init(struct nitrox_device *ndev)
+/**
+ * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled
+ * @ndev: NITROX device
+ */
+static int nitrox_pf_reinit(struct nitrox_device *ndev)
{
int err;
@@ -86,6 +120,33 @@ static int pf_sriov_init(struct nitrox_device *ndev)
return nitrox_crypto_register();
}
+static void nitrox_sriov_cleanup(struct nitrox_device *ndev)
+{
+ /* unregister interrupts for PF in SR-IOV */
+ nitrox_sriov_unregister_interrupts(ndev);
+ nitrox_mbox_cleanup(ndev);
+}
+
+static int nitrox_sriov_init(struct nitrox_device *ndev)
+{
+ int ret;
+
+ /* register interrupts for PF in SR-IOV */
+ ret = nitrox_sriov_register_interupts(ndev);
+ if (ret)
+ return ret;
+
+ ret = nitrox_mbox_init(ndev);
+ if (ret)
+ goto sriov_init_fail;
+
+ return 0;
+
+sriov_init_fail:
+ nitrox_sriov_cleanup(ndev);
+ return ret;
+}
+
static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct nitrox_device *ndev = pci_get_drvdata(pdev);
@@ -106,17 +167,32 @@ static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
}
dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
- ndev->num_vfs = num_vfs;
ndev->mode = num_vfs_to_mode(num_vfs);
+ ndev->iov.num_vfs = num_vfs;
+ ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode);
/* set bit in flags */
set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
/* cleanup PF resources */
- pf_sriov_cleanup(ndev);
+ nitrox_pf_cleanup(ndev);
- config_nps_core_vfcfg_mode(ndev, ndev->mode);
+ /* PF SR-IOV mode initialization */
+ err = nitrox_sriov_init(ndev);
+ if (err)
+ goto iov_fail;
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
return num_vfs;
+
+iov_fail:
+ pci_disable_sriov(pdev);
+ /* clear bit in flags */
+ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+ ndev->iov.num_vfs = 0;
+ ndev->mode = __NDEV_MODE_PF;
+ /* reset back to working mode in PF */
+ nitrox_pf_reinit(ndev);
+ return err;
}
static int nitrox_sriov_disable(struct pci_dev *pdev)
@@ -134,12 +210,16 @@ static int nitrox_sriov_disable(struct pci_dev *pdev)
/* clear bit in flags */
clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
- ndev->num_vfs = 0;
+ ndev->iov.num_vfs = 0;
+ ndev->iov.max_vf_queues = 0;
ndev->mode = __NDEV_MODE_PF;
+ /* cleanup PF SR-IOV resources */
+ nitrox_sriov_cleanup(ndev);
+
config_nps_core_vfcfg_mode(ndev, ndev->mode);
- return pf_sriov_init(ndev);
+ return nitrox_pf_reinit(ndev);
}
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
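
vf_mode_to_nr_queues() divides a fixed ring pool evenly: 16 VFs get 8 queues each, 32 get 4, 64 get 2, 128 get 1. The 128-queue total is an inference from that table, not a constant named in this patch; a tiny sketch of the invariant:

#include <linux/bug.h>
#include <linux/kernel.h>

/* Sketch: num_vfs * max_vf_queues is constant across VF modes
 * (128 assumed here, inferred from vf_mode_to_nr_queues()).
 */
static void sketch_check_partition(void)
{
	static const int vfs[]    = { 16, 32, 64, 128 };
	static const int queues[] = {  8,  4,  2,   1 };
	int i;

	for (i = 0; i < ARRAY_SIZE(vfs); i++)
		WARN_ON(vfs[i] * queues[i] != 128);
}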
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3c6fe57f91f8..9108015e56cc 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -346,9 +346,7 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
- cipher_tfm = crypto_alloc_cipher("aes", 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ cipher_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(cipher_tfm)) {
pr_warn("could not load aes cipher driver\n");
return PTR_ERR(cipher_tfm);
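
Dropping CRYPTO_ALG_ASYNC from the mask is safe because plain "cipher" (single-block) algorithms are always synchronous; only CRYPTO_ALG_NEED_FALLBACK does any work here. The lookup's matching rule, sketched:

/* Sketch of the crypto API lookup test: a candidate algorithm is
 * eligible iff its flags agree with 'type' on every bit in 'mask'.
 * With type = 0 and mask = CRYPTO_ALG_NEED_FALLBACK, algorithms
 * that themselves need a fallback are skipped.
 */
static bool sketch_alg_eligible(u32 cra_flags, u32 type, u32 mask)
{
	return ((cra_flags ^ type) & mask) == 0;
}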
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 01b82b82f8b8..f2643cda45db 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -58,6 +58,7 @@ struct cc_aead_ctx {
unsigned int enc_keylen;
unsigned int auth_keylen;
unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
+ unsigned int hash_len;
enum drv_cipher_mode cipher_mode;
enum cc_flow_mode flow_mode;
enum drv_hash_mode auth_mode;
@@ -122,6 +123,13 @@ static void cc_aead_exit(struct crypto_aead *tfm)
}
}
+static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
static int cc_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
@@ -196,6 +204,7 @@ static int cc_aead_init(struct crypto_aead *tfm)
ctx->auth_state.hmac.ipad_opad = NULL;
ctx->auth_state.hmac.padded_authkey = NULL;
}
+ ctx->hash_len = cc_get_aead_hash_len(tfm);
return 0;
@@ -327,7 +336,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -465,7 +474,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hashmode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -1001,7 +1010,7 @@ static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1098,7 +1107,7 @@ static void cc_proc_scheme_desc(struct aead_request *req,
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_cipher_do(&desc[idx], DO_PAD);
@@ -1128,7 +1137,7 @@ static void cc_proc_scheme_desc(struct aead_request *req,
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -2358,6 +2367,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha1),cbc(des3_ede))",
@@ -2377,6 +2387,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_DES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),cbc(aes))",
@@ -2396,6 +2407,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),cbc(des3_ede))",
@@ -2415,6 +2427,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_DES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(xcbc(aes),cbc(aes))",
@@ -2434,6 +2447,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
@@ -2453,6 +2467,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
@@ -2472,6 +2487,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
@@ -2491,6 +2507,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ccm(aes)",
@@ -2510,6 +2527,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4309(ccm(aes))",
@@ -2529,6 +2547,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "gcm(aes)",
@@ -2548,6 +2567,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4106(gcm(aes))",
@@ -2567,6 +2587,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4543(gcm(aes))",
@@ -2586,6 +2607,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
};
@@ -2670,7 +2692,8 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
+ if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & aead_algs[alg].std_body))
continue;
t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
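
The new std_body/std_bodies pair is a plain capability bitmask: each template declares which standards body defines it (CC_STD_NIST or CC_STD_OSCCA), each device declares which bodies it implements, and registration requires a nonzero intersection on top of the existing HW-revision floor. The gate, condensed into a hedged helper:

/* Sketch of the registration gate used in cc_aead_alloc() (and in
 * cc_cipher_alloc() below): skip templates the part cannot serve.
 */
static bool sketch_alg_supported(const struct cc_alg_template *tmpl,
				 const struct cc_drvdata *drvdata)
{
	return tmpl->min_hw_rev <= drvdata->hw_rev &&
	       (drvdata->std_bodies & tmpl->std_body);
}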
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 7623b29911af..cc92b031fad1 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -7,6 +7,7 @@
#include <crypto/internal/skcipher.h>
#include <crypto/des.h>
#include <crypto/xts.h>
+#include <crypto/sm4.h>
#include <crypto/scatterwalk.h>
#include "cc_driver.h"
@@ -83,6 +84,9 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
+ case S_DIN_to_SM4:
+ if (size == SM4_KEY_SIZE)
+ return 0;
+ break;
default:
break;
}
@@ -122,6 +126,17 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
break;
+ case S_DIN_to_SM4:
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ if (IS_ALIGNED(size, SM4_BLOCK_SIZE))
+ return 0;
+ default:
+ break;
+ }
+ break;
default:
break;
}
@@ -522,6 +537,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
case S_DIN_to_DES:
flow_mode = DIN_DES_DOUT;
break;
+ case S_DIN_to_SM4:
+ flow_mode = DIN_SM4_DOUT;
+ break;
default:
dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
return;
@@ -815,6 +833,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts512(paes)",
@@ -832,6 +851,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts4096(paes)",
@@ -849,6 +869,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv(paes)",
@@ -865,6 +886,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv512(paes)",
@@ -882,6 +904,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv4096(paes)",
@@ -899,6 +922,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker(paes)",
@@ -915,6 +939,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker512(paes)",
@@ -932,6 +957,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker4096(paes)",
@@ -949,6 +975,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(paes)",
@@ -965,6 +992,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(paes)",
@@ -981,6 +1009,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ofb(paes)",
@@ -997,6 +1026,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "cts(cbc(paes))",
@@ -1013,6 +1043,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ctr(paes)",
@@ -1029,6 +1060,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CTR,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts(aes)",
@@ -1045,6 +1077,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts512(aes)",
@@ -1062,6 +1095,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts4096(aes)",
@@ -1079,6 +1113,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv(aes)",
@@ -1095,6 +1130,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv512(aes)",
@@ -1112,6 +1148,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv4096(aes)",
@@ -1129,6 +1166,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker(aes)",
@@ -1145,6 +1183,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker512(aes)",
@@ -1162,6 +1201,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker4096(aes)",
@@ -1179,6 +1219,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(aes)",
@@ -1195,6 +1236,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(aes)",
@@ -1211,6 +1253,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ofb(aes)",
@@ -1227,6 +1270,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cts(cbc(aes))",
@@ -1243,6 +1287,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ctr(aes)",
@@ -1259,6 +1304,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CTR,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(des3_ede)",
@@ -1275,6 +1321,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(des3_ede)",
@@ -1291,6 +1338,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(des)",
@@ -1307,6 +1355,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(des)",
@@ -1323,6 +1372,58 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cbc(sm4)",
+ .driver_name = "cbc-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ecb(sm4)",
+ .driver_name = "ecb-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ctr(sm4)",
+ .driver_name = "ctr-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
},
};
@@ -1398,7 +1499,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
dev_dbg(dev, "Number of algorithms = %zu\n",
ARRAY_SIZE(skcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
- if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
+ if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body))
continue;
dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index e032544f4e31..c8dac273c563 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -115,7 +115,8 @@ enum drv_hash_mode {
DRV_HASH_CBC_MAC = 6,
DRV_HASH_XCBC_MAC = 7,
DRV_HASH_CMAC = 8,
- DRV_HASH_MODE_NUM = 9,
+ DRV_HASH_SM3 = 9,
+ DRV_HASH_MODE_NUM = 10,
DRV_HASH_RESERVE32B = S32_MAX
};
@@ -127,6 +128,7 @@ enum drv_hash_hw_mode {
DRV_HASH_HW_SHA512 = 4,
DRV_HASH_HW_SHA384 = 12,
DRV_HASH_HW_GHASH = 6,
+ DRV_HASH_HW_SM3 = 14,
DRV_HASH_HW_RESERVE32B = S32_MAX
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 1ff229c2aeab..8ada308d72ee 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -39,23 +39,38 @@ struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
+ int std_bodies;
};
/* Hardware revisions defs. */
+/* The 703 is an OSCCA-only variant of the 713 */
+static const struct cc_hw_data cc703_hw = {
+ .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
+};
+
+static const struct cc_hw_data cc713_hw = {
+ .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
+};
+
static const struct cc_hw_data cc712_hw = {
- .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U
+ .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
+ .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc710_hw = {
- .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U
+ .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
+ .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc630p_hw = {
- .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
+ .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
+ .std_bodies = CC_STD_ALL
};
static const struct of_device_id arm_ccree_dev_of_match[] = {
+ { .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
+ { .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
@@ -204,14 +219,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
hw_rev = (struct cc_hw_data *)dev_id->data;
new_drvdata->hw_rev_name = hw_rev->name;
new_drvdata->hw_rev = hw_rev->rev;
+ new_drvdata->std_bodies = hw_rev->std_bodies;
if (hw_rev->rev >= CC_HW_REV_712) {
- new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
} else {
- new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
@@ -297,15 +311,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
- /* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
- if (signature_val != hw_rev->sig) {
- dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, hw_rev->sig);
- rc = -EINVAL;
- goto post_clk_err;
+ if (hw_rev->rev <= CC_HW_REV_712) {
+ /* Verify correct mapping */
+ signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (signature_val != hw_rev->sig) {
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ signature_val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
@@ -461,6 +477,14 @@ int cc_clk_on(struct cc_drvdata *drvdata)
return 0;
}
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
+{
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ return HASH_LEN_SIZE_712;
+ else
+ return HASH_LEN_SIZE_630;
+}
+
void cc_clk_off(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
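
cc_get_default_hash_len() replaces the cached drvdata->hash_len_sz: 712-class parts use a wider running-length field than 630-class ones, and cc_hash.c (below) overrides the result for SM3. A hedged sketch of the combined per-tfm selection (the concrete HASH_LEN_SIZE_712/_630 byte values are not shown in this patch):

/* Sketch: length-field size per hash mode; mirrors
 * cc_get_hash_len() in cc_hash.c further down.
 */
static unsigned int sketch_hash_len(struct cc_drvdata *drvdata,
				    int hash_mode)
{
	if (hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;	/* 8 bytes */
	return cc_get_default_hash_len(drvdata);
}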
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index d608a4faf662..5be7fd431b05 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -36,12 +36,19 @@
extern bool cc_dump_desc;
extern bool cc_dump_bytes;
-#define DRV_MODULE_VERSION "4.0"
+#define DRV_MODULE_VERSION "5.0"
enum cc_hw_rev {
CC_HW_REV_630 = 630,
CC_HW_REV_710 = 710,
- CC_HW_REV_712 = 712
+ CC_HW_REV_712 = 712,
+ CC_HW_REV_713 = 713
+};
+
+enum cc_std_body {
+ CC_STD_NIST = 0x1,
+ CC_STD_OSCCA = 0x2,
+ CC_STD_ALL = 0x3
};
#define CC_COHERENT_CACHE_PARAMS 0xEEE
@@ -127,10 +134,10 @@ struct cc_drvdata {
bool coherent;
char *hw_rev_name;
enum cc_hw_rev hw_rev;
- u32 hash_len_sz;
u32 axim_mon_offset;
u32 sig_offset;
u32 ver_offset;
+ int std_bodies;
};
struct cc_crypto_alg {
@@ -156,6 +163,7 @@ struct cc_alg_template {
int flow_mode; /* Note: currently, refers to the cipher mode only. */
int auth_mode;
u32 min_hw_rev;
+ enum cc_std_body std_body;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};
@@ -182,6 +190,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index b9313306c36f..2c4ddc8fb76b 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -6,6 +6,7 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
+#include <crypto/sm3.h>
#include <crypto/internal/hash.h>
#include "cc_driver.h"
@@ -16,6 +17,7 @@
#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+#define CC_SM3_HASH_LEN_SIZE 8
struct cc_hash_handle {
cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
@@ -43,6 +45,9 @@ static u64 sha384_init[] = {
static u64 sha512_init[] = {
SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+static const u32 sm3_init[] = {
+ SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
+ SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size);
@@ -82,6 +87,7 @@ struct cc_hash_ctx {
int hash_mode;
int hw_mode;
int inter_digestsize;
+ unsigned int hash_len;
struct completion setkey_comp;
bool is_hmac;
};
@@ -138,10 +144,10 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
ctx->hash_mode == DRV_HASH_SHA384)
memcpy(state->digest_bytes_len,
digest_len_sha512_init,
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
else
memcpy(state->digest_bytes_len, digest_len_init,
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
}
if (ctx->hash_mode != DRV_HASH_NULL) {
@@ -321,7 +327,7 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
/* Get final MAC result */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
/* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, 1);
@@ -367,7 +373,7 @@ static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_sram(&desc[idx],
cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -440,7 +446,7 @@ static int cc_hash_digest(struct ahash_request *req)
* digest
*/
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
@@ -454,14 +460,14 @@ static int cc_hash_digest(struct ahash_request *req)
/* Load the hash current length */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI,
state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT);
+ ctx->hash_len, NS_BIT);
} else {
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
@@ -478,7 +484,7 @@ static int cc_hash_digest(struct ahash_request *req)
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ ctx->hash_len, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_cipher_do(&desc[idx], DO_PAD);
@@ -504,7 +510,7 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
{
/* Restore hash digest */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
@@ -513,10 +519,10 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
/* Restore hash current length */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT);
+ ctx->hash_len, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -576,7 +582,7 @@ static int cc_hash_update(struct ahash_request *req)
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
@@ -585,9 +591,9 @@ static int cc_hash_update(struct ahash_request *req)
/* store current hash length in context */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 1);
+ ctx->hash_len, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
@@ -649,9 +655,9 @@ static int cc_do_finup(struct ahash_request *req, bool update)
/* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ ctx->hash_len, NS_BIT, 0);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
idx++;
@@ -749,7 +755,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -831,7 +837,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1069,6 +1075,16 @@ fail:
return -ENOMEM;
}
+static int cc_get_hash_len(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->hash_mode == DRV_HASH_SM3)
+ return CC_SM3_HASH_LEN_SIZE;
+ else
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
static int cc_cra_init(struct crypto_tfm *tfm)
{
struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1086,7 +1102,7 @@ static int cc_cra_init(struct crypto_tfm *tfm)
ctx->hw_mode = cc_alg->hw_mode;
ctx->inter_digestsize = cc_alg->inter_digestsize;
ctx->drvdata = cc_alg->drvdata;
-
+ ctx->hash_len = cc_get_hash_len(tfm);
return cc_alloc_ctx(ctx);
}
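
[note] The switch from drvdata->hash_len_sz to a per-tfm ctx->hash_len exists because SM3 does not share the SHA length-field size: the counter the HW loads and stores alongside the digest is narrower for SM3 than for SHA-512-class hashes. A minimal userspace model of the selection done by cc_get_hash_len() (the byte counts are assumptions; the driver takes them from its own constants):

#include <stdbool.h>

enum hash_mode { HASH_SHA1, HASH_SHA256, HASH_SM3 };

/* SM3 always uses the narrow length field; SHA uses the wide one
 * when the HW revision can do SHA-512 (byte counts assumed). */
static int get_hash_len(enum hash_mode mode, bool hw_has_sha512)
{
	if (mode == HASH_SM3)
		return 8;	/* CC_SM3_HASH_LEN_SIZE (assumed) */
	return hw_has_sha512 ? 16 : 8;
}
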
@@ -1465,8 +1481,8 @@ static int cc_hash_export(struct ahash_request *req, void *out)
memcpy(out, state->digest_buff, ctx->inter_digestsize);
out += ctx->inter_digestsize;
- memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
- out += ctx->drvdata->hash_len_sz;
+ memcpy(out, state->digest_bytes_len, ctx->hash_len);
+ out += ctx->hash_len;
memcpy(out, &curr_buff_cnt, sizeof(u32));
out += sizeof(u32);
@@ -1494,8 +1510,8 @@ static int cc_hash_import(struct ahash_request *req, const void *in)
memcpy(state->digest_buff, in, ctx->inter_digestsize);
in += ctx->inter_digestsize;
- memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
- in += ctx->drvdata->hash_len_sz;
+ memcpy(state->digest_bytes_len, in, ctx->hash_len);
+ in += ctx->hash_len;
/* Sanity check the data as much as possible */
memcpy(&tmp, in, sizeof(u32));
@@ -1515,6 +1531,7 @@ struct cc_hash_template {
char mac_name[CRYPTO_MAX_ALG_NAME];
char mac_driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
+ bool is_mac;
bool synchronize;
struct ahash_alg template_ahash;
int hash_mode;
@@ -1522,6 +1539,7 @@ struct cc_hash_template {
int inter_digestsize;
struct cc_drvdata *drvdata;
u32 min_hw_rev;
+ enum cc_std_body std_body;
};
#define CC_STATE_SIZE(_x) \
@@ -1536,6 +1554,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha1)",
.mac_driver_name = "hmac-sha1-ccree",
.blocksize = SHA1_BLOCK_SIZE,
+ .is_mac = true,
.synchronize = false,
.template_ahash = {
.init = cc_hash_init,
@@ -1555,6 +1574,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA1,
.inter_digestsize = SHA1_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha256",
@@ -1562,6 +1582,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha256)",
.mac_driver_name = "hmac-sha256-ccree",
.blocksize = SHA256_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1580,6 +1601,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha224",
@@ -1587,6 +1609,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha224)",
.mac_driver_name = "hmac-sha224-ccree",
.blocksize = SHA224_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1605,6 +1628,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha384",
@@ -1612,6 +1636,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha384)",
.mac_driver_name = "hmac-sha384-ccree",
.blocksize = SHA384_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1630,6 +1655,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha512",
@@ -1637,6 +1663,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha512)",
.mac_driver_name = "hmac-sha512-ccree",
.blocksize = SHA512_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1655,6 +1682,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "md5",
@@ -1662,6 +1690,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(md5)",
.mac_driver_name = "hmac-md5-ccree",
.blocksize = MD5_HMAC_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1680,11 +1709,38 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_MD5,
.inter_digestsize = MD5_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sm3",
+ .driver_name = "sm3-ccree",
+ .blocksize = SM3_BLOCK_SIZE,
+ .is_mac = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SM3,
+ .hw_mode = DRV_HASH_HW_SM3,
+ .inter_digestsize = SM3_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
},
{
.mac_name = "xcbc(aes)",
.mac_driver_name = "xcbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
@@ -1703,11 +1759,13 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_XCBC_MAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.mac_name = "cmac(aes)",
.mac_driver_name = "cmac-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
@@ -1726,6 +1784,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_CMAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
};
@@ -1780,6 +1839,7 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
+ bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
int rc = 0;
/* Copy-to-sram digest-len */
@@ -1845,6 +1905,17 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
sram_buff_ofs += sizeof(sha256_init);
larval_seq_len = 0;
+ if (sm3_supported) {
+ cc_set_sram_desc(sm3_init, sram_buff_ofs,
+ ARRAY_SIZE(sm3_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sm3_init);
+ larval_seq_len = 0;
+ }
+
if (large_sha_supported) {
cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
(ARRAY_SIZE(sha384_init) * 2), larval_seq,
@@ -1911,6 +1982,9 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
sizeof(sha224_init) +
sizeof(sha256_init);
+ if (drvdata->hw_rev >= CC_HW_REV_713)
+ sram_size_to_alloc += sizeof(sm3_init);
+
if (drvdata->hw_rev >= CC_HW_REV_712)
sram_size_to_alloc += sizeof(digest_len_sha512_init) +
sizeof(sha384_init) + sizeof(sha512_init);
@@ -1937,30 +2011,33 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
struct cc_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
- /* We either support both HASH and MAC or none */
- if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
+ /* Check that the HW revision and variants are suitable */
+ if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & driver_hash[alg].std_body))
continue;
- /* register hmac version */
- t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
- if (IS_ERR(t_alg)) {
- rc = PTR_ERR(t_alg);
- dev_err(dev, "%s alg allocation failed\n",
- driver_hash[alg].driver_name);
- goto fail;
- }
- t_alg->drvdata = drvdata;
-
- rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (rc) {
- dev_err(dev, "%s alg registration failed\n",
- driver_hash[alg].driver_name);
- kfree(t_alg);
- goto fail;
- } else {
- list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ if (driver_hash[alg].is_mac) {
+ /* register hmac version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else {
+ list_add_tail(&t_alg->entry,
+ &hash_handle->hash_list);
+ }
}
-
if (hw_mode == DRV_CIPHER_XCBC_MAC ||
hw_mode == DRV_CIPHER_CMAC)
continue;
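
[note] Registration is now gated per template: the HW revision floor must be met and the algorithm's standards body (NIST or OSCCA) must be among those the device advertises in drvdata->std_bodies, and only templates with is_mac set get an HMAC/MAC variant registered. A sketch of the gating predicate, assuming std_bodies is a bitmask:

#include <stdbool.h>
#include <stdint.h>

struct hash_tmpl {
	unsigned int min_hw_rev;
	uint32_t std_body;	/* CC_STD_NIST or CC_STD_OSCCA bit */
	bool is_mac;		/* also register an HMAC/MAC variant */
};

static bool hash_alg_usable(const struct hash_tmpl *t,
			    unsigned int hw_rev, uint32_t std_bodies)
{
	return t->min_hw_rev <= hw_rev && (std_bodies & t->std_body);
}
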
@@ -2027,7 +2104,7 @@ static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
XCBC_MAC_K1_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_AES);
@@ -2162,6 +2239,8 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
return sha384_init;
case DRV_HASH_SHA512:
return sha512_init;
+ case DRV_HASH_SM3:
+ return sm3_init;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
return md5_init;
@@ -2182,6 +2261,8 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
+ bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
+ cc_sram_addr_t addr;
switch (mode) {
case DRV_HASH_NULL:
@@ -2200,19 +2281,31 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init));
- case DRV_HASH_SHA384:
+ case DRV_HASH_SM3:
return (hash_handle->larval_digest_sram_addr +
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init) +
sizeof(sha256_init));
+ case DRV_HASH_SHA384:
+ addr = (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init));
+ if (sm3_supported)
+ addr += sizeof(sm3_init);
+ return addr;
case DRV_HASH_SHA512:
- return (hash_handle->larval_digest_sram_addr +
+ addr = (hash_handle->larval_digest_sram_addr +
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init) +
sizeof(sha256_init) +
sizeof(sha384_init));
+ if (sm3_supported)
+ addr += sizeof(sm3_init);
+ return addr;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
}
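
[note] cc_larval_digest_addr() mirrors the SRAM packing done in cc_init_hash_sram(): larval digests sit back to back (md5, sha1, sha224, sha256, then sm3 on CryptoCell 713, then sha384/sha512), so every offset past the SM3 slot must add sizeof(sm3_init) only when the revision supports it. A compact model of the arithmetic (sizes illustrative):

#include <stdbool.h>
#include <stddef.h>

/* slots: 0=md5 1=sha1 2=sha224 3=sha256 4=sm3 5=sha384 (6=sha512) */
static size_t larval_offset(int slot, bool sm3_supported)
{
	static const size_t sz[] = { 16, 20, 32, 32, 32, 64 };
	size_t off = 0;
	int i;

	for (i = 0; i < slot; i++) {
		if (i == 4 && !sm3_supported)
			continue;	/* sm3_init was never copied */
		off += sz[i];
	}
	return off;
}
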
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 45985b955d2c..7a9b90db7db7 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -42,6 +42,7 @@
#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY)
#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
@@ -107,6 +108,7 @@ enum cc_flow_mode {
AES_to_AES_to_HASH_and_DOUT = 13,
AES_to_AES_to_HASH = 14,
AES_to_HASH_and_AES = 15,
+ DIN_SM4_DOUT = 16,
DIN_AES_AESMAC = 17,
HASH_to_DOUT = 18,
/* setup flows */
@@ -114,9 +116,11 @@ enum cc_flow_mode {
S_DIN_to_AES2 = 33,
S_DIN_to_DES = 34,
S_DIN_to_RC4 = 35,
+ S_DIN_to_SM4 = 36,
S_DIN_to_HASH = 37,
S_AES_to_DOUT = 38,
S_AES2_to_DOUT = 39,
+ S_SM4_to_DOUT = 40,
S_RC4_to_DOUT = 41,
S_DES_to_DOUT = 42,
S_HASH_to_DOUT = 43,
@@ -394,6 +398,16 @@ static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
}
/*
+ * Set the AES XOR crypto key bit; in some scenarios this selects the SM3 engine
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1);
+}
+
+/*
* Set the DOUT field of a HW descriptors to SRAM mode
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
@@ -455,6 +469,22 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
}
/*
+ * Set the cipher mode for hash algorithms.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
+ * @hash_mode: specifies which hash is being handled
+ */
+static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
+ enum drv_cipher_mode cipher_mode,
+ enum drv_hash_mode hash_mode)
+{
+ set_cipher_mode(pdesc, cipher_mode);
+ if (hash_mode == DRV_HASH_SM3)
+ set_aes_xor_crypto_key(pdesc);
+}
+
+/*
* Set the cipher configuration fields.
*
* @pdesc: pointer HW descriptor struct
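
[note] set_hash_cipher_mode() folds the SM3 quirk into one helper: besides programming the cipher mode it raises AES_XOR_CRYPTO_KEY in descriptor word 4, which is what routes the operation to the SM3 engine. A userspace sketch of the word-4 packing (the bit position and mask are placeholders, not the real [CC7x-DESC] layout):

#include <stdint.h>

#define W4_AES_XOR_CRYPTO_KEY	(1u << 20)	/* position assumed */
#define W4_CIPHER_MODE_MASK	0xfu		/* width assumed */

struct hw_desc { uint32_t word[6]; };

static void set_hash_cipher_mode(struct hw_desc *d, uint32_t cipher_mode,
				 int is_sm3)
{
	d->word[4] |= cipher_mode & W4_CIPHER_MODE_MASK;
	if (is_sm3)			/* DRV_HASH_SM3 */
		d->word[4] |= W4_AES_XOR_CRYPTO_KEY;
}
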
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index db203f8be429..bcef76508dfa 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
- return ctx->dev->u_ctx;
+ return container_of(ctx->dev, struct uld_ctx, dev);
}
static inline int is_ofld_imm(const struct sk_buff *skb)
@@ -198,18 +198,43 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
-static inline void chcr_handle_aead_resp(struct aead_request *req,
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+ int err = 0;
+
+ spin_lock_bh(&dev->lock_chcr_dev);
+ if (dev->state == CHCR_DETACH)
+ err = 1;
+ else
+ atomic_inc(&dev->inflight);
+
+ spin_unlock_bh(&dev->lock_chcr_dev);
+
+ return err;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+ atomic_dec(&dev->inflight);
+}
+
+static inline int chcr_handle_aead_resp(struct aead_request *req,
unsigned char *input,
int err)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_dev *dev = a_ctx(tfm)->dev;
chcr_aead_common_exit(req);
if (reqctx->verify == VERIFY_SW) {
chcr_verify_tag(req, input, &err);
reqctx->verify = VERIFY_HW;
}
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
+
+ return err;
}
static void get_aes_decrypt_key(unsigned char *dec_key,
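
[note] chcr_inc_wrcount()/chcr_dec_wrcount() are the heart of the PCI-shutdown handling: a work request may take a reference only while the device is not detaching, and the detach path later drains inflight before anything is freed. A self-contained model of the same gate (a pthread mutex stands in for the driver's spin_lock_bh):

#include <pthread.h>
#include <stdatomic.h>

enum dev_state { DEV_ATTACHED, DEV_DETACHED };

struct dev_model {
	pthread_mutex_t lock;
	enum dev_state state;
	atomic_int inflight;
};

/* Returns 0 and holds a reference, or nonzero if detaching. */
static int inc_wrcount(struct dev_model *d)
{
	int err = 0;

	pthread_mutex_lock(&d->lock);
	if (d->state == DEV_DETACHED)
		err = 1;
	else
		atomic_fetch_add(&d->inflight, 1);
	pthread_mutex_unlock(&d->lock);
	return err;
}

static void dec_wrcount(struct dev_model *d)
{
	atomic_fetch_sub(&d->inflight, 1);
}
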
@@ -391,7 +416,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
int j;
@@ -399,7 +424,7 @@ static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
return;
j = walk->nents;
walk->to->len[j % 8] = htons(size);
- walk->to->addr[j % 8] = cpu_to_be64(*addr);
+ walk->to->addr[j % 8] = cpu_to_be64(addr);
j++;
if ((j % 8) == 0)
walk->to++;
@@ -473,16 +498,16 @@ static inline void ulptx_walk_end(struct ulptx_walk *walk)
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
if (!size)
return;
if (walk->nents == 0) {
walk->sgl->len0 = cpu_to_be32(size);
- walk->sgl->addr0 = cpu_to_be64(*addr);
+ walk->sgl->addr0 = cpu_to_be64(addr);
} else {
- walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
walk->pair_idx = !walk->pair_idx;
if (!walk->pair_idx)
@@ -717,7 +742,7 @@ static inline void create_wreq(struct chcr_context *ctx,
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
- FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
+ FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
!!lcb, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
@@ -773,7 +798,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1100,6 +1125,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
if (err)
@@ -1161,6 +1187,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
return err;
}
@@ -1187,7 +1214,10 @@ static int process_cipher(struct ablkcipher_request *req,
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+
+ err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (err)
+ goto error;
if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
AES_MIN_KEY_SIZE +
sizeof(struct cpl_rx_phys_dsgl) +
@@ -1276,15 +1306,21 @@ error:
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -ENOSPC;
+ goto error;
+ }
}
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1295,15 +1331,23 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return isfull ? -EBUSY : -EINPROGRESS;
+error:
+ chcr_dec_wrcount(dev);
+ return err;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
+
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
@@ -1311,8 +1355,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -ENOSPC;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
- &skb, CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
@@ -1333,10 +1377,11 @@ static int chcr_device_init(struct chcr_context *ctx)
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
+ err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
- ctx->dev = u_ctx->dev;
+ ctx->dev = &u_ctx->dev;
adap = padap(ctx->dev);
ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
@@ -1344,7 +1389,6 @@ static int chcr_device_init(struct chcr_context *ctx)
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
- ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -1498,7 +1542,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1562,6 +1606,7 @@ static int chcr_ahash_update(struct ahash_request *req)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = NULL;
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
@@ -1570,12 +1615,6 @@ static int chcr_ahash_update(struct ahash_request *req)
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
if (nbytes + req_ctx->reqlen >= bs) {
remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1586,10 +1625,27 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+	/* Detach state for CHCR means lldi or padap is freed. Increasing
+	 * the inflight count for dev guarantees that lldi and padap are valid.
+ */
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
+ }
+
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1629,6 +1685,8 @@ static int chcr_ahash_update(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1646,10 +1704,16 @@ static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ int error = -EINVAL;
+
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
chcr_init_hctx_per_wr(req_ctx);
u_ctx = ULD_CTX(h_ctx(rtfm));
@@ -1686,19 +1750,25 @@ static int chcr_ahash_final(struct ahash_request *req)
}
params.hash_size = crypto_ahash_digestsize(rtfm);
skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+err:
+ chcr_dec_wrcount(dev);
+ return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1707,17 +1777,24 @@ static int chcr_ahash_finup(struct ahash_request *req)
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1774,6 +1851,8 @@ static int chcr_ahash_finup(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1781,6 +1860,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1789,19 +1869,26 @@ static int chcr_ahash_digest(struct ahash_request *req)
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1854,6 +1941,8 @@ static int chcr_ahash_digest(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1925,6 +2014,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
int digestsize, updated_digestsize;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+ struct chcr_dev *dev = h_ctx(tfm)->dev;
if (input == NULL)
goto out;
@@ -1967,6 +2057,7 @@ unmap:
out:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
}
@@ -1983,14 +2074,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ chcr_handle_cipher_resp(ablkcipher_request_cast(req),
input, err);
break;
-
case CRYPTO_ALG_TYPE_AHASH:
chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
}
@@ -2008,7 +2098,7 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
chcr_init_hctx_per_wr(state);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -2215,10 +2305,7 @@ static int chcr_aead_common_init(struct aead_request *req)
error = -ENOMEM;
goto err;
}
- reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
- CHCR_SRC_SG_SIZE, 0);
- reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
- CHCR_SRC_SG_SIZE, req->assoclen);
+
return 0;
err:
return error;
@@ -2249,7 +2336,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
- aead_request_set_ad(subreq, req->assoclen);
+ aead_request_set_ad(subreq, req->assoclen);
return op_type ? crypto_aead_decrypt(subreq) :
crypto_aead_encrypt(subreq);
}
@@ -2268,10 +2355,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
- unsigned int kctx_len = 0, dnents;
- unsigned int assoclen = req->assoclen;
+ unsigned int kctx_len = 0, dnents, snents;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -2288,24 +2375,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
- assoclen = 0;
- reqctx->aad_nents = 0;
}
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
dnents += MIN_AUTH_SG; // For IV
-
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
- : (sgl_len(reqctx->src_nents + reqctx->aad_nents
- + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
+ : (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
@@ -2315,7 +2398,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2331,16 +2414,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
- assoclen + 1);
- chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
+ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1,
+ null ? 0 : 1 + IV,
+ null ? 0 : IV + req->assoclen,
+ req->assoclen + IV + 1,
(temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
temp & 0xF,
- null ? 0 : assoclen + IV + 1,
+ null ? 0 : req->assoclen + IV + 1,
temp, temp);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
@@ -2367,23 +2450,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
} else {
- memcpy(reqctx->iv, req->iv, IV);
+ memcpy(ivptr, req->iv, IV);
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, 0);
reqctx->skb = skb;
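
[note] The recurring layout change in the AEAD paths is that the IV now travels inline in the work request, between the destination phys-dsgl and the source ULPTX SGL, so AAD, IV and payload no longer need separate scatter-gather runs or a reqctx->iv copy. A pointer-carving sketch of the layout (offsets only; the real structs come from the firmware headers):

#include <stdint.h>

/* After this patch a WR is laid out as:
 *   chcr_wr | key ctx | cpl_rx_phys_dsgl (dst) | IV | ulptx_sgl (src)
 */
struct wr_layout {
	uint8_t *phys_cpl;	/* destination dsgl */
	uint8_t *ivptr;		/* inline IV */
	uint8_t *ulptx;		/* source sgl */
};

static struct wr_layout carve_wr(uint8_t *after_chcr_req,
				 unsigned int kctx_len,
				 unsigned int dst_size,
				 unsigned int iv_len)
{
	struct wr_layout l;

	l.phys_cpl = after_chcr_req + kctx_len;
	l.ivptr    = l.phys_cpl + dst_size;
	l.ulptx    = l.ivptr + iv_len;
	return l;
}
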
@@ -2470,8 +2554,7 @@ void chcr_aead_dma_unmap(struct device *dev,
}
void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen)
+ struct ulptx_sgl *ulptx)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2484,28 +2567,20 @@ void chcr_add_aead_src_ent(struct aead_request *req,
buf += reqctx->b0_len;
}
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, assoclen, 0);
- buf += assoclen;
- memcpy(buf, reqctx->iv, IV);
- buf += IV;
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, req->cryptlen, req->assoclen);
+ buf, req->cryptlen + req->assoclen, 0);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (reqctx->b0_len)
ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
- &reqctx->b0_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
- req->assoclen);
+ reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
+ req->assoclen, 0);
ulptx_walk_end(&ulp_walk);
}
}
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2516,12 +2591,10 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
- if (reqctx->b0_len)
- dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
- temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+ temp = req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
@@ -2589,7 +2662,7 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_init(&ulp_walk, ulptx);
if (param->bfr_len)
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
- &reqctx->hctx_wr.dma_addr);
+ reqctx->hctx_wr.dma_addr);
ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
param->sg_len, reqctx->hctx_wr.src_ofst);
reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
@@ -2689,8 +2762,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
return 0;
}
-static void generate_b0(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+static void generate_b0(struct aead_request *req, u8 *ivptr,
unsigned short op_type)
{
unsigned int l, lp, m;
@@ -2701,7 +2773,7 @@ static void generate_b0(struct aead_request *req,
m = crypto_aead_authsize(aead);
- memcpy(b0, reqctx->iv, 16);
+ memcpy(b0, ivptr, 16);
lp = b0[0];
l = lp + 1;
@@ -2727,29 +2799,31 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
}
static int ccm_format_packet(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+ u8 *ivptr,
unsigned int sub_type,
unsigned short op_type,
unsigned int assoclen)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
int rc = 0;
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
- reqctx->iv[0] = 3;
- memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
- memcpy(reqctx->iv + 4, req->iv, 8);
- memset(reqctx->iv + 12, 0, 4);
+ ivptr[0] = 3;
+ memcpy(ivptr + 1, &aeadctx->salt[0], 3);
+ memcpy(ivptr + 4, req->iv, 8);
+ memset(ivptr + 12, 0, 4);
} else {
- memcpy(reqctx->iv, req->iv, 16);
+ memcpy(ivptr, req->iv, 16);
}
if (assoclen)
*((unsigned short *)(reqctx->scratch_pad + 16)) =
htons(assoclen);
- generate_b0(req, aeadctx, op_type);
+ generate_b0(req, ivptr, op_type);
/* zero the ctr value */
- memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
return rc;
}
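
[note] For RFC 4309 (CCM in IPsec) the 16-byte IV is synthesized rather than taken verbatim: flags byte 3 (selecting a 4-byte counter), the 3-byte salt saved at setkey time, the caller's 8-byte IV, then a zeroed counter; the trailing memset in ccm_format_packet() clears the counter field whose width is encoded in ivptr[0]. A standalone version of that branch:

#include <stdint.h>
#include <string.h>

static void rfc4309_build_iv(uint8_t iv[16], const uint8_t salt[3],
			     const uint8_t req_iv[8])
{
	iv[0] = 3;			/* flags: 4-byte counter (L'=3) */
	memcpy(iv + 1, salt, 3);	/* salt from setkey */
	memcpy(iv + 4, req_iv, 8);	/* per-request IV */
	memset(iv + 12, 0, 4);		/* counter starts at zero */
}
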
@@ -2762,7 +2836,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
+ unsigned int c_id = a_ctx(tfm)->tx_chan_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2775,7 +2849,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ?
- (assoclen + IV + 1 + ccm_xtra) : 0;
+ (req->assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm);
@@ -2785,13 +2859,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, assoclen + 1 + ccm_xtra);
+ 2, 1);
sec_cpl->pldlen =
- htonl(assoclen + IV + req->cryptlen + ccm_xtra);
+ htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- 1, assoclen + ccm_xtra, assoclen
- + IV + 1 + ccm_xtra, 0);
+ 1 + IV, IV + assoclen + ccm_xtra,
+ req->assoclen + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset,
@@ -2838,10 +2912,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, kctx_len, dnents, temp;
+ unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
unsigned int sub_type, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2857,37 +2932,38 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
if (error)
goto err;
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
+ (reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
+ snents += MIN_CCM_SG; //For B0
kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
reqctx->b0_len) <= SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
reqctx->b0_len, 16) :
- (sgl_len(reqctx->src_nents + reqctx->aad_nents +
- MIN_CCM_SG) * 8);
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
- reqctx->b0_len, transhdr_len, reqctx->op)) {
+ reqctx->b0_len, transhdr_len, reqctx->op)) {
atomic_inc(&adap->chcr_stats.fallback);
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
@@ -2897,16 +2973,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
+ error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
if (error)
goto dstmap_fail;
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
reqctx->b0_len) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
transhdr_len, temp, 0);
@@ -2931,10 +3008,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
- unsigned int transhdr_len, dnents = 0;
+ unsigned int transhdr_len, dnents = 0, snents;
unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2946,19 +3024,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
error = chcr_aead_common_init(req);
if (error)
return ERR_PTR(error);
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
(reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
- (sgl_len(reqctx->src_nents +
- reqctx->aad_nents + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
@@ -2968,7 +3046,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2979,15 +3057,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
//Offset of tag from end
temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- a_ctx(tfm)->dev->rx_channel_id, 2,
- (assoclen + 1));
+ a_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen =
- htonl(assoclen + IV + req->cryptlen);
+ htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1, 0);
+ assoclen ? 1 + IV : 0,
+ assoclen ? IV + assoclen : 0,
+ req->assoclen + IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
temp, temp);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
@@ -3002,25 +3080,26 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
/* prepare a 16 byte iv */
/* S A L T | IV | 0x00000001 */
if (get_aead_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
- memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
+ memcpy(ivptr, aeadctx->salt, 4);
+ memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
} else {
- memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
+ memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
}
- *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+ *((unsigned int *)(ivptr + 12)) = htonl(0x01);
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ ulptx = (struct ulptx_sgl *)(ivptr + 16);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
@@ -3118,12 +3197,12 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_HW;
break;
case ICV_12:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_14:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_16:
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
@@ -3565,27 +3644,42 @@ static int chcr_aead_op(struct aead_request *req,
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct uld_ctx *u_ctx;
struct sk_buff *skb;
int isfull = 0;
+ struct chcr_dev *cdev;
- if (!a_ctx(tfm)->dev) {
+ cdev = a_ctx(tfm)->dev;
+ if (!cdev) {
pr_err("chcr : %s : No crypto device.\n", __func__);
return -ENXIO;
}
+
+ if (chcr_inc_wrcount(cdev)) {
+ /* Detach state for CHCR means lldi or padap is freed.
+ * We cannot increment fallback here.
+ */
+ return chcr_aead_fallback(req, reqctx->op);
+ }
+
u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
a_ctx(tfm)->tx_qidx)) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ chcr_dec_wrcount(cdev);
return -ENOSPC;
+ }
}
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
- if (IS_ERR(skb) || !skb)
+ if (IS_ERR(skb) || !skb) {
+ chcr_dec_wrcount(cdev);
return PTR_ERR(skb);
+ }
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
@@ -3722,7 +3816,6 @@ static struct chcr_alg_template driver_algs[] = {
.setkey = chcr_aes_rfc3686_setkey,
.encrypt = chcr_aes_encrypt,
.decrypt = chcr_aes_decrypt,
- .geniv = "seqiv",
}
}
},
@@ -4178,7 +4271,6 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
-
};
/*
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 1871500309e2..ee20dd899e83 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -262,7 +262,7 @@
#define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
-#define MIN_CCM_SG 2 /*IV+B0*/
+#define MIN_CCM_SG 1 /*IV+B0*/
#define CIP_SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
#define HASH_SPACE_LEFT(len) \
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 2c472e3c6aeb..239b933d6df6 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -26,10 +26,7 @@
#include "chcr_core.h"
#include "cxgb4_uld.h"
-static LIST_HEAD(uld_ctx_list);
-static DEFINE_MUTEX(dev_mutex);
-static atomic_t dev_count;
-static struct uld_ctx *ctx_rr;
+static struct chcr_driver_data drv_data;
typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -53,6 +50,29 @@ static struct cxgb4_uld_info chcr_uld_info = {
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
+static void detach_work_fn(struct work_struct *work)
+{
+ struct chcr_dev *dev;
+
+ dev = container_of(work, struct chcr_dev, detach_work.work);
+
+ if (atomic_read(&dev->inflight)) {
+ dev->wqretry--;
+ if (dev->wqretry) {
+ pr_debug("Request Inflight Count %d\n",
+ atomic_read(&dev->inflight));
+
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ } else {
+			WARN(1, "CHCR: %d requests still pending\n",
+ atomic_read(&dev->inflight));
+ complete(&dev->detach_comp);
+ }
+ } else {
+ complete(&dev->detach_comp);
+ }
+}
+
struct uld_ctx *assign_chcr_device(void)
{
struct uld_ctx *u_ctx = NULL;
@@ -63,56 +83,74 @@ struct uld_ctx *assign_chcr_device(void)
* Although One session must use the same device to
* maintain request-response ordering.
*/
- mutex_lock(&dev_mutex);
- if (!list_empty(&uld_ctx_list)) {
- u_ctx = ctx_rr;
- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
- ctx_rr = list_first_entry(&uld_ctx_list,
- struct uld_ctx,
- entry);
+ mutex_lock(&drv_data.drv_mutex);
+ if (!list_empty(&drv_data.act_dev)) {
+ u_ctx = drv_data.last_dev;
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
else
- ctx_rr = list_next_entry(ctx_rr, entry);
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
}
- mutex_unlock(&dev_mutex);
+ mutex_unlock(&drv_data.drv_mutex);
return u_ctx;
}
-static int chcr_dev_add(struct uld_ctx *u_ctx)
+static void chcr_dev_add(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENXIO;
+ dev = &u_ctx->dev;
+ dev->state = CHCR_ATTACH;
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_move(&u_ctx->entry, &drv_data.act_dev);
+ if (!drv_data.last_dev)
+ drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
+}
+
+static void chcr_dev_init(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev;
+ dev = &u_ctx->dev;
spin_lock_init(&dev->lock_chcr_dev);
- u_ctx->dev = dev;
- dev->u_ctx = u_ctx;
- atomic_inc(&dev_count);
- mutex_lock(&dev_mutex);
- list_add_tail(&u_ctx->entry, &uld_ctx_list);
- if (!ctx_rr)
- ctx_rr = u_ctx;
- mutex_unlock(&dev_mutex);
- return 0;
+ INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+ init_completion(&dev->detach_comp);
+ dev->state = CHCR_INIT;
+ dev->wqretry = WQ_RETRY;
+ atomic_inc(&drv_data.dev_count);
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+ if (!drv_data.last_dev)
+ drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
}
-static int chcr_dev_remove(struct uld_ctx *u_ctx)
+static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- if (ctx_rr == u_ctx) {
- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
- ctx_rr = list_first_entry(&uld_ctx_list,
- struct uld_ctx,
- entry);
+ struct adapter *adap;
+
+ mutex_lock(&drv_data.drv_mutex);
+ if (drv_data.last_dev == u_ctx) {
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
else
- ctx_rr = list_next_entry(ctx_rr, entry);
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
}
- list_del(&u_ctx->entry);
- if (list_empty(&uld_ctx_list))
- ctx_rr = NULL;
- kfree(u_ctx->dev);
- u_ctx->dev = NULL;
- atomic_dec(&dev_count);
+ list_move(&u_ctx->entry, &drv_data.inact_dev);
+ if (list_empty(&drv_data.act_dev))
+ drv_data.last_dev = NULL;
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+ atomic_dec(&drv_data.dev_count);
+ mutex_unlock(&drv_data.drv_mutex);
+
return 0;
}
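
[note] assign_chcr_device() keeps its round-robin behaviour, but the cursor (drv_data.last_dev) now walks only the active list; detached adapters are parked on inact_dev so new sessions can never be bound to them. The cursor logic, reduced to indices:

#include <stddef.h>

/* Pick the cursor entry, then advance cyclically over n active devs. */
static int assign_rr(int *cursor, size_t n)
{
	int picked;

	if (n == 0)
		return -1;		/* no active device */
	picked = *cursor;
	*cursor = (*cursor + 1) % (int)n;
	return picked;
}
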
@@ -131,12 +169,8 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
ack_err_status =
ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
- if (ack_err_status) {
- if (CHK_MAC_ERR_BIT(ack_err_status) ||
- CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EBADMSG;
- atomic_inc(&adap->chcr_stats.error);
- }
+ if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
+ error_status = -EBADMSG;
/* call completion callback with failure status */
if (req) {
error_status = chcr_handle_resp(req, input, error_status);
@@ -144,6 +178,9 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
}
+ if (error_status)
+ atomic_inc(&adap->chcr_stats.error);
+
return 0;
}
@@ -167,6 +204,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+ chcr_dev_init(u_ctx);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
chcr_add_xfrmops(lld);
@@ -179,7 +217,7 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl)
{
struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
- struct chcr_dev *dev = u_ctx->dev;
+ struct chcr_dev *dev = &u_ctx->dev;
const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
if (rpl->opcode != CPL_FW6_PLD) {
@@ -201,6 +239,28 @@ int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+static void chcr_detach_device(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev = &u_ctx->dev;
+
+ spin_lock_bh(&dev->lock_chcr_dev);
+ if (dev->state == CHCR_DETACH) {
+ spin_unlock_bh(&dev->lock_chcr_dev);
+		pr_debug("Detach event received for already detached device\n");
+ return;
+ }
+ dev->state = CHCR_DETACH;
+ spin_unlock_bh(&dev->lock_chcr_dev);
+
+ if (atomic_read(&dev->inflight) != 0) {
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ wait_for_completion(&dev->detach_comp);
+ }
+
+ // Move u_ctx to inactive_dev list
+ chcr_dev_move(u_ctx);
+}
+
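
[note] chcr_detach_device() flips the state first, under the lock, so no new request can take a reference, then waits on detach_comp while detach_work_fn rechecks inflight every WQ_DETACH_TM, giving up with a WARN after WQ_RETRY attempts. A polling model of that drain (sleep interval and retry budget are parameters here):

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

/* Poll inflight up to `retries` times, sleeping `ms` between checks. */
static int drain_inflight(atomic_int *inflight, int retries, long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	while (atomic_load(inflight) != 0) {
		if (--retries <= 0) {
			fprintf(stderr, "requests still pending\n");
			return -1;
		}
		nanosleep(&ts, NULL);
	}
	return 0;
}
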
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
@@ -208,23 +268,16 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
switch (state) {
case CXGB4_STATE_UP:
- if (!u_ctx->dev) {
- ret = chcr_dev_add(u_ctx);
- if (ret != 0)
- return ret;
+ if (u_ctx->dev.state != CHCR_INIT) {
+			// Already initialised.
+ return 0;
}
- if (atomic_read(&dev_count) == 1)
- ret = start_crypto();
+ chcr_dev_add(u_ctx);
+ ret = start_crypto();
break;
case CXGB4_STATE_DETACH:
- if (u_ctx->dev) {
- mutex_lock(&dev_mutex);
- chcr_dev_remove(u_ctx);
- mutex_unlock(&dev_mutex);
- }
- if (!atomic_read(&dev_count))
- stop_crypto();
+ chcr_detach_device(u_ctx);
break;
case CXGB4_STATE_START_RECOVERY:
@@ -237,7 +290,13 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
+ INIT_LIST_HEAD(&drv_data.act_dev);
+ INIT_LIST_HEAD(&drv_data.inact_dev);
+ atomic_set(&drv_data.dev_count, 0);
+ mutex_init(&drv_data.drv_mutex);
+ drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
+
return 0;
}
@@ -245,18 +304,20 @@ static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
- if (atomic_read(&dev_count))
- stop_crypto();
+ stop_crypto();
+ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
- if (u_ctx->dev)
- chcr_dev_remove(u_ctx);
+ mutex_lock(&drv_data.drv_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ list_del(&u_ctx->entry);
kfree(u_ctx);
}
- mutex_unlock(&dev_mutex);
- cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&drv_data.drv_mutex);
}
module_init(chcr_crypto_init);
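
chcr_crypto_init()/chcr_crypto_exit() now own a module-global drv_data with active and inactive device lists guarded by a mutex, and teardown walks both lists with the _safe iterator because each node is freed in place. A reduced sketch of that lifecycle, with hypothetical my_* names:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct my_ctx {
        struct list_head entry;
    };

    static struct {
        struct list_head act_dev, inact_dev;
        struct mutex drv_mutex;
    } my_drv;

    static void my_drv_init(void)
    {
        INIT_LIST_HEAD(&my_drv.act_dev);
        INIT_LIST_HEAD(&my_drv.inact_dev);
        mutex_init(&my_drv.drv_mutex);
    }

    static void my_drv_exit(void)
    {
        struct my_ctx *ctx, *tmp;

        mutex_lock(&my_drv.drv_mutex);
        /* _safe variant: the cursor's node is freed inside the loop */
        list_for_each_entry_safe(ctx, tmp, &my_drv.act_dev, entry) {
            list_del(&ctx->entry);
            kfree(ctx);
        }
        list_for_each_entry_safe(ctx, tmp, &my_drv.inact_dev, entry) {
            list_del(&ctx->entry);
            kfree(ctx);
        }
        mutex_unlock(&my_drv.drv_mutex);
    }
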
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index de3a9c085daf..1159dee964ed 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -47,7 +47,7 @@
#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
-
+#define WQ_DETACH_TM (msecs_to_jiffies(50))
#define PAD_ERROR_BIT 1
#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
@@ -61,9 +61,6 @@
#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
DUMMY_BYTES + \
sizeof(struct ulptx_sgl))
-
-#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
-
struct uld_ctx;
struct _key_ctx {
@@ -121,6 +118,20 @@ struct _key_ctx {
#define KEYCTX_TX_WR_AUTHIN_G(x) \
(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+#define WQ_RETRY 5
+struct chcr_driver_data {
+ struct list_head act_dev;
+ struct list_head inact_dev;
+ atomic_t dev_count;
+ struct mutex drv_mutex;
+ struct uld_ctx *last_dev;
+};
+
+enum chcr_state {
+ CHCR_INIT = 0,
+ CHCR_ATTACH,
+ CHCR_DETACH,
+};
struct chcr_wr {
struct fw_crypto_lookaside_wr wreq;
struct ulp_txpkt ulptx;
@@ -131,15 +142,18 @@ struct chcr_wr {
struct chcr_dev {
spinlock_t lock_chcr_dev;
- struct uld_ctx *u_ctx;
+ enum chcr_state state;
+ atomic_t inflight;
+ int wqretry;
+ struct delayed_work detach_work;
+ struct completion detach_comp;
unsigned char tx_channel_id;
- unsigned char rx_channel_id;
};
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
- struct chcr_dev *dev;
+ struct chcr_dev dev;
};
struct sge_opaque_hdr {
@@ -159,8 +173,17 @@ struct chcr_ipsec_wr {
struct chcr_ipsec_req req;
};
+#define ESN_IV_INSERT_OFFSET 12
+struct chcr_ipsec_aadiv {
+ __be32 spi;
+ u8 seq_no[8];
+ u8 iv[8];
+};
+
struct ipsec_sa_entry {
int hmac_ctrl;
+ u16 esn;
+ u16 imm;
unsigned int enckey_len;
unsigned int kctx_len;
unsigned int authsize;
@@ -181,6 +204,13 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static inline void *padap(struct chcr_dev *dev)
+{
+ struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);
+
+ return pci_get_drvdata(u_ctx->lldi.pdev);
+}
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
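
Because chcr_dev is now embedded in uld_ctx by value rather than pointed to, the padap() macro becomes the container_of() helper above: from a member pointer, subtract the member's offset to recover the enclosing object, then follow it to the PCI drvdata. The idiom in isolation, with hypothetical outer/inner types:

    #include <linux/kernel.h>

    struct inner {
        int x;
    };

    struct outer {
        int id;
        struct inner dev;   /* embedded by value, like chcr_dev in uld_ctx */
    };

    static struct outer *outer_of(struct inner *p)
    {
        /* container_of(ptr, type, member) subtracts offsetof(type, member) */
        return container_of(p, struct outer, dev);
    }

This only works for embedded members; with the old pointer member, a back-pointer (the removed u_ctx field in chcr_dev) was needed instead.
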
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d37ef41f9ebe..655606f2e4d0 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -41,7 +41,8 @@
#define CCM_B0_SIZE 16
#define CCM_AAD_FIELD_SIZE 2
-#define T6_MAX_AAD_SIZE 511
+// 511 - 16 (for IV)
+#define T6_MAX_AAD_SIZE 495
/* Define following if h/w is not dropping the AAD and IV data before
@@ -185,9 +186,6 @@ struct chcr_aead_reqctx {
dma_addr_t b0_dma;
unsigned int b0_len;
unsigned int op;
- short int aad_nents;
- short int src_nents;
- short int dst_nents;
u16 imm;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
@@ -322,10 +320,8 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
unsigned short op_type);
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
unsigned short qid);
-void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
- unsigned int assoclen);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam);
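
On the T6_MAX_AAD_SIZE change: the hardware window is unchanged at 511 bytes, but 16 of them are now reserved for the IV the driver sends inline, leaving 495 for AAD (511 - 16 = 495). A trivial compile-time restatement of that arithmetic, hypothetical macro names throughout:

    #include <linux/build_bug.h>

    #define MY_HW_AAD_WINDOW  511   /* addressable AAD window on T6 */
    #define MY_IV_ROOM         16   /* IV now travels inside the window */
    #define MY_MAX_AAD_SIZE   (MY_HW_AAD_WINDOW - MY_IV_ROOM)

    static inline void my_check_aad_budget(void)
    {
        BUILD_BUG_ON(MY_MAX_AAD_SIZE != 495);
    }
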
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index ceaa16b8f72e..2fb48cce4462 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -76,12 +76,14 @@ static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static void chcr_advance_esn_state(struct xfrm_state *x);
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
.xdo_dev_state_add = chcr_xfrm_add_state,
.xdo_dev_state_delete = chcr_xfrm_del_state,
.xdo_dev_state_free = chcr_xfrm_free_state,
.xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
/* Add offload xfrms to Chelsio Interface */
@@ -210,10 +212,6 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
pr_debug("CHCR: Cannot offload compressed xfrm states\n");
return -EINVAL;
}
- if (x->props.flags & XFRM_STATE_ESN) {
- pr_debug("CHCR: Cannot offload ESN xfrm states\n");
- return -EINVAL;
- }
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
@@ -266,6 +264,8 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
}
sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ if (x->props.flags & XFRM_STATE_ESN)
+ sa_entry->esn = 1;
chcr_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
try_module_get(THIS_MODULE);
@@ -294,28 +294,57 @@ static void chcr_xfrm_free_state(struct xfrm_state *x)
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
-
+ if (x->props.family == AF_INET) {
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
return true;
}
-static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+static void chcr_advance_esn_state(struct xfrm_state *x)
{
- int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry)
+{
+ unsigned int kctx_len;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) + kctx_len;
hdrlen += sizeof(struct cpl_tx_pkt);
+ if (sa_entry->esn)
+ hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
+ << 4);
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
}
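
is_eth_imm() now sizes the full work-request header (FW ULPTX WR + chcr_ipsec_req + key context + CPL, plus the ESN AAD/IV block rounded up to 16-byte units) and permits immediate data only if the packet still fits under MAX_IMM_TX_PKT_LEN. The rounding term is plain DIV_ROUND_UP arithmetic; for the 20-byte chcr_ipsec_aadiv (4-byte SPI + 8-byte sequence number + 8-byte IV) it reserves 32 bytes:

    #include <linux/kernel.h>

    /* Round a header component up to whole 16-byte units, as the driver
     * does for the ESN block: DIV_ROUND_UP(len, 16) << 4.
     * round16(20) == 32, so ESN costs 32 bytes of immediate-data budget.
     */
    static inline unsigned int round16(unsigned int len)
    {
        return DIV_ROUND_UP(len, 16) << 4;
    }
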
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
- unsigned int kctx_len)
+ struct ipsec_sa_entry *sa_entry)
{
+ unsigned int kctx_len;
unsigned int flits;
- int hdrlen = is_eth_imm(skb, kctx_len);
+ int aadivlen;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = is_eth_imm(skb, sa_entry);
+ aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ 16) : 0;
+ aadivlen <<= 4;
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -338,13 +367,69 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
flits += (sizeof(struct fw_ulptx_wr) +
sizeof(struct chcr_ipsec_req) +
kctx_len +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ sizeof(struct cpl_tx_pkt_core) +
+ aadivlen) / sizeof(__be64);
return flits;
}
+inline void *copy_esn_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct chcr_ipsec_aadiv *aadiv;
+ struct ulptx_idata *sc_imm;
+ struct ip_esp_hdr *esphdr;
+ struct xfrm_offload *xo;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ __be64 seqno;
+ u32 qidx;
+ u32 seqlo;
+ u8 *iv;
+ int eoq;
+ int len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ if (!eoq)
+ pos = q->q.desc;
+
+ len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
+ memset(pos, 0, len);
+ aadiv = (struct chcr_ipsec_aadiv *)pos;
+ esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ xo = xfrm_offload(skb);
+
+ aadiv->spi = (esphdr->spi);
+ seqlo = htonl(esphdr->seq_no);
+ seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
+ memcpy(aadiv->seq_no, &seqno, 8);
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ memcpy(aadiv->iv, iv, 8);
+
+ if (sa_entry->imm) {
+ sc_imm = (struct ulptx_idata *)(pos +
+ (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ sizeof(__be64)) << 3));
+ sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
+ sc_imm->len = cpu_to_be32(sa_entry->imm);
+ }
+ pos += len;
+ return pos;
+}
+
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
- struct net_device *dev,
- void *pos)
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
{
struct cpl_tx_pkt_core *cpl;
struct sge_eth_txq *q;
@@ -379,6 +464,9 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
cpl->ctrl1 = cpu_to_be64(cntrl);
pos += sizeof(struct cpl_tx_pkt_core);
+ /* Copy ESN info for HW */
+ if (sa_entry->esn)
+ pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
return pos;
}
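
copy_esn_pktxt() above rebuilds the 64-bit extended sequence number from the 32-bit low half carried in the ESP header and the high half the xfrm layer stashed in xo->seq.hi, then lays SPI, sequence number, and the first 8 IV bytes out back to back for the hardware. A standalone sketch of that packing, assuming host-order sequence halves and hypothetical my_* names:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    struct my_aadiv {
        __be32 spi;       /* same 20-byte layout as chcr_ipsec_aadiv */
        u8 seq_no[8];
        u8 iv[8];
    };

    static void my_pack_aadiv(struct my_aadiv *a, __be32 spi,
                              u32 seq_lo, u32 seq_hi, const u8 *iv)
    {
        /* 64-bit ESN = (hi << 32) | lo, stored big-endian for the HW */
        __be64 seqno = cpu_to_be64(((u64)seq_hi << 32) | seq_lo);

        a->spi = spi;                   /* already in network byte order */
        memcpy(a->seq_no, &seqno, sizeof(seqno));
        memcpy(a->iv, iv, sizeof(a->iv));
    }
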
@@ -425,7 +513,7 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
pos = (u8 *)q->q.desc + (key_len - left);
}
/* Copy CPL TX PKT XT */
- pos = copy_cpltx_pktxt(skb, dev, pos);
+ pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
return pos;
}
@@ -438,10 +526,16 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- unsigned int immdatalen = 0;
unsigned int ivsize = GCM_ESP_IV_SIZE;
struct chcr_ipsec_wr *wr;
+ u16 immdatalen = 0;
unsigned int flits;
+ u32 ivinoffset;
+ u32 aadstart;
+ u32 aadstop;
+ u32 ciphstart;
+ u32 ivdrop = 0;
+ u32 esnlen = 0;
u32 wr_mid;
int qidx = skb_get_queue_mapping(skb);
struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
@@ -450,10 +544,17 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
atomic_inc(&adap->chcr_stats.ipsec_cnt);
- flits = calc_tx_sec_flits(skb, kctx_len);
+ flits = calc_tx_sec_flits(skb, sa_entry);
+ if (sa_entry->esn)
+ ivdrop = 1;
- if (is_eth_imm(skb, kctx_len))
+ if (is_eth_imm(skb, sa_entry)) {
immdatalen = skb->len;
+ sa_entry->imm = immdatalen;
+ }
+
+ if (sa_entry->esn)
+ esnlen = sizeof(struct chcr_ipsec_aadiv);
/* WR Header */
wr = (struct chcr_ipsec_wr *)pos;
@@ -478,33 +579,38 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
sizeof(wr->req.key_ctx) +
kctx_len +
sizeof(struct cpl_tx_pkt_core) +
- immdatalen);
+ esnlen +
+ (esnlen ? 0 : immdatalen));
/* CPL_SEC_PDU */
+ ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1);
wr->req.sec_cpl.op_ivinsrtofst = htonl(
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(2) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) + 1)));
+ ivinoffset));
- wr->req.sec_cpl.pldlen = htonl(skb->len);
+ wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
+ aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
+ aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr));
+ ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1;
+ ciphstart += sa_entry->esn ? esnlen : 0;
wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- (skb_transport_offset(skb) + 1),
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr)),
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) +
- GCM_ESP_IV_SIZE + 1), 0);
+ aadstart,
+ aadstop,
+ ciphstart, 0);
wr->req.sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) +
- GCM_ESP_IV_SIZE + 1,
- sa_entry->authsize,
- sa_entry->authsize);
+ FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
+ sa_entry->authsize,
+ sa_entry->authsize);
wr->req.sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -512,7 +618,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
sa_entry->hmac_ctrl,
ivsize >> 1);
wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 0, 0);
+ 0, ivdrop, 0);
pos += sizeof(struct fw_ulptx_wr) +
sizeof(struct ulp_txpkt) +
@@ -565,7 +671,7 @@ int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
int qidx, left, credits;
- unsigned int flits = 0, ndesc, kctx_len;
+ unsigned int flits = 0, ndesc;
struct adapter *adap;
struct sge_eth_txq *q;
struct port_info *pi;
@@ -577,7 +683,6 @@ int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
- kctx_len = sa_entry->kctx_len;
sp = skb_sec_path(skb);
if (sp->len != 1) {
@@ -592,7 +697,7 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_reclaim_completed_tx(adap, &q->q, true);
- flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ flits = calc_tx_sec_flits(skb, sa_entry);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -605,7 +710,7 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb, kctx_len))
+ if (is_eth_imm(skb, sa_entry))
immediate = true;
if (!immediate &&
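
For the offset arithmetic in chcr_crypto_wreq(): without ESN, the AAD runs from the transport header through the 8 ESP header bytes and the ciphertext starts after the GCM IV; with ESN, the AAD window becomes the first 12 bytes of the prepended aadiv (SPI + 64-bit sequence number, hence ESN_IV_INSERT_OFFSET = 12), the IV is inserted right behind it, and the cipher start shifts by the 20-byte aadiv. A sketch of the two cases under those assumptions (1-based offsets, hypothetical helper):

    struct my_sec_offsets {
        u32 ivinofst, aadstart, aadstop, ciphstart;
    };

    static void my_fill_offsets(struct my_sec_offsets *o, int esn,
                                u32 esp_off,       /* transport-header offset */
                                u32 esp_hdr_len,   /* 8 for ESP */
                                u32 iv_len,        /* GCM_ESP_IV_SIZE */
                                u32 aadiv_len)     /* 20-byte aadiv block */
    {
        if (esn) {
            o->aadstart  = 1;                      /* SPI at byte 1 */
            o->aadstop   = 12;                     /* SPI + 64-bit seqno */
            o->ivinofst  = 12 + 1;                 /* IV follows the AAD */
            o->ciphstart = esp_off + esp_hdr_len + iv_len + 1 + aadiv_len;
        } else {
            o->aadstart  = esp_off + 1;
            o->aadstop   = esp_off + esp_hdr_len;
            o->ivinofst  = esp_off + esp_hdr_len + 1;
            o->ciphstart = esp_off + esp_hdr_len + iv_len + 1;
        }
    }
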
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index eb2a0a73cbed..b4c24a35b3d0 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -261,7 +261,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
op->fallback.cip = crypto_alloc_cipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
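
The geode-aes change trims the fallback allocation mask: crypto_alloc_cipher() hands back a synchronous single-block cipher, so CRYPTO_ALG_ASYNC has no business in the mask; CRYPTO_ALG_NEED_FALLBACK alone keeps the API from selecting another implementation that itself requires a fallback. The usual shape of such an allocation (hypothetical helper; the caller still checks IS_ERR()):

    #include <linux/crypto.h>

    static struct crypto_cipher *my_get_fallback(const char *name)
    {
        /* type = 0; mask = CRYPTO_ALG_NEED_FALLBACK excludes drivers
         * that would themselves need a software fallback. */
        return crypto_alloc_cipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
    }
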
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 3aef1d43e435..d531c14020dc 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -970,7 +970,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = {
.cra_name = "cbc(des)",
.cra_driver_name = "safexcel-cbc-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1010,7 +1010,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = {
.cra_name = "ecb(des)",
.cra_driver_name = "safexcel-ecb-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1074,7 +1074,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "safexcel-cbc-des3_ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1114,7 +1114,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "safexcel-ecb-des3_ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 27f7dad2d45d..19fba998b86b 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1194,7 +1194,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1221,7 +1220,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1247,7 +1245,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1273,7 +1270,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1287,7 +1283,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .geniv = "eseqiv",
.setkey = ablk_rfc3686_setkey,
.encrypt = ablk_rfc3686_crypt,
.decrypt = ablk_rfc3686_crypt }
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index e01c46387df8..519086730791 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -178,12 +178,12 @@ static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
else
from = scc->black_memory;
- dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from,
+ dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
ctx->dst_nents * 8);
len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
from, ctx->size, ctx->offset);
if (!len) {
- dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len);
+ dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
return -EINVAL;
}
@@ -274,7 +274,7 @@ static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
to, len, ctx->offset);
if (!len) {
- dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len);
+ dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
return -EINVAL;
}
@@ -335,9 +335,9 @@ static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
return;
}
- dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
- (void *)readl(scc->base + SCC_SCM_RED_START),
- (void *)readl(scc->base + SCC_SCM_BLACK_START));
+ dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
+ readl(scc->base + SCC_SCM_RED_START),
+ readl(scc->base + SCC_SCM_BLACK_START));
/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR,
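
The mxc-scc warnings were format-string mismatches on ARM64, where the lengths returned by sg_pcopy_*() are size_t (64-bit there) yet were printed with %d; %zu is the width-correct specifier on every architecture. A reduced example of the corrected call, with a hypothetical helper name:

    #include <linux/device.h>
    #include <linux/types.h>

    static void my_log_copy(struct device *dev, const void *from, size_t len)
    {
        /* %zu matches size_t on both 32- and 64-bit targets */
        dev_dbg(dev, "pcopy: from 0x%p %zu bytes\n", from, len);
    }
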
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 4e6ff32f8a7e..a2105cf33abb 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
+#include <linux/clk.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
@@ -82,6 +83,7 @@ struct dcp {
spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
+ struct clk *dcp_clk;
};
enum dcp_chan {
@@ -1053,11 +1055,24 @@ static int mxs_dcp_probe(struct platform_device *pdev)
/* Re-align the structure so it fits the DCP constraints. */
sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
- /* Restart the DCP block. */
- ret = stmp_reset_block(sdcp->base);
+ /* DCP clock is optional, only used on some SoCs */
+ sdcp->dcp_clk = devm_clk_get(dev, "dcp");
+ if (IS_ERR(sdcp->dcp_clk)) {
+ if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
+ return PTR_ERR(sdcp->dcp_clk);
+ sdcp->dcp_clk = NULL;
+ }
+ ret = clk_prepare_enable(sdcp->dcp_clk);
if (ret)
return ret;
+ /* Restart the DCP block. */
+ ret = stmp_reset_block(sdcp->base);
+ if (ret) {
+ dev_err(dev, "Failed reset\n");
+ goto err_disable_unprepare_clk;
+ }
+
/* Initialize control register. */
writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
@@ -1094,7 +1109,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
NULL, "mxs_dcp_chan/sha");
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
dev_err(dev, "Error starting SHA thread!\n");
- return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ goto err_disable_unprepare_clk;
}
sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1151,6 +1167,10 @@ err_destroy_aes_thread:
err_destroy_sha_thread:
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+
+err_disable_unprepare_clk:
+ clk_disable_unprepare(sdcp->dcp_clk);
+
return ret;
}
@@ -1170,6 +1190,8 @@ static int mxs_dcp_remove(struct platform_device *pdev)
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+ clk_disable_unprepare(sdcp->dcp_clk);
+
platform_set_drvdata(pdev, NULL);
global_sdcp = NULL;
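
mxs-dcp open-codes the optional-clock idiom (the devm_clk_get_optional() helper only arrived in later kernels): -ENOENT means the SoC simply has no "dcp" clock and is forgiven by substituting NULL, which every clk_*() call accepts as a no-op dummy, while any other error aborts probe; once enabled, the clock must be unwound on each later failure path, hence the new err_disable_unprepare_clk label. A condensed sketch of the acquire step, hypothetical helper name:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int my_get_optional_clk(struct device *dev, struct clk **out)
    {
        struct clk *clk = devm_clk_get(dev, "dcp");

        if (IS_ERR(clk)) {
            if (clk != ERR_PTR(-ENOENT))
                return PTR_ERR(clk);  /* a real error: propagate it */
            clk = NULL;               /* absent: NULL is a valid dummy clk */
        }
        *out = clk;
        return clk_prepare_enable(clk);   /* no-op when clk is NULL */
    }
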
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 898c0a280511..5a26fcd75d2d 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -159,7 +159,6 @@ struct crypto_alg nx_ctr3686_aes_alg = {
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
- .geniv = "seqiv",
.setkey = ctr3686_aes_nx_set_key,
.encrypt = ctr3686_aes_nx_crypt,
.decrypt = ctr3686_aes_nx_crypt,
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a553ffddb11b..0120feb2d746 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -749,7 +749,6 @@ static struct crypto_alg algs_ctr[] = {
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .geniv = "eseqiv",
.ivsize = AES_BLOCK_SIZE,
.setkey = omap_aes_setkey,
.encrypt = omap_aes_ctr_encrypt,
@@ -1222,7 +1221,6 @@ static int omap_aes_probe(struct platform_device *pdev)
algp = &dd->pdata->algs_info[i].algs_list[j];
pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
err = crypto_register_alg(algp);
if (err)
@@ -1240,7 +1238,6 @@ static int omap_aes_probe(struct platform_device *pdev)
algp = &aalg->base;
pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
err = crypto_register_aead(aalg);
if (err)
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index eb95b0d7f184..6369019219d4 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1069,7 +1069,6 @@ static int omap_des_probe(struct platform_device *pdev)
algp = &dd->pdata->algs_info[i].algs_list[j];
pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
err = crypto_register_alg(algp);
if (err)
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a28f1d18fe01..17068b55fea5 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1585,8 +1585,7 @@ static struct spacc_alg l2_engine_algs[] = {
.cra_name = "f8(kasumi)",
.cra_driver_name = "f8-kasumi-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 8,
.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 585e1cab9ae3..25c13e26d012 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -376,7 +376,6 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
alg->cra_module = THIS_MODULE;
alg->cra_init = qce_ablkcipher_init;
alg->cra_exit = qce_ablkcipher_exit;
- INIT_LIST_HEAD(&alg->cra_list);
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index d8a5db11b7ea..fc45f5ea6fdd 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -508,7 +508,6 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base->cra_alignmask = 0;
base->cra_module = THIS_MODULE;
base->cra_init = qce_ahash_cra_init;
- INIT_LIST_HEAD(&base->cra_list);
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index bbf166a97ad3..8c32a3059b4a 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1321,7 +1321,6 @@ static int sahara_register_algs(struct sahara_dev *dev)
unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- INIT_LIST_HEAD(&aes_algs[i].cra_list);
err = crypto_register_alg(&aes_algs[i]);
if (err)
goto err_aes_algs;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6988012deca4..45e20707cef8 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -3155,7 +3155,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_ablkcipher.setkey = ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
- alg->cra_ablkcipher.geniv = "eseqiv";
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index d2663a4e1f5e..a92a66b1ff46 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -556,7 +556,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_src,
ctx->device->dma.sg_src_len,
- direction, DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
break;
case DMA_FROM_DEVICE:
@@ -580,7 +580,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_dst,
ctx->device->dma.sg_dst_len,
- direction,
+ DMA_DEV_TO_MEM,
DMA_CTRL_ACK |
DMA_PREP_INTERRUPT);
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 633321a8dd03..a0bb8a6eec3f 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -166,7 +166,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
__func__);
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg, ctx->device->dma.sg_len,
- direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(ctx->device->dev,
"%s: dmaengine_prep_slave_sg() failed!\n", __func__);