path: root/drivers/crypto/caam
Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/Kconfig              |   4
-rw-r--r--  drivers/crypto/caam/Makefile             |   1
-rw-r--r--  drivers/crypto/caam/caamalg.c            | 268
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c       |  56
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h       |   4
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c         | 257
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c        | 325
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h        |  31
-rw-r--r--  drivers/crypto/caam/caamhash.c           | 116
-rw-r--r--  drivers/crypto/caam/caamhash_desc.c      |   5
-rw-r--r--  drivers/crypto/caam/caamhash_desc.h      |   2
-rw-r--r--  drivers/crypto/caam/caampkc.c            |  99
-rw-r--r--  drivers/crypto/caam/caamrng.c            |  19
-rw-r--r--  drivers/crypto/caam/compat.h             |   2
-rw-r--r--  drivers/crypto/caam/ctrl.c               | 255
-rw-r--r--  drivers/crypto/caam/desc_constr.h        |  81
-rw-r--r--  drivers/crypto/caam/dpseci-debugfs.c     |  79
-rw-r--r--  drivers/crypto/caam/dpseci-debugfs.h     |  18
-rw-r--r--  drivers/crypto/caam/error.c              |  64
-rw-r--r--  drivers/crypto/caam/error.h              |   2
-rw-r--r--  drivers/crypto/caam/intern.h             |  32
-rw-r--r--  drivers/crypto/caam/jr.c                 | 124
-rw-r--r--  drivers/crypto/caam/key_gen.c            |  14
-rw-r--r--  drivers/crypto/caam/pdb.h                |  16
-rw-r--r--  drivers/crypto/caam/pkc_desc.c           |   8
-rw-r--r--  drivers/crypto/caam/qi.c                 |  10
-rw-r--r--  drivers/crypto/caam/qi.h                 |  26
-rw-r--r--  drivers/crypto/caam/regs.h               | 141
28 files changed, 1253 insertions(+), 806 deletions(-)
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 3720ddabb507..137ed3df0c74 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -98,7 +98,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
- select CRYPTO_DES
+ select CRYPTO_LIB_DES
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the linux native IPSec
@@ -111,6 +111,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
help
Selecting this will use CAAM Queue Interface (QI) for sending
& receiving crypto jobs to/from CAAM. This gives better performance
@@ -161,6 +162,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
select CRYPTO_AUTHENC
select CRYPTO_AEAD
select CRYPTO_HASH
+ select CRYPTO_DES
help
CAAM driver for QorIQ Data Path Acceleration Architecture 2.
It handles DPSECI DPAA2 objects that sit on the Management Complex
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 9ab4e81ea21e..68d5cc0f28e2 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -30,3 +30,4 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
dpaa2_caam-y := caamalg_qi2.o dpseci.o
+dpaa2_caam-$(CONFIG_DEBUG_FS) += dpseci-debugfs.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 43f18253e5b6..2912006b946b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -74,7 +74,7 @@
#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
-#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
struct caam_alg_entry {
@@ -205,6 +205,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
}
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm>
+ * would result in invalid opcodes (last bytes of user key) in
+ * the resulting descriptor. Use DKP<ptr,imm> instead => both
+ * virtual and dma key addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
@@ -221,16 +233,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
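Note: condensing the hunk above, the key addresses are now populated unconditionally before the inline query, and inl_mask only selects the encoding. This is a restatement of the diff, not new driver code:

    /*
     * Both key representations are always valid, so the shared
     * descriptor can fall back to DKP<ptr,imm> whenever the user key
     * is longer than the derived split key.
     */
    ctx->adata.key_virt = ctx->key;
    ctx->adata.key_dma  = ctx->key_dma;
    ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
    ctx->cdata.key_dma  = ctx->key_dma + ctx->adata.keylen_pad;

    /* inl_mask from desc_inline_query() now picks only imm vs ptr */
    ctx->adata.key_inline = !!(inl_mask & 1);
    ctx->cdata.key_inline = !!(inl_mask & 2);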
@@ -253,16 +255,6 @@ skip_enc:
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -287,16 +279,6 @@ skip_enc:
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -376,6 +358,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
gcm_set_sh_desc(authenc);
@@ -439,6 +426,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
rfc4106_set_sh_desc(authenc);
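Note: the two check helpers introduced here encode the tag sizes the specs permit (128..96-bit plus the short 64- and 32-bit tags for plain GCM; only 8-, 12- and 16-byte ICVs for GCM in ESP per RFC 4106). A minimal standalone model of that behavior, not the kernel implementation itself:

    #include <stdio.h>

    static int gcm_check_authsize_model(unsigned int authsize)
    {
    	switch (authsize) {
    	case 4: case 8: case 12: case 13: case 14: case 15: case 16:
    		return 0;
    	}
    	return -22;	/* -EINVAL */
    }

    static int rfc4106_check_authsize_model(unsigned int authsize)
    {
    	switch (authsize) {
    	case 8: case 12: case 16:
    		return 0;
    	}
    	return -22;
    }

    int main(void)
    {
    	/* 13 is a valid GCM tag length but not a valid RFC 4106 ICV */
    	printf("%d %d\n", gcm_check_authsize_model(13),
    	       rfc4106_check_authsize_model(13));
    	return 0;
    }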
@@ -503,6 +495,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ if (authsize != 16)
+ return -EINVAL;
+
ctx->authsize = authsize;
rfc4543_set_sh_desc(authenc);
@@ -633,33 +628,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_keys keys;
- u32 flags;
int err;
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
-
- err = -EINVAL;
- if (keys.enckeylen != DES3_EDE_KEY_SIZE)
- goto badkey;
-
- flags = crypto_aead_get_flags(aead);
- err = __des3_verify_key(&flags, keys.enckey);
- if (unlikely(err)) {
- crypto_aead_set_flags(aead, flags);
- goto out;
- }
+ return err;
- err = aead_setkey(aead, key, keylen);
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+ aead_setkey(aead, key, keylen);
-out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
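Note: the `?:` construct used throughout this commit is the GNU C "Elvis" operator: `a ?: b` yields `a` when `a` is nonzero, otherwise `b`, evaluating `a` once. It chains "first error wins" without a temporary. A compilable demo with illustrative names (gcc/clang):

    #include <stdio.h>

    static int verify_key(int bad) { return bad ? -22 /* -EINVAL */ : 0; }
    static int program_key(void)   { printf("key programmed\n"); return 0; }

    int main(void)
    {
    	/* program_key() runs only if verify_key() returned 0 */
    	int err = verify_key(0) ?: program_key();

    	printf("err = %d\n", err);
    	return err;
    }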
static int gcm_setkey(struct crypto_aead *aead,
@@ -667,6 +646,13 @@ static int gcm_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
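Note: aes_check_keylen() is a library helper that accepts only the three AES key sizes; the rfc4106/rfc4543 paths check `keylen - 4` because the last four key bytes carry the per-SA salt, not AES key material. A standalone model of the check:

    #include <stdio.h>

    /* Only 128-, 192- and 256-bit keys are valid AES keys. */
    static int aes_check_keylen_model(unsigned int keylen)
    {
    	switch (keylen) {
    	case 16: case 24: case 32:
    		return 0;
    	}
    	return -22;	/* -EINVAL */
    }

    int main(void)
    {
    	/* rfc4106: a 20-byte input is a 16-byte AES key + 4-byte salt */
    	unsigned int keylen = 20;

    	printf("aes part ok: %d\n", aes_check_keylen_model(keylen - 4));
    	return 0;
    }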
@@ -683,9 +669,13 @@ static int rfc4106_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ int err;
- if (keylen < 4)
- return -EINVAL;
+ err = aes_check_keylen(keylen - 4);
+ if (err) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -707,9 +697,13 @@ static int rfc4543_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ int err;
- if (keylen < 4)
- return -EINVAL;
+ err = aes_check_keylen(keylen - 4);
+ if (err) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -727,7 +721,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen, const u32 ctx1_iv_off)
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_skcipher_alg *alg =
@@ -736,30 +730,10 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
- u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- /*
- * AES-CTR needs to load IV in CONTEXT1 reg
- * at an offset of 128bits (16bytes)
- * CONTEXT1[255:128] = IV
- */
- if (ctr_mode)
- ctx1_iv_off = 16;
-
- /*
- * RFC3686 specific:
- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- * | *key = {KEY, NONCE}
- */
- if (is_rfc3686) {
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- keylen -= CTR_RFC3686_NONCE_SIZE;
- }
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
@@ -782,25 +756,86 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
-static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ int err;
- if (keylen == DES3_EDE_KEY_SIZE &&
- __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) {
- return -EINVAL;
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
}
- if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) &
- CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_WEAK_KEY);
- return -EINVAL;
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
}
- return skcipher_setkey(skcipher, key, keylen);
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
}
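Note: splitting skcipher_setkey() into per-mode helpers makes each mode's CONTEXT1 layout explicit at the call site. For rfc3686(ctr(aes)) the caller-supplied key is {AES key, 4-byte nonce}, so the length check runs on the key minus the nonce and the IV lands after the nonce. The arithmetic, worked through (CTR_RFC3686_NONCE_SIZE is 4):

    #include <stdio.h>

    #define CTR_RFC3686_NONCE_SIZE 4

    int main(void)
    {
    	unsigned int keylen = 20;	/* 16-byte AES key + 4-byte nonce */
    	unsigned int ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

    	keylen -= CTR_RFC3686_NONCE_SIZE;
    	printf("AES keylen %u, IV at CONTEXT1 offset %u (nonce at 16)\n",
    	       keylen, ctx1_iv_off);	/* prints 16 and 20 */
    	return 0;
    }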
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
@@ -930,19 +965,20 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct aead_request *req = context;
struct aead_edesc *edesc;
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
kfree(edesc);
- aead_request_complete(req, err);
+ aead_request_complete(req, ecode);
}
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -950,25 +986,20 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct aead_request *req = context;
struct aead_edesc *edesc;
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
- err = -EBADMSG;
-
kfree(edesc);
- aead_request_complete(req, err);
+ aead_request_complete(req, ecode);
}
static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -978,13 +1009,14 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
skcipher_unmap(jrdev, edesc, req);
@@ -993,10 +1025,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
* ciphertext block (CBC mode) or last counter (CTR mode).
* This is used e.g. by the CTS mode.
*/
- if (ivsize) {
+ if (ivsize && !ecode) {
memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
ivsize);
-
print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
@@ -1008,7 +1039,7 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
kfree(edesc);
- skcipher_request_complete(req, err);
+ skcipher_request_complete(req, ecode);
}
static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -1018,12 +1049,13 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
skcipher_unmap(jrdev, edesc, req);
@@ -1032,7 +1064,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
* ciphertext block (CBC mode) or last counter (CTR mode).
* This is used e.g. by the CTS mode.
*/
- if (ivsize) {
+ if (ivsize && !ecode) {
memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
ivsize);
@@ -1047,7 +1079,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
kfree(edesc);
- skcipher_request_complete(req, err);
+ skcipher_request_complete(req, ecode);
}
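Note: every completion callback now receives an errno from caam_jr_strstatus() (its return type changes from void to int), so the ICV-check special case removed above is handled in one place, and the IV copy-out is skipped when ecode is nonzero since the output is not guaranteed on error. A minimal model of the mapping; mask/ID values as in the driver's regs.h, simplified here for illustration:

    #include <stdio.h>

    #define JRSTA_CCBERR_ERRID_MASK		0x000000ff
    #define JRSTA_CCBERR_ERRID_ICVCHK	0x0000000a

    /* 0 -> success; ICV-check failure -> -EBADMSG; else -> -EIO */
    static int strstatus_model(unsigned int status)
    {
    	if (!status)
    		return 0;
    	if ((status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
    		return -74;	/* -EBADMSG */
    	return -5;	/* -EIO */
    }

    int main(void)
    {
    	printf("%d %d %d\n", strstatus_model(0),
    	       strstatus_model(JRSTA_CCBERR_ERRID_ICVCHK),
    	       strstatus_model(0x40000000));
    	return 0;
    }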
/*
@@ -1525,10 +1557,7 @@ static int chachapoly_decrypt(struct aead_request *req)
static int ipsec_gcm_encrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return gcm_encrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
}
static int aead_encrypt(struct aead_request *req)
@@ -1602,10 +1631,7 @@ static int gcm_decrypt(struct aead_request *req)
static int ipsec_gcm_decrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return gcm_decrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
}
static int aead_decrypt(struct aead_request *req)
@@ -1817,6 +1843,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
u32 *desc;
int ret = 0;
+ if (!req->cryptlen)
+ return 0;
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
@@ -1851,6 +1880,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
u32 *desc;
int ret = 0;
+ if (!req->cryptlen)
+ return 0;
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
@@ -1883,7 +1915,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-aes-caam",
.cra_blocksize = AES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = aes_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1899,7 +1931,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = des_skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1931,7 +1963,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = ctr_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1949,7 +1981,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "rfc3686-ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = rfc3686_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
@@ -2003,7 +2035,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ecb-aes-caam",
.cra_blocksize = AES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = aes_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -2018,7 +2050,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ecb-des3-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = des_skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -2033,7 +2065,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ecb-arc4-caam",
.cra_blocksize = ARC4_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = arc4_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = ARC4_MIN_KEY_SIZE,
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 72531837571e..aa9ccca67045 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -503,6 +503,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
const bool is_qi, int era)
{
u32 geniv, moveiv;
+ u32 *wait_cmd;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
@@ -598,6 +599,14 @@ copy_iv:
/* Will read cryptlen */
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * Wait for IV transfer (ofifo -> class2) to finish before starting
+ * ciphertext transfer (ofifo -> external memory).
+ */
+ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_cmd);
+
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
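Note: the append_jump()/set_jump_tgt_here() pair above emits a conditional jump whose target is the very next command. The jump cannot retire until its condition (JUMP_COND_NIFP here) clears, so it acts as a barrier between the IV move and the ciphertext store. A toy model of the offset patching; the real helpers live in desc_constr.h and the constant below is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t desc[16];
    static unsigned int desc_len;

    /* Emit a JUMP word with the relative offset left blank ... */
    static uint32_t *append_jump_model(uint32_t opts)
    {
    	desc[desc_len] = opts;
    	return &desc[desc_len++];
    }

    /* ... and patch it once the target position is known. */
    static void set_jump_tgt_here_model(uint32_t *jump_cmd)
    {
    	*jump_cmd |= (uint32_t)(&desc[desc_len] - jump_cmd);
    }

    int main(void)
    {
    	uint32_t *wait_cmd = append_jump_model(0xa0000000);	/* illustrative */

    	set_jump_tgt_here_model(wait_cmd);	/* jump-to-next => offset 1 */
    	printf("jump word: 0x%08x\n", (unsigned int)desc[0]);
    	return 0;
    }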
@@ -843,13 +852,16 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_qi: true when called from caam/qi
+ *
+ * Input sequence: AAD | PTXT
+ * Output sequence: AAD | CTXT | ICV
+ * AAD length (assoclen), which includes the IV length, is available in Math3.
*/
void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi)
{
- u32 *key_jump_cmd;
-
+ u32 *key_jump_cmd, *zero_cryptlen_jump_cmd, *skip_instructions;
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip key loading if it is loaded due to sharing */
@@ -892,24 +904,26 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ /* Skip AAD */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
- /* Skip IV */
- append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+ /* Read cryptlen and set this value into VARSEQOUTLEN */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* If cryptlen is ZERO jump to AAD command */
+ zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+ /* Read AAD data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA);
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Skip IV */
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+ append_math_add(desc, VARSEQINLEN, VARSEQOUTLEN, REG0, CAAM_CMD_SZ);
/* Write encrypted data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -918,6 +932,18 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ /* Jump instructions to avoid double reading of AAD */
+ skip_instructions = append_jump(desc, JUMP_TEST_ALL);
+
+ /* There is no input data, cryptlen = 0 */
+ set_jump_tgt_here(desc, zero_cryptlen_jump_cmd);
+
+ /* Read AAD */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ set_jump_tgt_here(desc, skip_instructions);
+
/* Write ICV */
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index da4a4ee60c80..f2893393ba5e 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -12,7 +12,7 @@
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
@@ -31,7 +31,7 @@
#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 16 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 32f0f8a72067..8e3449670d2f 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -105,6 +105,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
}
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
@@ -118,16 +130,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -143,16 +145,6 @@ skip_enc:
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -171,16 +163,6 @@ skip_enc:
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -252,11 +234,10 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
ctx->adata.keylen_pad + keys.enckeylen,
ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
-#endif
+
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
@@ -296,33 +277,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_keys keys;
- u32 flags;
int err;
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
-
- err = -EINVAL;
- if (keys.enckeylen != DES3_EDE_KEY_SIZE)
- goto badkey;
-
- flags = crypto_aead_get_flags(aead);
- err = __des3_verify_key(&flags, keys.enckey);
- if (unlikely(err)) {
- crypto_aead_set_flags(aead, flags);
- goto out;
- }
+ return err;
- err = aead_setkey(aead, key, keylen);
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+ aead_setkey(aead, key, keylen);
-out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
@@ -371,6 +336,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
gcm_set_sh_desc(authenc);
@@ -385,6 +355,12 @@ static int gcm_setkey(struct crypto_aead *aead,
struct device *jrdev = ctx->jrdev;
int ret;
+ ret = aes_check_keylen(keylen);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -466,6 +442,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
rfc4106_set_sh_desc(authenc);
@@ -480,8 +461,11 @@ static int rfc4106_setkey(struct crypto_aead *aead,
struct device *jrdev = ctx->jrdev;
int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -569,6 +553,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ if (authsize != 16)
+ return -EINVAL;
+
ctx->authsize = authsize;
rfc4543_set_sh_desc(authenc);
@@ -582,8 +569,11 @@ static int rfc4543_setkey(struct crypto_aead *aead,
struct device *jrdev = ctx->jrdev;
int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -624,7 +614,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen, const u32 ctx1_iv_off)
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_skcipher_alg *alg =
@@ -632,33 +622,12 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
skcipher);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
- u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- /*
- * AES-CTR needs to load IV in CONTEXT1 reg
- * at an offset of 128bits (16bytes)
- * CONTEXT1[255:128] = IV
- */
- if (ctr_mode)
- ctx1_iv_off = 16;
-
- /*
- * RFC3686 specific:
- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- * | *key = {KEY, NONCE}
- */
- if (is_rfc3686) {
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- keylen -= CTR_RFC3686_NONCE_SIZE;
- }
-
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
@@ -694,11 +663,80 @@ badkey:
return -EINVAL;
}
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
- return unlikely(des3_verify_key(skcipher, key)) ?:
- skcipher_setkey(skcipher, key, keylen);
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
@@ -884,20 +922,8 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
qidev = caam_ctx->qidev;
- if (unlikely(status)) {
- u32 ssrc = status & JRSTA_SSRC_MASK;
- u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-
- caam_jr_strstatus(qidev, status);
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if (ssrc == JRSTA_SSRC_CCB_ERROR &&
- err_id == JRSTA_CCBERR_ERRID_ICVCHK)
- ecode = -EBADMSG;
- else
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_jr_strstatus(qidev, status);
edesc = container_of(drv_req, typeof(*edesc), drv_req);
aead_unmap(qidev, edesc, aead_req);
@@ -1168,18 +1194,14 @@ static int aead_decrypt(struct aead_request *req)
static int ipsec_gcm_encrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_crypt(req, true);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
+ true);
}
static int ipsec_gcm_decrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_crypt(req, false);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
+ false);
}
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
@@ -1190,13 +1212,14 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = caam_ctx->qidev;
int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ecode = 0;
dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
edesc = container_of(drv_req, typeof(*edesc), drv_req);
if (status)
- caam_jr_strstatus(qidev, status);
+ ecode = caam_jr_strstatus(qidev, status);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1212,10 +1235,12 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
* ciphertext block (CBC mode) or last counter (CTR mode).
* This is used e.g. by the CTS mode.
*/
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
qi_cache_free(edesc);
- skcipher_request_complete(req, status);
+ skcipher_request_complete(req, ecode);
}
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
@@ -1377,6 +1402,9 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
int ret;
+ if (!req->cryptlen)
+ return 0;
+
if (unlikely(caam_congested))
return -EAGAIN;
@@ -1414,7 +1442,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-aes-caam-qi",
.cra_blocksize = AES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = aes_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1446,7 +1474,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-des-caam-qi",
.cra_blocksize = DES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
@@ -1462,7 +1490,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ctr-aes-caam-qi",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = ctr_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1480,7 +1508,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = rfc3686_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
@@ -2523,10 +2551,9 @@ int caam_qi_algapi_init(struct device *ctrldev)
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
- if (caam_dpaa2) {
- dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
- return -ENODEV;
- }
+ /* Make sure this runs only on (DPAA 1.x) QI */
+ if (!priv->qi_present || caam_dpaa2)
+ return 0;
/*
* Register crypto algorithms the device supports.
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 06bf32c32cbd..3443f6d6dd83 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -15,6 +15,7 @@
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
+#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -198,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
}
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
@@ -209,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -247,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -329,7 +322,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_keys keys;
- u32 flags;
int err;
err = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -340,14 +332,8 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
goto badkey;
- flags = crypto_aead_get_flags(aead);
- err = __des3_verify_key(&flags, keys.enckey);
- if (unlikely(err)) {
- crypto_aead_set_flags(aead, flags);
- goto out;
- }
-
- err = aead_setkey(aead, key, keylen);
+ err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
+ aead_setkey(aead, key, keylen);
out:
memzero_explicit(&keys, sizeof(keys));
@@ -719,6 +705,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
gcm_set_sh_desc(authenc);
@@ -731,7 +722,13 @@ static int gcm_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
+ int ret;
+ ret = aes_check_keylen(keylen);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -805,6 +802,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
rfc4106_set_sh_desc(authenc);
@@ -817,9 +819,13 @@ static int rfc4106_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
+ int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -900,6 +906,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ if (authsize != 16)
+ return -EINVAL;
+
ctx->authsize = authsize;
rfc4543_set_sh_desc(authenc);
@@ -911,9 +920,13 @@ static int rfc4543_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
+ int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -931,7 +944,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen, const u32 ctx1_iv_off)
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_skcipher_alg *alg =
@@ -941,34 +954,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct caam_flc *flc;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
- u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128) &&
- ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
- OP_ALG_ALGSEL_CHACHA20);
const bool is_rfc3686 = alg->caam.rfc3686;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- /*
- * AES-CTR needs to load IV in CONTEXT1 reg
- * at an offset of 128bits (16bytes)
- * CONTEXT1[255:128] = IV
- */
- if (ctr_mode)
- ctx1_iv_off = 16;
-
- /*
- * RFC3686 specific:
- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- * | *key = {KEY, NONCE}
- */
- if (is_rfc3686) {
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- keylen -= CTR_RFC3686_NONCE_SIZE;
- }
-
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
@@ -996,11 +986,92 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != CHACHA_KEY_SIZE) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
- return unlikely(des3_verify_key(skcipher, key)) ?:
- skcipher_setkey(skcipher, key, keylen);
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
@@ -1227,10 +1298,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
aead_unmap(ctx->dev, edesc, req);
qi_cache_free(edesc);
@@ -1250,17 +1319,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if ((status & JRSTA_CCBERR_ERRID_MASK) ==
- JRSTA_CCBERR_ERRID_ICVCHK)
- ecode = -EBADMSG;
- else
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
aead_unmap(ctx->dev, edesc, req);
qi_cache_free(edesc);
@@ -1325,18 +1385,12 @@ static int aead_decrypt(struct aead_request *req)
static int ipsec_gcm_encrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_encrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}
static int ipsec_gcm_decrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_decrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
@@ -1352,10 +1406,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1371,7 +1423,9 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
* ciphertext block (CBC mode) or last counter (CTR mode).
* This is used e.g. by the CTS mode.
*/
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1390,10 +1444,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1409,7 +1461,9 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
* ciphertext block (CBC mode) or last counter (CTR mode).
* This is used e.g. by the CTS mode.
*/
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1423,6 +1477,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
struct caam_request *caam_req = skcipher_request_ctx(req);
int ret;
+ if (!req->cryptlen)
+ return 0;
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1451,6 +1508,8 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct caam_request *caam_req = skcipher_request_ctx(req);
int ret;
+ if (!req->cryptlen)
+ return 0;
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1545,7 +1604,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-aes-caam-qi2",
.cra_blocksize = AES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = aes_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1577,7 +1636,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-des-caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
@@ -1593,7 +1652,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "ctr-aes-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = ctr_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1611,7 +1670,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = rfc3686_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
@@ -1650,7 +1709,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "chacha20-caam-qi2",
.cra_blocksize = 1,
},
- .setkey = skcipher_setkey,
+ .setkey = chacha20_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = CHACHA_KEY_SIZE,
@@ -2918,6 +2977,7 @@ enum hash_optype {
/**
* caam_hash_ctx - ahash per-session context
* @flc: Flow Contexts array
+ * @key: authentication key
* @flc_dma: I/O virtual addresses of the Flow Contexts
* @dev: dpseci device
* @ctx_len: size of Context Register
@@ -2925,6 +2985,7 @@ enum hash_optype {
*/
struct caam_hash_ctx {
struct caam_flc flc[HASH_NUM_OP];
+ u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
dma_addr_t flc_dma[HASH_NUM_OP];
struct device *dev;
int ctx_len;
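Note: the new key member is the backing store for DKP<ptr,imm>; it is DMA-mapped once at tfm init (see the caam_hash_cra_init hunk below), and the alignment presumably keeps the device-written buffer off the cache lines of the CPU-owned fields around it. Roughly, as a sketch with 64 standing in for L1_CACHE_BYTES:

    /* ____cacheline_aligned expands, approximately, to: */
    struct toy_ctx {
    	char cpu_owned[24];
    	char dma_buf[128] __attribute__((aligned(64)));	/* own cache line */
    };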
@@ -3094,10 +3155,7 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- if (err)
- caam_qi2_strstatus(res->dev, err);
-
- res->err = err;
+ res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
complete(&res->completion);
}
@@ -3228,6 +3286,19 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->adata.key_virt = key;
ctx->adata.key_inline = true;
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ if (keylen > ctx->adata.keylen_pad) {
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
+ ctx->adata.keylen_pad,
+ DMA_TO_DEVICE);
+ }
+
ret = ahash_set_sh_desc(ahash);
kfree(hashed_key);
return ret;
@@ -3282,10 +3353,8 @@ static void ahash_done(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
memcpy(req->result, state->caam_ctx, digestsize);
@@ -3310,10 +3379,8 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
switch_buf(state);
@@ -3343,10 +3410,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
memcpy(req->result, state->caam_ctx, digestsize);
@@ -3371,10 +3436,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
- if (unlikely(status)) {
- caam_qi2_strstatus(ctx->dev, status);
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
switch_buf(state);
@@ -4466,11 +4529,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev;
+ if (alg->setkey) {
+ ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
+ dev_err(ctx->dev, "unable to map key\n");
+ return -ENOMEM;
+ }
+ }
+
dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->dev, dma_addr)) {
dev_err(ctx->dev, "unable to map shared descriptors\n");
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
return -ENOMEM;
}
@@ -4496,6 +4575,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
@@ -4700,7 +4783,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
if (unlikely(fd_err))
- dev_err(priv->dev, "FD error: %08x\n", fd_err);
+ dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
/*
* FD[ADDR] is guaranteed to be valid, irrespective of errors reported
@@ -5098,6 +5181,8 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
goto err_bind;
}
+ dpaa2_dpseci_debugfs_init(priv);
+
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
struct caam_skcipher_alg *t_alg = driver_algs + i;
@@ -5265,6 +5350,8 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
dev = &ls_dev->dev;
priv = dev_get_drvdata(dev);
+ dpaa2_dpseci_debugfs_exit(priv);
+
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
struct caam_aead_alg *t_alg = driver_aeads + i;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index be5085451053..706736776b47 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -10,12 +10,13 @@
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <linux/threads.h>
+#include <linux/netdevice.h>
#include "dpseci.h"
#include "desc_constr.h"
#define DPAA2_CAAM_STORE_SIZE 16
/* NAPI weight *must* be a multiple of the store size. */
-#define DPAA2_CAAM_NAPI_WEIGHT 64
+#define DPAA2_CAAM_NAPI_WEIGHT 512
/* The congestion entrance threshold was chosen so that on LS2088
* we support the maximum throughput for the available memory
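Note: with DPAA2_CAAM_STORE_SIZE defined as 16 just above, the new weight keeps the stated invariant, since 512 = 32 × 16. Were one to enforce it at build time (not part of this patch), a C11 static assert would do:

    _Static_assert(512 % 16 == 0,
    	       "NAPI weight must be a multiple of the store size");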
@@ -64,6 +65,7 @@ struct dpaa2_caam_priv {
struct iommu_domain *domain;
struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
+ struct dentry *dfs_root;
};
/**
@@ -90,33 +92,6 @@ struct dpaa2_caam_priv_per_cpu {
struct dpaa2_io *dpio;
};
-/*
- * The CAAM QI hardware constructs a job descriptor which points
- * to shared descriptor (as pointed by context_a of FQ to CAAM).
- * When the job descriptor is executed by deco, the whole job
- * descriptor together with shared descriptor gets loaded in
- * deco buffer which is 64 words long (each 32-bit).
- *
- * The job descriptor constructed by QI hardware has layout:
- *
- * HEADER (1 word)
- * Shdesc ptr (1 or 2 words)
- * SEQ_OUT_PTR (1 word)
- * Out ptr (1 or 2 words)
- * Out length (1 word)
- * SEQ_IN_PTR (1 word)
- * In ptr (1 or 2 words)
- * In length (1 word)
- *
- * The shdesc ptr is used to fetch shared descriptor contents
- * into deco buffer.
- *
- * Apart from shdesc contents, the total number of words that
- * get loaded in deco buffer are '8' or '11'. The remaining words
- * in deco buffer can be used for storing shared descriptor.
- */
-#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
-
/* Length of a single buffer in the QI driver memory cache */
#define CAAM_QI_MEMCACHE_SIZE 512
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e4ac5d591ad6..65399cb2a770 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -95,8 +95,8 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t key_dma;
enum dma_data_direction dir;
+ enum dma_data_direction key_dir;
struct device *jrdev;
int ctx_len;
struct alginfo adata;
@@ -282,13 +282,10 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
struct device *jrdev = ctx->jrdev;
u32 *desc;
- /* key is loaded from memory for UPDATE and FINALIZE states */
- ctx->adata.key_dma = ctx->key_dma;
-
/* shared descriptor for ahash_update */
desc = ctx->sh_desc_update;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
- ctx->ctx_len, ctx->ctx_len, 0);
+ ctx->ctx_len, ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
@@ -298,7 +295,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
/* shared descriptor for ahash_{final,finup} */
desc = ctx->sh_desc_fin;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
- digestsize, ctx->ctx_len, 0);
+ digestsize, ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
@@ -311,7 +308,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
/* shared descriptor for first invocation of ahash_update */
desc = ctx->sh_desc_update_first;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
- ctx->ctx_len, ctx->key_dma);
+ ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
@@ -321,7 +318,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
/* shared descriptor for ahash_digest */
desc = ctx->sh_desc_digest;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
- digestsize, ctx->ctx_len, 0);
+ digestsize, ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
@@ -340,7 +337,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
/* shared descriptor for ahash_update */
desc = ctx->sh_desc_update;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
- ctx->ctx_len, ctx->ctx_len, 0);
+ ctx->ctx_len, ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
@@ -350,7 +347,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
/* shared descriptor for ahash_{final,finup} */
desc = ctx->sh_desc_fin;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
- digestsize, ctx->ctx_len, 0);
+ digestsize, ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
@@ -360,7 +357,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
/* shared descriptor for first invocation of ahash_update */
desc = ctx->sh_desc_update_first;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
- ctx->ctx_len, 0);
+ ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
@@ -370,7 +367,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
/* shared descriptor for ahash_digest */
desc = ctx->sh_desc_digest;
cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
- digestsize, ctx->ctx_len, 0);
+ digestsize, ctx->ctx_len);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
@@ -480,6 +477,18 @@ static int ahash_setkey(struct crypto_ahash *ahash,
goto bad_free_key;
memcpy(ctx->key, key, keylen);
+
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm>
+ * would result in invalid opcodes (last bytes of user key) in
+ * the resulting descriptor. Use DKP<ptr,imm> instead => both
+ * virtual and dma key addresses are needed.
+ */
+ if (keylen > ctx->adata.keylen_pad)
+ dma_sync_single_for_device(ctx->jrdev,
+ ctx->adata.key_dma,
+ ctx->adata.keylen_pad,
+ DMA_TO_DEVICE);
} else {
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
keylen, CAAM_MAX_HASH_KEY_SIZE);
@@ -501,8 +510,14 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
+ if (keylen != AES_KEYSIZE_128) {
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
+ DMA_TO_DEVICE);
ctx->adata.keylen = keylen;
print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
@@ -515,6 +530,13 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
/* key is immediate data for all cmac shared descriptors */
ctx->adata.key_virt = key;
@@ -538,7 +560,7 @@ struct ahash_edesc {
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
- u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
+ u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
struct sec4_sg_entry sec4_sg[0];
};
@@ -584,12 +606,13 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
memcpy(req->result, state->caam_ctx, digestsize);
@@ -599,7 +622,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
@@ -611,12 +634,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
switch_buf(state);
@@ -630,7 +654,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
@@ -642,12 +666,13 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
memcpy(req->result, state->caam_ctx, digestsize);
@@ -657,7 +682,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
@@ -669,12 +694,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
switch_buf(state);
@@ -688,7 +714,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
/*
@@ -1812,40 +1838,50 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
if (is_xcbc_aes(caam_hash->alg_type)) {
ctx->dir = DMA_TO_DEVICE;
+ ctx->key_dir = DMA_BIDIRECTIONAL;
ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
ctx->ctx_len = 48;
-
- ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
- ARRAY_SIZE(ctx->key),
- DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
- dev_err(ctx->jrdev, "unable to map key\n");
- caam_jr_free(ctx->jrdev);
- return -ENOMEM;
- }
} else if (is_cmac_aes(caam_hash->alg_type)) {
ctx->dir = DMA_TO_DEVICE;
+ ctx->key_dir = DMA_NONE;
ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
ctx->ctx_len = 32;
} else {
- ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ if (priv->era >= 6) {
+ ctx->dir = DMA_BIDIRECTIONAL;
+ ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
+ } else {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->key_dir = DMA_NONE;
+ }
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
ctx->ctx_len = runninglen[(ctx->adata.algtype &
OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
}
+ if (ctx->key_dir != DMA_NONE) {
+ ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ ctx->key_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+ }
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
- if (is_xcbc_aes(caam_hash->alg_type))
- dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+ if (ctx->key_dir != DMA_NONE)
+ dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
ARRAY_SIZE(ctx->key),
- DMA_BIDIRECTIONAL,
+ ctx->key_dir,
DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
@@ -1878,9 +1914,9 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (is_xcbc_aes(ctx->adata.algtype))
- dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
- ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
+ if (ctx->key_dir != DMA_NONE)
+ dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key), ctx->key_dir,
DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
@@ -1971,7 +2007,7 @@ int caam_algapi_hash_init(struct device *ctrldev)
* is not present.
*/
if (!md_inst)
- return -ENODEV;
+ return 0;
/* Limit digest size based on LP256 */
if (md_vid == CHA_VER_VID_MD_LP256)
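
The key_dir changes above follow a map-once / sync-per-setkey / unmap-once lifecycle. A minimal sketch of that pattern, with hypothetical hyp_* names and ctx layout (only the dma_map_single_attrs()/dma_sync_single_for_device()/dma_unmap_single_attrs() calls are the real API):

#include <linux/dma-mapping.h>
#include <linux/string.h>

struct hyp_ctx {
	struct device *dev;
	u8 key[64];
	dma_addr_t key_dma;
};

/* cra_init: map once; skip the CPU sync, the buffer holds no data yet */
static int hyp_init(struct hyp_ctx *ctx)
{
	ctx->key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
					    sizeof(ctx->key), DMA_TO_DEVICE,
					    DMA_ATTR_SKIP_CPU_SYNC);
	return dma_mapping_error(ctx->dev, ctx->key_dma) ? -ENOMEM : 0;
}

/* setkey: update the CPU copy, then push it to the device */
static void hyp_setkey(struct hyp_ctx *ctx, const u8 *key, unsigned int len)
{
	memcpy(ctx->key, key, len);
	dma_sync_single_for_device(ctx->dev, ctx->key_dma, len, DMA_TO_DEVICE);
}

/* cra_exit: unmap with the same direction and attrs used when mapping */
static void hyp_exit(struct hyp_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->key_dma, sizeof(ctx->key),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}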
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
index 71d018343ee4..78383d77da99 100644
--- a/drivers/crypto/caam/caamhash_desc.c
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -83,10 +83,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ahash);
* @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
* @digestsize: algorithm's digest size
* @ctx_len: size of Context Register
- * @key_dma: I/O Virtual Address of the key
*/
void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
- int digestsize, int ctx_len, dma_addr_t key_dma)
+ int digestsize, int ctx_len)
{
u32 *skip_key_load;
@@ -136,7 +135,7 @@ void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
LDST_SRCDST_BYTE_CONTEXT);
if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT)
/* Save K1 */
- append_fifo_store(desc, key_dma, adata->keylen,
+ append_fifo_store(desc, adata->key_dma, adata->keylen,
LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK);
}
EXPORT_SYMBOL(cnstr_shdsc_sk_hash);
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
index 6947ee1f200c..4f369b8cb6ae 100644
--- a/drivers/crypto/caam/caamhash_desc.h
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -25,5 +25,5 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
int digestsize, int ctx_len, bool import_ctx, int era);
void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
- int digestsize, int ctx_len, dma_addr_t key_dma);
+ int digestsize, int ctx_len);
#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 80574106af29..83f96d4f86e0 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -17,18 +17,29 @@
#include "sg_sw_sec4.h"
#include "caampkc.h"
-#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
- sizeof(struct rsa_priv_f1_pdb))
+ SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
- sizeof(struct rsa_priv_f2_pdb))
+ SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
- sizeof(struct rsa_priv_f3_pdb))
+ SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;
+/*
+ * Variable used to avoid double free of resources in case
+ * algorithm registration was unsuccessful
+ */
+static bool init_done;
+
+struct caam_akcipher_alg {
+ struct akcipher_alg akcipher;
+ bool registered;
+};
+
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
@@ -107,9 +118,10 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -117,7 +129,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
@@ -125,9 +137,10 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -135,7 +148,7 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
@@ -143,9 +156,10 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -153,7 +167,7 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
@@ -161,9 +175,10 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -171,7 +186,7 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
/**
@@ -867,7 +882,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->e)
goto err;
@@ -889,8 +904,6 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
-
return 0;
err:
caam_rsa_free_key(rsa_key);
@@ -971,11 +984,11 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->d)
goto err;
- rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->e)
goto err;
@@ -998,9 +1011,6 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
- memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
-
caam_rsa_set_priv_key_form(ctx, &raw_key);
return 0;
@@ -1053,22 +1063,24 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
caam_jr_free(ctx->dev);
}
-static struct akcipher_alg caam_rsa = {
- .encrypt = caam_rsa_enc,
- .decrypt = caam_rsa_dec,
- .set_pub_key = caam_rsa_set_pub_key,
- .set_priv_key = caam_rsa_set_priv_key,
- .max_size = caam_rsa_max_size,
- .init = caam_rsa_init_tfm,
- .exit = caam_rsa_exit_tfm,
- .reqsize = sizeof(struct caam_rsa_req_ctx),
- .base = {
- .cra_name = "rsa",
- .cra_driver_name = "rsa-caam",
- .cra_priority = 3000,
- .cra_module = THIS_MODULE,
- .cra_ctxsize = sizeof(struct caam_rsa_ctx),
- },
+static struct caam_akcipher_alg caam_rsa = {
+ .akcipher = {
+ .encrypt = caam_rsa_enc,
+ .decrypt = caam_rsa_dec,
+ .set_pub_key = caam_rsa_set_pub_key,
+ .set_priv_key = caam_rsa_set_priv_key,
+ .max_size = caam_rsa_max_size,
+ .init = caam_rsa_init_tfm,
+ .exit = caam_rsa_exit_tfm,
+ .reqsize = sizeof(struct caam_rsa_req_ctx),
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-caam",
+ .cra_priority = 3000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct caam_rsa_ctx),
+ },
+ }
};
/* Public Key Cryptography module initialization handler */
@@ -1077,6 +1089,7 @@ int caam_pkc_init(struct device *ctrldev)
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
u32 pk_inst;
int err;
+ init_done = false;
/* Determine public key hardware accelerator presence. */
if (priv->era < 10)
@@ -1095,12 +1108,15 @@ int caam_pkc_init(struct device *ctrldev)
if (!zero_buffer)
return -ENOMEM;
- err = crypto_register_akcipher(&caam_rsa);
+ err = crypto_register_akcipher(&caam_rsa.akcipher);
+
if (err) {
kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
- caam_rsa.base.cra_driver_name);
+ caam_rsa.akcipher.base.cra_driver_name);
} else {
+ init_done = true;
+ caam_rsa.registered = true;
dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
}
@@ -1109,6 +1125,11 @@ int caam_pkc_init(struct device *ctrldev)
void caam_pkc_exit(void)
{
+ if (!init_done)
+ return;
+
+ if (caam_rsa.registered)
+ crypto_unregister_akcipher(&caam_rsa.akcipher);
+
kfree(zero_buffer);
- crypto_unregister_akcipher(&caam_rsa);
}
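
The kzalloc()+memcpy() pairs replaced above collapse into kmemdup(), which keeps the GFP_DMA placement and drops the redundant zeroing; a minimal before/after sketch with hypothetical helper names:

#include <linux/slab.h>
#include <linux/string.h>

/* before: allocate zeroed memory, then overwrite it anyway */
static u8 *dup_key_old(const u8 *src, size_t len)
{
	u8 *dst = kzalloc(len, GFP_DMA | GFP_KERNEL);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}

/* after: one call, same placement, no wasted memset */
static u8 *dup_key_new(const u8 *src, size_t len)
{
	return kmemdup(src, len, GFP_DMA | GFP_KERNEL);
}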
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 561bcb535184..e8baacaabe07 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -53,7 +53,7 @@
L1_CACHE_BYTES)
/* length of descriptors */
-#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2)
#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
@@ -80,6 +80,12 @@ struct caam_rng_ctx {
static struct caam_rng_ctx *rng_ctx;
+/*
+ * Variable used to avoid double free of resources in case
+ * algorithm registration was unsuccessful
+ */
+static bool init_done;
+
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
if (bd->addr)
@@ -296,6 +302,9 @@ static struct hwrng caam_rng = {
void caam_rng_exit(void)
{
+ if (!init_done)
+ return;
+
caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
kfree(rng_ctx);
@@ -307,6 +316,7 @@ int caam_rng_init(struct device *ctrldev)
u32 rng_inst;
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int err;
+ init_done = false;
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
@@ -333,7 +343,12 @@ int caam_rng_init(struct device *ctrldev)
goto free_rng_ctx;
dev_info(dev, "registering rng-caam\n");
- return hwrng_register(&caam_rng);
+
+ err = hwrng_register(&caam_rng);
+ if (!err) {
+ init_done = true;
+ return err;
+ }
free_rng_ctx:
kfree(rng_ctx);
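
init_done only becomes true once hwrng_register() succeeds, so caam_rng_exit() stays safe even when probing bailed out early. A minimal sketch of the register-last idiom, with hypothetical names:

#include <linux/hw_random.h>

static bool hyp_init_done;

static int hyp_rng_init(struct hwrng *rng)
{
	int err = hwrng_register(rng);	/* last step that can fail */

	if (!err)
		hyp_init_done = true;	/* exit path may now unwind */
	return err;
}

static void hyp_rng_exit(struct hwrng *rng)
{
	if (!hyp_init_done)		/* init never completed: nothing to undo */
		return;
	hwrng_unregister(rng);
}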
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 8639b2df0371..60e2a54c19f1 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -32,7 +32,7 @@
#include <crypto/null.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 4e43ca4d3656..db22777d59b4 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -26,16 +26,6 @@ EXPORT_SYMBOL(caam_dpaa2);
#endif
/*
- * i.MX targets tend to have clock control subsystems that can
- * enable/disable clocking to our device.
- */
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
- char *clk_name)
-{
- return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
-}
-
-/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
@@ -107,7 +97,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
int i;
- if (ctrlpriv->virt_en == 1) {
+ if (ctrlpriv->virt_en == 1 ||
+ /*
+ * Apparently, on i.MX8MQ these steps must be performed
+ * regardless of the virt_en setting
+ */
+ of_machine_is_compatible("fsl,imx8mq")) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -342,13 +337,6 @@ static int caam_remove(struct platform_device *pdev)
/* Unmap controller region */
iounmap(ctrl);
- /* shut clocks off before finalizing shutdown */
- clk_disable_unprepare(ctrlpriv->caam_ipg);
- if (ctrlpriv->caam_mem)
- clk_disable_unprepare(ctrlpriv->caam_mem);
- clk_disable_unprepare(ctrlpriv->caam_aclk);
- if (ctrlpriv->caam_emi_slow)
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -497,20 +485,99 @@ static const struct of_device_id caam_match[] = {
};
MODULE_DEVICE_TABLE(of, caam_match);
+struct caam_imx_data {
+ const struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static const struct clk_bulk_data caam_imx6_clks[] = {
+ { .id = "ipg" },
+ { .id = "mem" },
+ { .id = "aclk" },
+ { .id = "emi_slow" },
+};
+
+static const struct caam_imx_data caam_imx6_data = {
+ .clks = caam_imx6_clks,
+ .num_clks = ARRAY_SIZE(caam_imx6_clks),
+};
+
+static const struct clk_bulk_data caam_imx7_clks[] = {
+ { .id = "ipg" },
+ { .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx7_data = {
+ .clks = caam_imx7_clks,
+ .num_clks = ARRAY_SIZE(caam_imx7_clks),
+};
+
+static const struct clk_bulk_data caam_imx6ul_clks[] = {
+ { .id = "ipg" },
+ { .id = "mem" },
+ { .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx6ul_data = {
+ .clks = caam_imx6ul_clks,
+ .num_clks = ARRAY_SIZE(caam_imx6ul_clks),
+};
+
+static const struct soc_device_attribute caam_imx_soc_table[] = {
+ { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
+ { .soc_id = "i.MX6*", .data = &caam_imx6_data },
+ { .soc_id = "i.MX7*", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
+ { .family = "Freescale i.MX" },
+ { /* sentinel */ }
+};
+
+static void disable_clocks(void *data)
+{
+ struct caam_drv_private *ctrlpriv = data;
+
+ clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
+}
+
+static int init_clocks(struct device *dev, const struct caam_imx_data *data)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ int ret;
+
+ ctrlpriv->num_clks = data->num_clks;
+ ctrlpriv->clks = devm_kmemdup(dev, data->clks,
+ data->num_clks * sizeof(data->clks[0]),
+ GFP_KERNEL);
+ if (!ctrlpriv->clks)
+ return -ENOMEM;
+
+ ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
+ if (ret) {
+ dev_err(dev,
+ "Failed to request all necessary clocks\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
+ if (ret) {
+ dev_err(dev,
+ "Failed to prepare/enable all necessary clocks\n");
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
+}
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
- static const struct soc_device_attribute imx_soc[] = {
- {.family = "Freescale i.MX"},
- {},
- };
+ const struct soc_device_attribute *imx_soc_match;
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
- struct clk *clk;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
@@ -527,103 +594,68 @@ static int caam_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ctrlpriv);
nprop = pdev->dev.of_node;
- caam_imx = (bool)soc_device_match(imx_soc);
+ imx_soc_match = soc_device_match(caam_imx_soc_table);
+ caam_imx = (bool)imx_soc_match;
- /* Enable clocking */
- clk = caam_drv_identify_clk(&pdev->dev, "ipg");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM ipg clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_ipg = clk;
-
- if (!of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s") &&
- !of_machine_is_compatible("fsl,imx7ulp")) {
- clk = caam_drv_identify_clk(&pdev->dev, "mem");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM mem clk: %d\n", ret);
- return ret;
+ if (imx_soc_match) {
+ if (!imx_soc_match->data) {
+ dev_err(dev, "No clock data provided for i.MX SoC");
+ return -EINVAL;
}
- ctrlpriv->caam_mem = clk;
- }
- clk = caam_drv_identify_clk(&pdev->dev, "aclk");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM aclk clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_aclk = clk;
-
- if (!of_machine_is_compatible("fsl,imx6ul") &&
- !of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s") &&
- !of_machine_is_compatible("fsl,imx7ulp")) {
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
+ ret = init_clocks(dev, imx_soc_match->data);
+ if (ret)
return ret;
- }
- ctrlpriv->caam_emi_slow = clk;
- }
-
- ret = clk_prepare_enable(ctrlpriv->caam_ipg);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
- return ret;
- }
-
- if (ctrlpriv->caam_mem) {
- ret = clk_prepare_enable(ctrlpriv->caam_mem);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
- ret);
- goto disable_caam_ipg;
- }
}
- ret = clk_prepare_enable(ctrlpriv->caam_aclk);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
- goto disable_caam_mem;
- }
-
- if (ctrlpriv->caam_emi_slow) {
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
- }
- }
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = of_iomap(nprop, 0);
- if (ctrl == NULL) {
+ if (!ctrl) {
dev_err(dev, "caam: of_iomap() failed\n");
- ret = -ENOMEM;
- goto disable_caam_emi_slow;
+ return -ENOMEM;
}
caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
(CSTA_PLEND | CSTA_ALT_PLEND));
-
- /* Finding the page size for using the CTPR_MS register */
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
- pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+ if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+ caam_ptr_sz = sizeof(u64);
+ else
+ caam_ptr_sz = sizeof(u32);
+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+
+#ifdef CONFIG_CAAM_QI
+ /* If (DPAA 1.x) QI present, check whether dependencies are available */
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
+ ret = qman_is_probed();
+ if (!ret) {
+ ret = -EPROBE_DEFER;
+ goto iounmap_ctrl;
+ } else if (ret < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ ret = -ENODEV;
+ goto iounmap_ctrl;
+ }
+
+ ret = qman_portals_probed();
+ if (!ret) {
+ ret = -EPROBE_DEFER;
+ goto iounmap_ctrl;
+ } else if (ret < 0) {
+ dev_err(dev, "failing probe due to qman portals probe error\n");
+ ret = -ENODEV;
+ goto iounmap_ctrl;
+ }
+ }
+#endif
/* Allocating the BLOCK_OFFSET based on the supported page size on
* the platform
*/
+ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
if (pg_size == 0)
BLOCK_OFFSET = PG_SIZE_4K;
else
@@ -648,7 +680,6 @@ static int caam_probe(struct platform_device *pdev)
* In case of SoCs with Management Complex, MC f/w performs
* the configuration.
*/
- caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
ctrlpriv->mc_en = !!np;
of_node_put(np);
@@ -688,16 +719,7 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (caam_dpaa2)
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
- else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
- else
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
- } else {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- }
+ ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
goto iounmap_ctrl;
@@ -719,7 +741,6 @@ static int caam_probe(struct platform_device *pdev)
#endif
/* Check to see if (DPAA 1.x) QI present. If so, enable */
- ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present && !caam_dpaa2) {
ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
((__force uint8_t *)ctrl +
@@ -908,16 +929,6 @@ shutdown_qi:
#endif
iounmap_ctrl:
iounmap(ctrl);
-disable_caam_emi_slow:
- if (ctrlpriv->caam_emi_slow)
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-disable_caam_aclk:
- clk_disable_unprepare(ctrlpriv->caam_aclk);
-disable_caam_mem:
- if (ctrlpriv->caam_mem)
- clk_disable_unprepare(ctrlpriv->caam_mem);
-disable_caam_ipg:
- clk_disable_unprepare(ctrlpriv->caam_ipg);
return ret;
}
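
The four hand-rolled get/prepare/enable/unwind sequences deleted above reduce to two bulk calls plus one devm action. A minimal sketch of the pattern, assuming the caller owns the clk_bulk_data array (the file-scope counter is an illustration-only shortcut):

#include <linux/clk.h>
#include <linux/device.h>

static int hyp_num_clks;

static void hyp_disable_clocks(void *data)
{
	clk_bulk_disable_unprepare(hyp_num_clks, data);
}

static int hyp_enable_clocks(struct device *dev,
			     struct clk_bulk_data *clks, int num)
{
	int ret;

	hyp_num_clks = num;

	/* looks up every clock by .id; all-or-nothing, devm-managed */
	ret = devm_clk_bulk_get(dev, num, clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(num, clks);
	if (ret)
		return ret;

	/* disable runs automatically on probe failure or device removal */
	return devm_add_action_or_reset(dev, hyp_disable_clocks, clks);
}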
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 5988a26a2441..62ce6421bb3f 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -14,9 +14,41 @@
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
-#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_PTR_SZ caam_ptr_sz
+#define CAAM_PTR_SZ_MAX sizeof(dma_addr_t)
+#define CAAM_PTR_SZ_MIN sizeof(u32)
#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
-#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+#define __DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3)
+#define DESC_JOB_IO_LEN __DESC_JOB_IO_LEN(CAAM_PTR_SZ)
+#define DESC_JOB_IO_LEN_MAX __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MAX)
+#define DESC_JOB_IO_LEN_MIN __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MIN)
+
+/*
+ * The CAAM QI hardware constructs a job descriptor which points
+ * to shared descriptor (as pointed by context_a of FQ to CAAM).
+ * When the job descriptor is executed by deco, the whole job
+ * descriptor together with shared descriptor gets loaded in
+ * deco buffer which is 64 words long (each 32-bit).
+ *
+ * The job descriptor constructed by QI hardware has layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch shared descriptor contents
+ * into deco buffer.
+ *
+ * Apart from shdesc contents, the total number of words that
+ * get loaded in deco buffer are '8' or '11'. The remaining words
+ * in deco buffer can be used for storing shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / CAAM_CMD_SZ)
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
@@ -37,6 +69,7 @@
(LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
extern bool caam_little_end;
+extern size_t caam_ptr_sz;
/*
* HW fetches 4 S/G table entries at a time, irrespective of how many entries
@@ -103,9 +136,15 @@ static inline void init_job_desc_pdb(u32 * const desc, u32 options,
static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
- dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+ if (caam_ptr_sz == sizeof(dma_addr_t)) {
+ dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
- *offset = cpu_to_caam_dma(ptr);
+ *offset = cpu_to_caam_dma(ptr);
+ } else {
+ u32 *offset = (u32 *)desc_end(desc);
+
+ *offset = cpu_to_caam_dma(ptr);
+ }
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
CAAM_PTR_SZ / CAAM_CMD_SZ);
@@ -457,8 +496,8 @@ do { \
* functions where it is used.
* @keylen: length of the provided algorithm key, in bytes
* @keylen_pad: padded length of the provided algorithm key, in bytes
- * @key: address where algorithm key resides; virtual address if key_inline
- * is true, dma (bus) address if key_inline is false.
+ * @key_dma: dma (bus) address where algorithm key resides
+ * @key_virt: virtual address where algorithm key resides
* @key_inline: true - key can be inlined in the descriptor; false - key is
* referenced by the descriptor
*/
@@ -466,10 +505,8 @@ struct alginfo {
u32 algtype;
unsigned int keylen;
unsigned int keylen_pad;
- union {
- dma_addr_t key_dma;
- const void *key_virt;
- };
+ dma_addr_t key_dma;
+ const void *key_virt;
bool key_inline;
};
@@ -535,14 +572,26 @@ static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
if (adata->key_inline) {
int words;
- append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
- OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
- adata->keylen);
- append_data(desc, adata->key_virt, adata->keylen);
+ if (adata->keylen > adata->keylen_pad) {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR |
+ OP_PCL_DKP_DST_IMM | adata->keylen);
+ append_ptr(desc, adata->key_dma);
+
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ CAAM_PTR_SZ) / CAAM_CMD_SZ;
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM |
+ OP_PCL_DKP_DST_IMM | adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) /
+ CAAM_CMD_SZ;
+ }
/* Reserve space in descriptor buffer for the derived key */
- words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
- ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
if (words)
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
} else {
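
The arithmetic behind the new DESC_JOB_IO_LEN bounds, worked through once (assuming the usual MAX_CAAM_DESCSIZE of 64 32-bit words; the values match the 8-or-11-word layout comment above):

/*
 *   CAAM_CMD_SZ          = 4 bytes (one 32-bit word)
 *   CAAM_DESC_BYTES_MAX  = 4 * 64         = 256 bytes (64-word deco buffer)
 *
 *   DESC_JOB_IO_LEN_MIN  = 4*5 + 4*3 = 32 bytes ->  8 words (32-bit pointers)
 *   DESC_JOB_IO_LEN_MAX  = 4*5 + 8*3 = 44 bytes -> 11 words (64-bit pointers)
 *
 *   MAX_SDLEN            = (256 - 32) / 4 = 56 words
 */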
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
new file mode 100644
index 000000000000..c5bfc923abd8
--- /dev/null
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include "dpseci-debugfs.h"
+
+static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
+ u32 fqid, fcnt, bcnt;
+ int i, err;
+
+ seq_printf(file, "FQ stats for %s:\n", dev_name(priv->dev));
+ seq_printf(file, "%s%16s%16s\n",
+ "Rx-VFQID",
+ "Pending frames",
+ "Pending bytes");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ fqid = priv->rx_queue_attr[i].fqid;
+ err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt);
+ if (err)
+ continue;
+
+ seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt);
+ }
+
+ seq_printf(file, "%s%16s%16s\n",
+ "Tx-VFQID",
+ "Pending frames",
+ "Pending bytes");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ fqid = priv->tx_queue_attr[i].fqid;
+ err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt);
+ if (err)
+ continue;
+
+ seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt);
+ }
+
+ return 0;
+}
+
+static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_caam_priv *priv;
+
+ priv = (struct dpaa2_caam_priv *)inode->i_private;
+
+ err = single_open(file, dpseci_dbg_fqs_show, priv);
+ if (err < 0)
+ dev_err(priv->dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpseci_dbg_fq_ops = {
+ .open = dpseci_dbg_fqs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv)
+{
+ priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL);
+
+ debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv,
+ &dpseci_dbg_fq_ops);
+}
+
+void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv)
+{
+ debugfs_remove_recursive(priv->dfs_root);
+}
diff --git a/drivers/crypto/caam/dpseci-debugfs.h b/drivers/crypto/caam/dpseci-debugfs.h
new file mode 100644
index 000000000000..bc22af7bec37
--- /dev/null
+++ b/drivers/crypto/caam/dpseci-debugfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef DPSECI_DEBUGFS_H
+#define DPSECI_DEBUGFS_H
+
+#include <linux/dcache.h>
+#include "caamalg_qi2.h"
+
+#ifdef CONFIG_DEBUG_FS
+void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv);
+void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv);
+#else
+static inline void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) {}
+static inline void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* DPSECI_DEBUGFS_H */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 4f0d45865aa2..17c6108b6d41 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -56,6 +56,9 @@ EXPORT_SYMBOL(caam_little_end);
bool caam_imx;
EXPORT_SYMBOL(caam_imx);
+size_t caam_ptr_sz;
+EXPORT_SYMBOL(caam_ptr_sz);
+
static const struct {
u8 value;
const char *error_text;
@@ -118,6 +121,7 @@ static const struct {
u8 value;
const char *error_text;
} qi_error_list[] = {
+ { 0x00, "No error" },
{ 0x1F, "Job terminated by FQ or ICID flush" },
{ 0x20, "FD format error"},
{ 0x21, "FD command format error"},
@@ -210,8 +214,8 @@ static const char * const rng_err_id_list[] = {
"Secure key generation",
};
-static void report_ccb_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_ccb_status(struct device *jrdev, const u32 status,
+ const char *error)
{
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
JRSTA_CCBERR_CHAID_SHIFT;
@@ -247,22 +251,27 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
* CCB ICV check failures are part of normal operation life;
* we leave the upper layers to do what they want with them.
*/
- if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
- dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
- status, error, idx_str, idx,
- cha_str, cha_err_code,
- err_str, err_err_code);
+ if (err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+ return -EBADMSG;
+
+ dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status,
+ error, idx_str, idx, cha_str, cha_err_code,
+ err_str, err_err_code);
+
+ return -EINVAL;
}
-static void report_jump_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_jump_status(struct device *jrdev, const u32 status,
+ const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
+
+ return -EINVAL;
}
-static void report_deco_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_deco_status(struct device *jrdev, const u32 status,
+ const char *error)
{
u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
@@ -288,10 +297,12 @@ static void report_deco_status(struct device *jrdev, const u32 status,
dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
status, error, idx_str, idx, err_str, err_err_code);
+
+ return -EINVAL;
}
-static void report_qi_status(struct device *qidev, const u32 status,
- const char *error)
+static int report_qi_status(struct device *qidev, const u32 status,
+ const char *error)
{
u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
const char *err_str = "unidentified error value 0x";
@@ -309,27 +320,33 @@ static void report_qi_status(struct device *qidev, const u32 status,
dev_err(qidev, "%08x: %s: %s%s\n",
status, error, err_str, err_err_code);
+
+ return -EINVAL;
}
-static void report_jr_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_jr_status(struct device *jrdev, const u32 status,
+ const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
+
+ return -EINVAL;
}
-static void report_cond_code_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_cond_code_status(struct device *jrdev, const u32 status,
+ const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
+
+ return -EINVAL;
}
-void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
+int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
static const struct stat_src {
- void (*report_ssed)(struct device *jrdev, const u32 status,
- const char *error);
+ int (*report_ssed)(struct device *jrdev, const u32 status,
+ const char *error);
const char *error;
} status_src[16] = {
{ NULL, "No error" },
@@ -357,11 +374,14 @@ void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
* Otherwise print the error source name.
*/
if (status_src[ssrc].report_ssed)
- status_src[ssrc].report_ssed(jrdev, status, error);
- else if (error)
+ return status_src[ssrc].report_ssed(jrdev, status, error);
+
+ if (error)
dev_err(jrdev, "%d: %s\n", ssrc, error);
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
+
+ return -EINVAL;
}
EXPORT_SYMBOL(caam_strstatus);
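
With caam_strstatus() now returning an int, completion callbacks can forward a real errno instead of the raw CAAM status word: -EBADMSG for ICV (integrity check) failures, -EINVAL for everything else, 0 on success. A minimal caller sketch (hypothetical request type and completion helper; caam_jr_strstatus() is the real macro):

#include <linux/device.h>
#include "error.h"

struct hyp_req { int status; };

static void hyp_complete(struct hyp_req *req, int ecode)
{
	req->status = ecode;	/* stand-in for req->base.complete() */
}

static void hyp_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
	struct hyp_req *req = context;
	int ecode = 0;

	if (err)	/* non-zero HW status: translate and log (ratelimited) */
		ecode = caam_jr_strstatus(jrdev, err);

	hyp_complete(req, ecode);	/* crypto API sees a real errno */
}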
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index d9726e66edbf..16809fa8fec7 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -12,7 +12,7 @@
#define CAAM_ERROR_STR_MAX 302
-void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+int caam_strstatus(struct device *dev, u32 status, bool qi_v2);
#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 6af84bbc612c..731b06becd9c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -10,6 +10,8 @@
#ifndef INTERN_H
#define INTERN_H
+#include "ctrl.h"
+
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
@@ -53,10 +55,11 @@ struct caam_drv_private_jr {
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
u32 inpring_avail; /* Number of free entries in input ring */
int head; /* entinfo (s/w ring) head index */
- dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
+ void *inpring; /* Base of input ring, alloc DMA-safe */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
- struct jr_outentry *outring; /* Base of output ring, DMA-safe */
+ void *outring; /* Base of output ring, DMA-safe */
};
/*
@@ -92,11 +95,8 @@ struct caam_drv_private {
Handles of the RNG4 block are initialized
by this driver */
- struct clk *caam_ipg;
- struct clk *caam_mem;
- struct clk *caam_aclk;
- struct clk *caam_emi_slow;
-
+ struct clk_bulk_data *clks;
+ int num_clks;
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
@@ -215,4 +215,22 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#endif
+static inline u64 caam_get_dma_mask(struct device *dev)
+{
+ struct device_node *nprop = dev->of_node;
+
+ if (caam_ptr_sz != sizeof(u64))
+ return DMA_BIT_MASK(32);
+
+ if (caam_dpaa2)
+ return DMA_BIT_MASK(49);
+
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring") ||
+ of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ return DMA_BIT_MASK(40);
+
+ return DMA_BIT_MASK(36);
+}
+
#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index cea811fed320..fc97cde27059 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -108,25 +108,12 @@ static int caam_reset_hw_jr(struct device *dev)
static int caam_jr_shutdown(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- dma_addr_t inpbusaddr, outbusaddr;
int ret;
ret = caam_reset_hw_jr(dev);
tasklet_kill(&jrp->irqtask);
- /* Release interrupt */
- free_irq(jrp->irq, dev);
-
- /* Free rings */
- inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
- outbusaddr = rd_reg64(&jrp->rregs->outring_base);
- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- jrp->inpring, inpbusaddr);
- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
- jrp->outring, outbusaddr);
- kfree(jrp->entinfo);
-
return ret;
}
@@ -159,7 +146,6 @@ static int caam_jr_remove(struct platform_device *pdev)
ret = caam_jr_shutdown(jrdev);
if (ret)
dev_err(jrdev, "Failed to shut down job ring\n");
- irq_dispose_mapping(jrpriv->irq);
return ret;
}
@@ -224,7 +210,7 @@ static void caam_jr_dequeue(unsigned long devarg)
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
- if (jrp->outring[hw_idx].desc ==
+ if (jr_outentry_desc(jrp->outring, hw_idx) ==
caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
break; /* found */
}
@@ -233,7 +219,8 @@ static void caam_jr_dequeue(unsigned long devarg)
/* Unmap just-run descriptor so we can post-process */
dma_unmap_single(dev,
- caam_dma_to_cpu(jrp->outring[hw_idx].desc),
+ caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
+ hw_idx)),
jrp->entinfo[sw_idx].desc_size,
DMA_TO_DEVICE);
@@ -244,7 +231,8 @@ static void caam_jr_dequeue(unsigned long devarg)
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
- userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
+ userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
+ hw_idx));
/*
* Make sure all information from the job has been obtained
@@ -399,7 +387,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
- jrp->inpring[head] = cpu_to_caam_dma(desc_dma);
+ jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));
/*
* Guarantee that the descriptor's DMA address has been written to
@@ -441,35 +429,26 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
- tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
- /* Connect job ring interrupt handler. */
- error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- dev_name(dev), dev);
- if (error) {
- dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
- jrp->ridx, jrp->irq);
- goto out_kill_deq;
- }
-
error = caam_reset_hw_jr(dev);
if (error)
- goto out_free_irq;
+ return error;
- error = -ENOMEM;
- jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
- JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
+ jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
+ JOBR_DEPTH, &inpbusaddr,
+ GFP_KERNEL);
if (!jrp->inpring)
- goto out_free_irq;
+ return -ENOMEM;
- jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
- JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+ jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
+ JOBR_DEPTH, &outbusaddr,
+ GFP_KERNEL);
if (!jrp->outring)
- goto out_free_inpring;
+ return -ENOMEM;
- jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
+ jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
+ GFP_KERNEL);
if (!jrp->entinfo)
- goto out_free_outring;
+ return -ENOMEM;
for (i = 0; i < JOBR_DEPTH; i++)
jrp->entinfo[i].desc_addr_dma = !0;
@@ -493,22 +472,24 @@ static int caam_jr_init(struct device *dev)
(JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
- return 0;
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
+ /* Connect job ring interrupt handler. */
+ error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
+ if (error) {
+ dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+ jrp->ridx, jrp->irq);
+ tasklet_kill(&jrp->irqtask);
+ }
-out_free_outring:
- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
- jrp->outring, outbusaddr);
-out_free_inpring:
- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- jrp->inpring, inpbusaddr);
- dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
-out_free_irq:
- free_irq(jrp->irq, dev);
-out_kill_deq:
- tasklet_kill(&jrp->irqtask);
return error;
}
+static void caam_jr_irq_dispose_mapping(void *data)
+{
+ irq_dispose_mapping((unsigned long)data);
+}
/*
* Probe routine for each detected JobR subsystem.
@@ -520,6 +501,7 @@ static int caam_jr_probe(struct platform_device *pdev)
struct caam_job_ring __iomem *ctrl;
struct caam_drv_private_jr *jrpriv;
static int total_jobrs;
+ struct resource *r;
int error;
jrdev = &pdev->dev;
@@ -535,45 +517,43 @@ static int caam_jr_probe(struct platform_device *pdev)
nprop = pdev->dev.of_node;
/* Get configuration properties from device tree */
/* First, get register page */
- ctrl = of_iomap(nprop, 0);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(jrdev, "platform_get_resource() failed\n");
+ return -ENOMEM;
+ }
+
+ ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
if (!ctrl) {
- dev_err(jrdev, "of_iomap() failed\n");
+ dev_err(jrdev, "devm_ioremap() failed\n");
return -ENOMEM;
}
jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (caam_dpaa2)
- error = dma_set_mask_and_coherent(jrdev,
- DMA_BIT_MASK(49));
- else if (of_device_is_compatible(nprop,
- "fsl,sec-v5.0-job-ring"))
- error = dma_set_mask_and_coherent(jrdev,
- DMA_BIT_MASK(40));
- else
- error = dma_set_mask_and_coherent(jrdev,
- DMA_BIT_MASK(36));
- } else {
- error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
- }
+ error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
if (error) {
dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
error);
- iounmap(ctrl);
return error;
}
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
+ if (!jrpriv->irq) {
+ dev_err(jrdev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
+ (void *)(unsigned long)jrpriv->irq);
+ if (error)
+ return error;
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
- if (error) {
- irq_dispose_mapping(jrpriv->irq);
- iounmap(ctrl);
+ if (error)
return error;
- }
jrpriv->dev = jrdev;
spin_lock(&driver_data.jr_alloc_lock);
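
The conversion to devm_* resources above removes every manual unwind label; the only non-devm resource left, the IRQ mapping, is tied to the device with an action callback. A minimal sketch of that piece, with hypothetical names:

#include <linux/device.h>
#include <linux/irq.h>
#include <linux/of_irq.h>

static void hyp_irq_dispose(void *data)
{
	irq_dispose_mapping((unsigned long)data);
}

static int hyp_map_irq(struct device *dev, struct device_node *np,
		       unsigned int *irq)
{
	*irq = irq_of_parse_and_map(np, 0);
	if (!*irq)
		return -EINVAL;

	/* dispose runs automatically on probe failure and device removal */
	return devm_add_action_or_reset(dev, hyp_irq_dispose,
					(void *)(unsigned long)*irq);
}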
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 48dd3536060d..5a851ddc48fb 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -15,13 +15,14 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
struct split_key_result *res = context;
+ int ecode = 0;
dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
- res->err = err;
+ res->err = ecode;
complete(&res->completion);
}
@@ -47,18 +48,20 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr;
+ unsigned int local_max;
int ret = -ENOMEM;
adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
adata->keylen_pad = split_key_pad_len(adata->algtype &
OP_ALG_ALGSEL_MASK);
+ local_max = max(keylen, adata->keylen_pad);
dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
adata->keylen, adata->keylen_pad);
print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
- if (adata->keylen_pad > max_keylen)
+ if (local_max > max_keylen)
return -EINVAL;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
@@ -69,8 +72,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
memcpy(key_out, key_in, keylen);
- dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad,
- DMA_BIDIRECTIONAL);
+ dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL);
if (dma_mapping_error(jrdev, dma_addr)) {
dev_err(jrdev, "unable to map key memory\n");
goto out_free;
@@ -116,7 +118,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
adata->keylen_pad, 1);
}
- dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
+ dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL);
out_free:
kfree(desc);
return ret;
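
Why local_max: the same bidirectional mapping is read by the CAAM (keylen bytes of user key) and written back (keylen_pad bytes of split key), so it must cover the larger of the two. A worked example, assuming hmac(sha256), whose padded split key is 64 bytes:

/*
 *   keylen =  20:  local_max = max(20, 64)  =  64  (split key dominates)
 *   keylen = 100:  local_max = max(100, 64) = 100  (user key dominates;
 *                  the old keylen_pad-sized mapping undersized this case)
 */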
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 810f0bef0652..68c1fd5dee5d 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -512,7 +512,9 @@ struct rsa_pub_pdb {
dma_addr_t n_dma;
dma_addr_t e_dma;
u32 f_len;
-} __packed;
+};
+
+#define SIZEOF_RSA_PUB_PDB (2 * sizeof(u32) + 4 * caam_ptr_sz)
/**
* RSA Decrypt PDB - Private Key Form #1
@@ -528,7 +530,9 @@ struct rsa_priv_f1_pdb {
dma_addr_t f_dma;
dma_addr_t n_dma;
dma_addr_t d_dma;
-} __packed;
+};
+
+#define SIZEOF_RSA_PRIV_F1_PDB (sizeof(u32) + 4 * caam_ptr_sz)
/**
* RSA Decrypt PDB - Private Key Form #2
@@ -554,7 +558,9 @@ struct rsa_priv_f2_pdb {
dma_addr_t tmp1_dma;
dma_addr_t tmp2_dma;
u32 p_q_len;
-} __packed;
+};
+
+#define SIZEOF_RSA_PRIV_F2_PDB (2 * sizeof(u32) + 7 * caam_ptr_sz)
/**
* RSA Decrypt PDB - Private Key Form #3
@@ -586,6 +592,8 @@ struct rsa_priv_f3_pdb {
dma_addr_t tmp1_dma;
dma_addr_t tmp2_dma;
u32 p_q_len;
-} __packed;
+};
+
+#define SIZEOF_RSA_PRIV_F3_PDB (2 * sizeof(u32) + 9 * caam_ptr_sz)
#endif
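
Why sizeof() no longer works here: with __packed dropped, the compiler may pad the structs, while the descriptor still needs the tightly packed PDB length for the pointer width detected at probe time. A worked example for rsa_pub_pdb on a hypothetical 64-bit kernel, assuming the struct's four DMA pointers:

/*
 *   layout: u32 sgf, 4 x dma_addr_t, u32 f_len
 *
 *   old __packed sizeof:                4 + 4*8 + 4               = 40 bytes
 *   natural (unpacked) sizeof:          4 + pad4 + 4*8 + 4 + pad4 = 48 bytes
 *   SIZEOF_RSA_PUB_PDB, caam_ptr_sz=8:  2*4 + 4*8                 = 40 bytes
 *   SIZEOF_RSA_PUB_PDB, caam_ptr_sz=4:  2*4 + 4*4                 = 24 bytes
 */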
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
index 2a8d87ea94bf..0d5ee762e036 100644
--- a/drivers/crypto/caam/pkc_desc.c
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -13,7 +13,7 @@
/* Descriptor for RSA Public operation */
void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PUB_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->f_dma);
append_ptr(desc, pdb->g_dma);
@@ -26,7 +26,7 @@ void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
/* Descriptor for RSA Private operation - Private Key Form #1 */
void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F1_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
@@ -39,7 +39,7 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
/* Descriptor for RSA Private operation - Private Key Form #2 */
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F2_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
@@ -56,7 +56,7 @@ void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
/* Descriptor for RSA Private operation - Private Key Form #3 */
void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F3_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
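
Passing the run-time size into init_job_desc_pdb() keeps the job-descriptor header's length field honest: the header must count the PDB words plus every command appended after it. An illustrative check (example_* is hypothetical), using the 4-byte descriptor word CAAM_CMD_SZ from desc.h:

static inline int example_rsa_pub_pdb_words(void)
{
	/* 10 words with 8-byte CAAM pointers, 6 words with 4-byte ones */
	return SIZEOF_RSA_PUB_PDB / CAAM_CMD_SZ;
}
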
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 0fe618e3804a..378f627e1d64 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -163,7 +163,10 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
- drv_req->cbk(drv_req, -EIO);
+ if (fd->status)
+ drv_req->cbk(drv_req, be32_to_cpu(fd->status));
+ else
+ drv_req->cbk(drv_req, JRSTA_SSRC_QI);
}
static struct qman_fq *create_caam_req_fq(struct device *qidev,
@@ -574,8 +577,9 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
if (ssrc != JRSTA_SSRC_CCB_ERROR ||
err_id != JRSTA_CCBERR_ERRID_ICVCHK)
- dev_err(qidev, "Error: %#x in CAAM response FD\n",
- status);
+ dev_err_ratelimited(qidev,
+ "Error: %#x in CAAM response FD\n",
+ status);
}
if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
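
With the ERN callback change above, drv_req->cbk() no longer receives a blanket -EIO: it gets the frame descriptor's own status word (byte-swapped from the big-endian FD) when one exists, or the synthetic JRSTA_SSRC_QI source when the frame was rejected on enqueue without a CAAM status. A sketch of a completion callback under that convention (the names and the 0xf0000000 source mask are assumed from the JRSTA_SSRC_* definitions later in this patch):

static void example_done_cb(struct caam_drv_req *drv_req, u32 status)
{
	if (!status)
		return;					/* success */

	if ((status & 0xf0000000) == JRSTA_SSRC_QI)
		pr_err("request rejected at QI enqueue\n");
	else
		pr_err("CAAM error status %#x\n", status);
}
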
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index f93c9c7ed430..db0549549e3b 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -14,32 +14,6 @@
#include "desc.h"
#include "desc_constr.h"
-/*
- * CAAM hardware constructs a job descriptor which points to a shared descriptor
- * (as pointed by context_a of to-CAAM FQ).
- * When the job descriptor is executed by DECO, the whole job descriptor
- * together with shared descriptor gets loaded in DECO buffer, which is
- * 64 words (each 32-bit) long.
- *
- * The job descriptor constructed by CAAM hardware has the following layout:
- *
- * HEADER (1 word)
- * Shdesc ptr (1 or 2 words)
- * SEQ_OUT_PTR (1 word)
- * Out ptr (1 or 2 words)
- * Out length (1 word)
- * SEQ_IN_PTR (1 word)
- * In ptr (1 or 2 words)
- * In length (1 word)
- *
- * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
- *
- * Apart from shdesc contents, the total number of words that get loaded in DECO
- * buffer are '8' or '11'. The remaining words in DECO buffer can be used for
- * storing shared descriptor.
- */
-#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
-
/* Length of a single buffer in the QI driver memory cache */
#define CAAM_QI_MEMCACHE_SIZE 768
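
MAX_SDLEN leaves qi.h because a single compile-time DESC_JOB_IO_LEN no longer exists once the pointer size is decided at run time; the limit presumably moves next to the other descriptor-length helpers in this series. The invariant it encoded, per the deleted comment above, in sketch form:

/* DECO buffer is 64 words; the hardware-built job descriptor takes
 * 8 words with 32-bit pointers or 11 with 64-bit ones, and whatever
 * remains is the ceiling for the shared descriptor */
#define EXAMPLE_DECO_WORDS	64
#define EXAMPLE_JD_WORDS_MAX	11
#define EXAMPLE_MAX_SDLEN	(EXAMPLE_DECO_WORDS - EXAMPLE_JD_WORDS_MAX)
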
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 8591914d5c51..05127b70527d 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
/*
* Architecture-specific register access methods
@@ -70,6 +71,7 @@
extern bool caam_little_end;
extern bool caam_imx;
+extern size_t caam_ptr_sz;
#define caam_to_cpu(len) \
static inline u##len caam##len ## _to_cpu(u##len val) \
@@ -137,46 +139,38 @@ static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
* base + 0x0000 : least-significant 32 bits
* base + 0x0004 : most-significant 32 bits
*/
-#ifdef CONFIG_64BIT
static inline void wr_reg64(void __iomem *reg, u64 data)
{
- if (caam_little_end)
- iowrite64(data, reg);
- else
+ if (caam_little_end) {
+ if (caam_imx) {
+ iowrite32(data >> 32, (u32 __iomem *)(reg));
+ iowrite32(data, (u32 __iomem *)(reg) + 1);
+ } else {
+ iowrite64(data, reg);
+ }
+ } else {
iowrite64be(data, reg);
+ }
}
static inline u64 rd_reg64(void __iomem *reg)
{
- if (caam_little_end)
- return ioread64(reg);
- else
- return ioread64be(reg);
-}
+ if (caam_little_end) {
+ if (caam_imx) {
+ u32 low, high;
-#else /* CONFIG_64BIT */
-static inline void wr_reg64(void __iomem *reg, u64 data)
-{
- if (!caam_imx && caam_little_end) {
- wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
- wr_reg32((u32 __iomem *)(reg), data);
+ high = ioread32(reg);
+ low = ioread32(reg + sizeof(u32));
+
+ return low + ((u64)high << 32);
+ } else {
+ return ioread64(reg);
+ }
} else {
- wr_reg32((u32 __iomem *)(reg), data >> 32);
- wr_reg32((u32 __iomem *)(reg) + 1, data);
+ return ioread64be(reg);
}
}
-static inline u64 rd_reg64(void __iomem *reg)
-{
- if (!caam_imx && caam_little_end)
- return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
- (u64)rd_reg32((u32 __iomem *)(reg)));
-
- return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
- (u64)rd_reg32((u32 __iomem *)(reg) + 1));
-}
-#endif /* CONFIG_64BIT */
-
static inline u64 cpu_to_caam_dma64(dma_addr_t value)
{
if (caam_imx)
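
The unified helpers above fold the old CONFIG_64BIT split into one run-time path, with io-64-nonatomic-hi-lo.h supplying ioread64()/iowrite64() on 32-bit builds. The i.MX special case exists because those parts keep the most-significant half at the lower address even though the registers are little-endian. A worked example:

/*
 * wr_reg64(reg, 0x1122334455667788ULL) on i.MX stores:
 *
 *   reg + 0x0: 0x11223344   (data >> 32, most-significant half)
 *   reg + 0x4: 0x55667788   (least-significant half)
 *
 * A plain iowrite64() would store the halves the other way around.
 * rd_reg64() mirrors this: high = ioread32(reg), low = ioread32(reg + 4),
 * result = low + ((u64)high << 32).
 */
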
@@ -195,22 +189,89 @@ static inline u64 caam_dma64_to_cpu(u64 value)
return caam64_to_cpu(value);
}
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
-#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
-#else
-#define cpu_to_caam_dma(value) cpu_to_caam32(value)
-#define caam_dma_to_cpu(value) caam32_to_cpu(value)
-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+static inline u64 cpu_to_caam_dma(u64 value)
+{
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ caam_ptr_sz == sizeof(u64))
+ return cpu_to_caam_dma64(value);
+ else
+ return cpu_to_caam32(value);
+}
+
+static inline u64 caam_dma_to_cpu(u64 value)
+{
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ caam_ptr_sz == sizeof(u64))
+ return caam_dma64_to_cpu(value);
+ else
+ return caam32_to_cpu(value);
+}
/*
* jr_outentry
* Represents each entry in a JobR output ring
*/
-struct jr_outentry {
- dma_addr_t desc;/* Pointer to completed descriptor */
- u32 jrstatus; /* Status for completed descriptor */
-} __packed;
+
+static inline void jr_outentry_get(void *outring, int hw_idx, dma_addr_t *desc,
+ u32 *jrstatus)
+{
+ if (caam_ptr_sz == sizeof(u32)) {
+ struct {
+ u32 desc;
+ u32 jrstatus;
+ } __packed *outentry = outring;
+
+ *desc = outentry[hw_idx].desc;
+ *jrstatus = outentry[hw_idx].jrstatus;
+ } else {
+ struct {
+			dma_addr_t desc;	/* Pointer to completed descriptor */
+ u32 jrstatus; /* Status for completed descriptor */
+ } __packed *outentry = outring;
+
+ *desc = outentry[hw_idx].desc;
+ *jrstatus = outentry[hw_idx].jrstatus;
+ }
+}
+
+#define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(u32))
+
+static inline dma_addr_t jr_outentry_desc(void *outring, int hw_idx)
+{
+ dma_addr_t desc;
+ u32 unused;
+
+ jr_outentry_get(outring, hw_idx, &desc, &unused);
+
+ return desc;
+}
+
+static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx)
+{
+ dma_addr_t unused;
+ u32 jrstatus;
+
+ jr_outentry_get(outring, hw_idx, &unused, &jrstatus);
+
+ return jrstatus;
+}
+
+static inline void jr_inpentry_set(void *inpring, int hw_idx, dma_addr_t val)
+{
+ if (caam_ptr_sz == sizeof(u32)) {
+ u32 *inpentry = inpring;
+
+ inpentry[hw_idx] = val;
+ } else {
+ dma_addr_t *inpentry = inpring;
+
+ inpentry[hw_idx] = val;
+ }
+}
+
+#define SIZEOF_JR_INPENTRY caam_ptr_sz
+
/* Version registers (Era 10+) e80-eff */
struct version_regs {
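
Because an output-ring entry is now a run-time-sized pointer plus a u32, the fixed struct jr_outentry overlay is replaced by the accessors above; rings are allocated with depth * SIZEOF_JR_OUTENTRY (and SIZEOF_JR_INPENTRY) bytes and only ever read through the helpers. A usage sketch (example_* is illustrative; the real ring walk in jr.c uses head/tail indices over DMA-coherent memory):

static int example_count_errors(void *outring, int depth)
{
	int i, errors = 0;

	for (i = 0; i < depth; i++)
		if (jr_outentry_jrstatus(outring, i) != 0)
			errors++;

	return errors;
}
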
@@ -338,6 +399,7 @@ struct caam_perfmon {
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
#define CTPR_MS_QI_SHIFT 25
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_PS BIT(17)
#define CTPR_MS_DPAA2 BIT(13)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
@@ -641,6 +703,7 @@ struct caam_job_ring {
#define JRSTA_SSRC_CCB_ERROR 0x20000000
#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
#define JRSTA_SSRC_DECO 0x40000000
+#define JRSTA_SSRC_QI 0x50000000
#define JRSTA_SSRC_JRERROR 0x60000000
#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
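
JRSTA_SSRC_QI takes the previously unused 0x5 slot in the status-source nibble, so QI-layer enqueue rejections (see the caam_fq_ern_cb() change earlier) travel through the same status word as CCB, DECO and Job Ring errors. A sketch of source decoding under the values above (the 0xf0000000 mask is assumed):

static const char *example_ssrc_name(u32 status)
{
	switch (status & 0xf0000000) {
	case JRSTA_SSRC_CCB_ERROR:	return "CCB";
	case JRSTA_SSRC_DECO:		return "DECO";
	case JRSTA_SSRC_QI:		return "QI";		/* new */
	case JRSTA_SSRC_JRERROR:	return "Job Ring";
	default:			return "other";
	}
}
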