Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/cryptoloop.c  22
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c  5
-rw-r--r--  drivers/char/hw_random/core.c  4
-rw-r--r--  drivers/char/random.c  24
-rw-r--r--  drivers/crypto/Makefile  2
-rw-r--r--  drivers/crypto/atmel-aes.c  5
-rw-r--r--  drivers/crypto/atmel-authenc.h  13
-rw-r--r--  drivers/crypto/atmel-ecc.c  11
-rw-r--r--  drivers/crypto/atmel-ecc.h  14
-rw-r--r--  drivers/crypto/atmel-sha.c  5
-rw-r--r--  drivers/crypto/atmel-tdes.c  5
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c  20
-rw-r--r--  drivers/crypto/caam/Kconfig  57
-rw-r--r--  drivers/crypto/caam/Makefile  10
-rw-r--r--  drivers/crypto/caam/caamalg.c  728
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c  143
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h  28
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c  627
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c  5165
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h  223
-rw-r--r--  drivers/crypto/caam/caamhash.c  80
-rw-r--r--  drivers/crypto/caam/caamhash_desc.c  80
-rw-r--r--  drivers/crypto/caam/caamhash_desc.h  21
-rw-r--r--  drivers/crypto/caam/caampkc.c  1
-rw-r--r--  drivers/crypto/caam/caamrng.c  1
-rw-r--r--  drivers/crypto/caam/compat.h  2
-rw-r--r--  drivers/crypto/caam/ctrl.c  1
-rw-r--r--  drivers/crypto/caam/dpseci.c  426
-rw-r--r--  drivers/crypto/caam/dpseci.h  333
-rw-r--r--  drivers/crypto/caam/dpseci_cmd.h  149
-rw-r--r--  drivers/crypto/caam/error.c  79
-rw-r--r--  drivers/crypto/caam/error.h  6
-rw-r--r--  drivers/crypto/caam/jr.c  1
-rw-r--r--  drivers/crypto/caam/qi.c  43
-rw-r--r--  drivers/crypto/caam/qi.h  3
-rw-r--r--  drivers/crypto/caam/regs.h  30
-rw-r--r--  drivers/crypto/caam/sg_sw_qm.h  29
-rw-r--r--  drivers/crypto/caam/sg_sw_qm2.h  30
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c  20
-rw-r--r--  drivers/crypto/cavium/nitrox/Makefile  3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_common.h  19
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_csr.h  111
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_debugfs.c  115
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_dev.h  162
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.c  71
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.h  23
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_isr.c  337
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_isr.h  10
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_lib.c  98
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c  203
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c  49
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_sriov.c  151
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c  13
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h  2
-rw-r--r--  drivers/crypto/ccp/psp-dev.c  47
-rw-r--r--  drivers/crypto/ccp/sp-platform.c  53
-rw-r--r--  drivers/crypto/ccree/cc_hw_queue_defs.h  6
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c  30
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c  2
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h  2
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c  7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c  3
-rw-r--r--  drivers/crypto/mxs-dcp.c  142
-rw-r--r--  drivers/crypto/omap-aes.c  17
-rw-r--r--  drivers/crypto/omap-aes.h  2
-rw-r--r--  drivers/crypto/picoxcell_crypto.c  21
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c  60
-rw-r--r--  drivers/crypto/qce/ablkcipher.c  13
-rw-r--r--  drivers/crypto/qce/cipher.h  2
-rw-r--r--  drivers/crypto/s5p-sss.c  113
-rw-r--r--  drivers/crypto/sahara.c  31
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c  22
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c  18
-rw-r--r--  drivers/crypto/vmx/aes_xts.c  18
-rw-r--r--  drivers/md/dm-integrity.c  23
-rw-r--r--  drivers/md/dm-verity-fec.c  5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c  20
-rw-r--r--  drivers/net/ppp/ppp_mppe.c  27
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c  58
-rw-r--r--  drivers/staging/rtl8192e/rtllib_crypt_tkip.c  34
-rw-r--r--  drivers/staging/rtl8192e/rtllib_crypt_wep.c  28
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c  34
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c  26
-rw-r--r--  drivers/usb/wusbcore/crypto.c  16
84 files changed, 8442 insertions, 2251 deletions
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 7033a4beda66..254ee7d54e91 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -45,7 +45,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
char cms[LO_NAME_SIZE]; /* cipher-mode string */
char *mode;
char *cmsp = cms; /* c-m string pointer */
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
/* encryption breaks for non sector aligned offsets */
@@ -80,13 +80,13 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
*cmsp++ = ')';
*cmsp = 0;
- tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
-
+ err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
+
if (err != 0)
goto out_free_tfm;
@@ -94,7 +94,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
out:
return err;
@@ -109,8 +109,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
- struct crypto_skcipher *tfm = lo->key_data;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ struct crypto_sync_skcipher *tfm = lo->key_data;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg_out;
struct scatterlist sg_in;
@@ -119,7 +119,7 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
unsigned in_offs, out_offs;
int err;
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
@@ -175,9 +175,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
- struct crypto_skcipher *tfm = lo->key_data;
+ struct crypto_sync_skcipher *tfm = lo->key_data;
if (tfm != NULL) {
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
lo->key_data = NULL;
return 0;
}
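
The cryptoloop hunks above follow the generic crypto_sync_skcipher lifecycle: allocate a sync tfm, set the key, run on-stack requests against it, free it on teardown. A minimal sketch of that pattern, assuming a hypothetical helper with made-up buffer/key names and an algorithm string such as "cbc(aes)" (none of this is taken from the patch itself):

/* Sketch of the crypto_sync_skcipher lifecycle used above (hypothetical
 * function and parameter names; error handling trimmed to the essentials). */
#include <crypto/skcipher.h>

static int example_sync_encrypt(struct scatterlist *sg, unsigned int len,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}

The point of the sync-skcipher API is that its request size is bounded, which is what makes SYNC_SKCIPHER_REQUEST_ON_STACK safe without variable-length arrays; that is why the conversion drops the CRYPTO_ALG_ASYNC mask from the allocation.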
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5d8266c6571f..4552b06fe601 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -188,6 +188,10 @@ struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
+struct device_type fsl_mc_bus_dpseci_type = {
+ .name = "fsl_mc_bus_dpseci"
+};
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -203,6 +207,7 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmcp_type, "dpmcp" },
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
+ { &fsl_mc_bus_dpseci_type, "dpseci" },
{ NULL, NULL }
};
int i;
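
With the dpseci device type registered on the fsl-mc bus, a consumer driver binds to those objects through an fsl_mc_device_id match table. A minimal sketch of how such a match table typically looks (hypothetical table name; probe/remove skeleton omitted):

/* Sketch: matching DPSECI objects on the fsl-mc bus (illustrative only). */
#include <linux/fsl/mc.h>

static const struct fsl_mc_device_id example_dpseci_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, example_dpseci_match_id_table);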
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aaf9e5afaad4..95be7228f327 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -44,10 +44,10 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per mill");
+ "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
- "default entropy content of hwrng per mill");
+ "default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
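
The reworded parameter descriptions match how the core credits entropy: quality is interpreted as entropy bits per 1024 bits of input, i.e. for rc bytes read the core credits rc * quality * 8 >> 10 bits. As an illustrative helper mirroring that computation (not part of the patch):

/* Entropy bits credited for 'bytes' of hwrng output at a given quality,
 * mirroring the core's rc * current_quality * 8 >> 10 computation
 * (illustrative helper only). */
static inline size_t hwrng_entropy_bits(size_t bytes, unsigned short quality)
{
	return (bytes * 8 * quality) >> 10;
}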
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c75b6cdf0053..2eb70e76ed35 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -433,9 +433,9 @@ static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS]);
+ __u8 out[CHACHA20_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -926,7 +926,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u32 block[CHACHA20_BLOCK_WORDS];
+ __u8 block[CHACHA20_BLOCK_SIZE];
__u32 key[8];
} buf;
@@ -973,7 +973,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS])
+ __u8 out[CHACHA20_BLOCK_SIZE])
{
unsigned long v, flags;
@@ -990,7 +990,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
+static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
{
struct crng_state *crng = NULL;
@@ -1008,7 +1008,7 @@ static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -1020,14 +1020,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = &tmp[used / sizeof(__u32)];
+ s = (__u32 *) &tmp[used];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
@@ -1043,7 +1043,7 @@ static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1622,7 +1622,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -2248,7 +2248,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((__u32 *)batch->entropy_u64);
+ extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2278,7 +2278,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng(batch->entropy_u32);
+ extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
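
The switch from __u32 word buffers to __u8 byte buffers keeps __aligned(4) on the on-stack arrays because the backtrack-protection path still consumes the buffer as 32-bit words. A reduced sketch of that access pattern, with the locking and bounds handling of the real code omitted:

#include <linux/types.h>

/* Sketch: XOR eight key words out of a byte-typed ChaCha20 output block.
 * The (const __u32 *) cast is why the __u8 buffers above stay 4-byte
 * aligned (illustrative only). */
static void example_xor_key_words(__u32 state[8], const __u8 *tmp, int used)
{
	const __u32 *s = (const __u32 *)&tmp[used];
	int i;

	for (i = 0; i < 8; i++)
		state[i] ^= *s++;
}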
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f32c8a..8e7e225d2446 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 801aeab5ab1e..2b7af44c7b85 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c driver.
*/
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index 2a60d1224143..cbd37a2edada 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* API for Atmel Secure Protocol Layers Improved Performances (SPLIP)
*
@@ -5,18 +6,6 @@
*
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 74f083f45e97..ba00e4563ca0 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/bitrev.h>
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
index 25232c8abcc2..643a3b947338 100644
--- a/drivers/crypto/atmel-ecc.h
+++ b/drivers/crypto/atmel-ecc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
#ifndef __ATMEL_ECC_H__
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8a19df2fba6a..ab0cfe748931 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-sham.c drivers.
*/
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 97b0423efa7f..438e1ffb2ec0 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c drivers.
*/
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 7f07a5085e9b..f3442c2bdbdc 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -330,7 +330,7 @@ struct artpec6_cryptotfm_context {
size_t key_length;
u32 key_md;
int crypto_type;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct artpec6_crypto_aead_hw_ctx {
@@ -1199,15 +1199,15 @@ artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
pr_debug("counter %x will overflow (nblks %u), falling back\n",
counter, counter + nblks);
- ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
- ctx->key_length);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
if (ret)
return ret;
{
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -1561,10 +1561,9 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
- 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback =
+ crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
@@ -1605,7 +1604,7 @@ static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
artpec6_crypto_aes_exit(tfm);
}
@@ -3174,7 +3173,6 @@ static struct platform_driver artpec6_crypto_driver = {
.remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
- .owner = THIS_MODULE,
.of_match_table = artpec6_crypto_of_match,
},
};
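
The artpec6 conversion keeps a pre-allocated sync fallback tfm and runs an on-stack subrequest against it when the CTR counter would wrap. A reduced sketch of that fallback path, assuming hypothetical names for the surrounding context (the real driver pulls the key and length out of its tfm context):

/* Sketch: redirect one request to a sync-skcipher fallback
 * (hypothetical helper, illustrative only). */
#include <crypto/skcipher.h>

static int example_ctr_fallback(struct crypto_sync_skcipher *fallback,
				struct skcipher_request *req,
				const u8 *key, unsigned int keylen)
{
	int ret;

	ret = crypto_sync_skcipher_setkey(fallback, key, keylen);
	if (ret)
		return ret;

	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);

		skcipher_request_set_sync_tfm(subreq, fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);
		ret = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
	}

	return ret;
}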
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 1eb852765469..c4b1cade55c1 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,7 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
+ tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
+ select CRYPTO_DEV_FSL_CAAM_COMMON
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -12,9 +17,16 @@ config CRYPTO_DEV_FSL_CAAM
To compile this driver as a module, choose M here: the module
will be called caam.
+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
+
config CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
- depends on CRYPTO_DEV_FSL_CAAM
default y
help
Enables the driver module for Job Rings which are part of
@@ -25,9 +37,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
To compile this driver as a module, choose M here: the module
will be called caam_jr.
+if CRYPTO_DEV_FSL_CAAM_JR
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -45,7 +58,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM_JR
help
Enable the Job Ring's interrupt coalescing feature.
@@ -75,7 +87,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -90,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
tristate "Queue Interface as Crypto API backend"
- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+ depends on FSL_DPAA && NET
default y
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
@@ -107,7 +118,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -119,7 +129,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -131,7 +140,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -142,13 +150,32 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
- depends on CRYPTO_DEV_FSL_CAAM
+endif # CRYPTO_DEV_FSL_CAAM_JR
+
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ depends on NETDEVICES
+ select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
+ select CRYPTO_HASH
help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ (MC) fsl-mc bus.
+
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index cb652ee7dfc8..7bbfd06a11ff 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
ccflags-y := -DDEBUG
endif
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
+caam_jr-objs := jr.o key_gen.o
caam_pkc-y := caampkc.o pkc_desc.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
caam-objs += qi.o
endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ec40f991e6c6..869f092432de 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,8 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
* Based on talitos crypto API driver.
*
@@ -81,8 +82,6 @@
#define debug(format, arg...)
#endif
-static struct list_head alg_list;
-
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
@@ -96,17 +95,21 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
struct caam_ctx {
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
- dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
@@ -648,20 +651,20 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return rfc4543_set_sh_desc(aead);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode &&
- (strstr(alg_name, "rfc3686") != NULL));
+ const bool is_rfc3686 = alg->caam.rfc3686;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -689,40 +692,32 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher_encrypt shared descriptor */
+ /* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_decrypt shared descriptor */
+ /* skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), ctx->dir);
-
return 0;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(ablkcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -731,15 +726,15 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts_ablkcipher_encrypt shared descriptor */
+ /* xts_skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* xts_ablkcipher_decrypt shared descriptor */
+ /* xts_skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -765,22 +760,20 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* and IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
- enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -790,8 +783,7 @@ struct ablkcipher_edesc {
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize,
- enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
@@ -803,7 +795,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -814,20 +806,19 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+ edesc->src_nents, edesc->dst_nents, 0, 0,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->iv_dir,
+ edesc->iv_dma, ivsize,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -881,87 +872,74 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
ivsize, 0);
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->iv_dir == DMA_FROM_DEVICE) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
- edesc->sec4_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
-
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
#ifdef DEBUG
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
/*
@@ -1103,34 +1081,38 @@ static void init_authenc_job(struct aead_request *req,
}
/*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
*/
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void init_skcipher_job(struct skcipher_request *req,
+ struct skcipher_edesc *edesc,
+ const bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
+ u32 *sh_desc;
u32 out_options = 0;
- dma_addr_t dst_dma;
+ dma_addr_t dst_dma, ptr;
int len;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
- pr_err("asked=%d, nbytes%d\n",
- (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ pr_err("asked=%d, cryptlen%d\n",
+ (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
#endif
caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
LDST_SGF);
if (likely(req->src == req->dst)) {
@@ -1145,48 +1127,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
-}
-
-/*
- * Fill in ablkcipher givencrypt job descriptor
- */
-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- u32 *desc = edesc->hw_desc;
- u32 in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
- if (edesc->src_nents == 1) {
- src_dma = sg_dma_address(req->src);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
-
- dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
/*
@@ -1275,7 +1216,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1476,35 +1417,35 @@ static int aead_decrypt(struct aead_request *req)
}
/*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
*/
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ int desc_bytes)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (req->dst != req->src) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
}
@@ -1546,7 +1487,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1555,17 +1496,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
- edesc->iv_dir = DMA_TO_DEVICE;
/* Make sure IV is located in a DMAable area */
iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1583,7 +1523,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+ iv_dma, ivsize, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1591,7 +1531,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
sec4_sg_bytes, 1);
#endif
@@ -1599,362 +1539,187 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
return edesc;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+ init_skcipher_job(req, edesc, true);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
ivsize, 0);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+ init_skcipher_job(req, edesc, false);
desc = edesc->hw_desc;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-/*
- * allocate and map the ablkcipher extended descriptor
- * for ablkcipher givencrypt
- */
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *greq,
- int desc_bytes)
-{
- struct ablkcipher_request *req = &greq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (likely(req->src == req->dst)) {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- } else {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += 1 + mapped_dst_nents;
-
- /*
- * allocate space for base edesc and hw desc commands, link tables, IV
- */
- sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
- desc_bytes);
- edesc->iv_dir = DMA_FROM_DEVICE;
-
- /* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents > 1)
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
- 0);
-
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
- dst_sg_idx + 1, 0);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
- edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
-
- return edesc;
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor*/
- init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
-}
-
-#define template_aead template_u.aead
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
@@ -3239,12 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct crypto_alg crypto_alg;
- struct list_head entry;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -3276,8 +3035,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
sh_desc_dec);
- ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_givenc);
ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
/* copy descriptor header template value */
@@ -3287,14 +3044,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg =
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3316,9 +3073,9 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3328,8 +3085,6 @@ static void caam_aead_exit(struct crypto_aead *tfm)
static void __exit caam_algapi_exit(void)
{
-
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -3339,57 +3094,25 @@ static void __exit caam_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg = &t_alg->skcipher;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
- return ERR_PTR(-ENOMEM);
- }
-
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -3441,8 +3164,6 @@ static int __init caam_algapi_init(void)
return -ENODEV;
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3458,9 +3179,8 @@ static int __init caam_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -3477,26 +3197,20 @@ static int __init caam_algapi_init(void)
* on LP devices.
*/
if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_XTS)
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
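
The init loop above gates registration on device capabilities decoded from class1_alg_type. A hedged illustration of that decoding (the mask and selector names are the driver's existing desc.h definitions; the helper name is an assumption):

/* Illustrative only: how class1_alg_type splits into selector and AAI */
static bool example_is_aes_xts(u32 class1_alg_type)
{
	u32 alg_sel = class1_alg_type & OP_ALG_ALGSEL_MASK;	/* algorithm */
	u32 aai = class1_alg_type & OP_ALG_AAI_MASK;		/* mode (AAI) */

	return alg_sel == OP_ALG_ALGSEL_AES && aai == OP_ALG_AAI_XTS;
}
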
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd84f34..1a6f0da14106 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -1212,11 +1213,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
+/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
+static inline void skcipher_append_src_dst(u32 *desc)
{
append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,7 +1224,7 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
}
/**
- * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1235,9 +1233,9 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1280,18 +1278,18 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ "skcipher enc shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
/**
- * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1300,9 +1298,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1348,105 +1346,23 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
-
-/**
- * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
- * with HW-generated initialization vector.
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC.
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- */
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
-{
- u32 *key_jump_cmd, geniv;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
- (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ "skcipher dec shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
/**
- * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1481,24 +1397,23 @@ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
/**
- * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1532,15 +1447,15 @@ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM descriptor support");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5776ce..1315c8f6f951 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
* Copyright 2016 NXP
*/
@@ -42,10 +42,10 @@
#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
@@ -96,20 +96,16 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
-
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
#endif /* _CAAMALG_DESC_H_ */
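
A hedged sketch of how a setkey path pairs the two constructors declared above, including the CONTEXT1 IV offset convention from their kernel-doc; the field names follow struct caam_ctx as modified by this patch, and the helper name is an assumption:

static void example_build_skcipher_shdescs(struct caam_ctx *ctx,
					   unsigned int ivsize,
					   bool ctr_mode, bool is_rfc3686)
{
	u32 ctx1_iv_off = 0;

	if (ctr_mode)
		ctx1_iv_off = 16;	/* CONTEXT1[255:128] holds the IV */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; /* nonce precedes IV */

	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
}
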
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d7aa7d7ff102..23c9fc4975f8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1,9 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale FSL CAAM support for crypto API over QI backend.
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -43,6 +44,12 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
@@ -50,7 +57,6 @@ struct caam_ctx {
struct device *jrdev;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
enum dma_data_direction dir;
@@ -589,18 +595,19 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
#ifdef DEBUG
@@ -629,13 +636,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
- ivsize, is_rfc3686, ctx1_iv_off);
+ /* skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -656,25 +661,16 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
}
}
- if (ctx->drv_ctx[GIVENCRYPT]) {
- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
- ctx->sh_desc_givenc);
- if (ret) {
- dev_err(jrdev, "driver givenc context update failed\n");
- goto badkey;
- }
- }
-
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
int ret = 0;
@@ -687,9 +683,9 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts ablkcipher encrypt, decrypt shared descriptors */
- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+ /* xts skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -712,7 +708,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -741,7 +737,7 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
@@ -750,7 +746,7 @@ struct aead_edesc {
* @drv_req: driver-specific request structure
* @sgt: the h/w link table, followed by IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
@@ -781,10 +777,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
if (type == ENCRYPT)
desc = ctx->sh_desc_enc;
- else if (type == DECRYPT)
+ else /* (type == DECRYPT) */
desc = ctx->sh_desc_dec;
- else /* (type == GIVENCRYPT) */
- desc = ctx->sh_desc_givenc;
cpu = smp_processor_id();
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
@@ -803,8 +797,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- enum optype op_type, dma_addr_t qm_sg_dma,
- int qm_sg_bytes)
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -815,9 +808,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize,
- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
@@ -830,21 +821,18 @@ static void aead_unmap(struct device *dev,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -902,9 +890,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int in_len, out_len;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
return (struct aead_edesc *)drv_ctx;
@@ -994,7 +981,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1009,7 +996,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0, 0);
+ dst_nents, 0, 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1028,7 +1015,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1051,7 +1038,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "unable to map S/G table\n");
dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1138,14 +1125,14 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
return aead_crypt(req, false);
}
-static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
- struct ablkcipher_edesc *edesc;
- struct ablkcipher_request *req = drv_req->app_ctx;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct skcipher_request *req = drv_req->app_ctx;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = caam_ctx->qidev;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
@@ -1158,72 +1145,60 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif
- ablkcipher_unmap(qidev, edesc, req);
-
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
+ skcipher_unmap(qidev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+ if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
ivsize, ivsize, 0);
qi_cache_free(edesc);
- ablkcipher_request_complete(req, status);
+ skcipher_request_complete(req, status);
}
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, bool encrypt)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
+ return (struct skcipher_edesc *)drv_ctx;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
@@ -1255,12 +1230,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1269,20 +1244,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
iv = (u8 *)(sg_table + qm_sg_ents);
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1292,7 +1267,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.cbk = skcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
@@ -1307,7 +1282,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1315,348 +1290,172 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
fd_sgt = &edesc->drv_req.fd_sgt[0];
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
- ivsize + req->nbytes, 0);
+ ivsize + req->cryptlen, 0);
if (req->src == req->dst) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else if (mapped_dst_nents > 1) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- req->nbytes, 0);
- }
-
- return edesc;
-}
-
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *qidev = ctx->qidev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- struct qm_sg_entry *sg_table, *fd_sgt;
- int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
- struct caam_drv_ctx *drv_ctx;
-
- drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- } else {
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- }
-
- qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = qm_sg_ents;
-
- qm_sg_ents += 1 + mapped_dst_nents;
- qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
- qm_sg_ents, ivsize);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (!edesc) {
- dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* Make sure IV is located in a DMAable area */
- sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
-
- if (mapped_src_nents > 1)
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
-
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
- 0);
-
- edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
- DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
- dev_err(qidev, "unable to map S/G table\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ req->cryptlen, 0);
}
- fd_sgt = &edesc->drv_req.fd_sgt[0];
-
- if (mapped_src_nents > 1)
- dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
- 0);
- else
- dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
- req->nbytes, 0);
-
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), ivsize + req->nbytes, 0);
-
return edesc;
}
-static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
if (unlikely(caam_congested))
return -EAGAIN;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, encrypt);
+ edesc = skcipher_edesc_alloc(req, encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
if (!encrypt)
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
ivsize, ivsize, 0);
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
+ skcipher_unmap(ctx->qidev, edesc, req);
qi_cache_free(edesc);
}
return ret;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, true);
+ return skcipher_crypt(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, false);
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ret;
-
- if (unlikely(caam_congested))
- return -EAGAIN;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
- qi_cache_free(edesc);
- }
-
- return ret;
+ return skcipher_crypt(req, false);
}
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam-qi",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam-qi",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
@@ -2528,12 +2327,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct list_head entry;
- struct crypto_alg crypto_alg;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -2572,19 +2365,18 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;
ctx->drv_ctx[DECRYPT] = NULL;
- ctx->drv_ctx[GIVENCRYPT] = NULL;
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
- crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2602,16 +2394,15 @@ static void caam_exit_common(struct caam_ctx *ctx)
{
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2619,10 +2410,8 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static struct list_head alg_list;
static void __exit caam_qi_algapi_exit(void)
{
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -2632,55 +2421,25 @@ static void __exit caam_qi_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
-
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg)
- return ERR_PTR(-ENOMEM);
+ struct skcipher_alg *alg = &t_alg->skcipher;
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -2734,8 +2493,6 @@ static int __init caam_qi_algapi_init(void)
return -ENODEV;
}
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -2751,9 +2508,8 @@ static int __init caam_qi_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -2765,23 +2521,16 @@ static int __init caam_qi_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- dev_warn(priv->qidev, "%s alg allocation failed\n",
- alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
dev_warn(priv->qidev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
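
The req->iv bookkeeping that replaces the old givencrypt path condenses to a single scatterwalk_map_and_copy() (from <crypto/scatterwalk.h>) whose placement differs by direction; a hedged restatement, with the helper name an assumption:

static void example_save_last_ct_block(struct skcipher_request *req,
				       int ivsize, bool encrypt)
{
	/*
	 * The crypto API expects req->iv to end up holding the last
	 * ciphertext block (used e.g. by CTS). On decrypt the ciphertext
	 * still sits in req->src, so it is saved before the CAAM job can
	 * overwrite it; on encrypt it only exists in req->dst once the
	 * job completes, so the copy runs from the done callback.
	 */
	scatterwalk_map_and_copy(req->iv, encrypt ? req->dst : req->src,
				 req->cryptlen - ivsize, ivsize, 0);
}
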
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
new file mode 100644
index 000000000000..7d8ac0222fa3
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -0,0 +1,5165 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "dpseci_cmd.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+#include "caamhash_desc.h"
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+
+#define CAAM_CRA_PRIORITY 2000
+
+/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+#endif
+
+/*
+ * This is a cache of buffers, from which users of the CAAM QI driver
+ * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ *	 being processed. This could be added by the dpaa2-eth driver, but it
+ *	 would pose a problem for userspace applications, which cannot know
+ *	 of this limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
+
+struct caam_alg_entry {
+ struct device *dev;
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/**
+ * caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: [authentication key], encryption key
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
+ * @dir: DMA direction for mapping key and Flow Contexts
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+ * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ */
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
+ enum dma_data_direction dir;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+};
+
+static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
+ iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+/*
+ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
+ *
+ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags - flags that would be used for the equivalent kmalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+static inline void *qi_cache_zalloc(gfp_t flags)
+{
+ return kmem_cache_zalloc(qi_cache, flags);
+}
+
+/*
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * @obj - buffer previously allocated by qi_cache_zalloc
+ *
+ * No checking is done; the call is a passthrough to
+ * kmem_cache_free(...).
+ */
+static inline void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
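
A hedged usage sketch for the cache pair above (illustrative, not part of the patch); the GFP flags mirror the request hotpaths later in this file, and the helper name is an assumption:

static void *example_qi_cache_roundtrip(void)
{
	void *buf;

	/* GFP_DMA | GFP_ATOMIC mirrors the hotpath allocations below */
	buf = qi_cache_zalloc(GFP_DMA | GFP_ATOMIC);
	if (unlikely(!buf))
		return ERR_PTR(-ENOMEM);

	/* ... lay out link tables and IV within CAAM_QI_MEMCACHE_SIZE ... */

	qi_cache_free(buf);
	return NULL;
}
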
+
+static struct caam_request *to_caam_req(struct crypto_async_request *areq)
+{
+ switch (crypto_tfm_alg_type(areq->tfm)) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return skcipher_request_ctx(skcipher_request_cast(areq));
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
+ case CRYPTO_ALG_TYPE_AHASH:
+ return ahash_request_ctx(ahash_request_cast(areq));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
+ DESC_QI_AEAD_ENC_LEN) +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
+ nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true, priv->sec_attr.era);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
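
The RFC3686 CONTEXT1 layout used above resolves to fixed byte offsets; a hedged worked example (the sizes are the standard RFC3686 definitions: 4-byte nonce, 8-byte IV, 4-byte counter; the helper name is an assumption):

static u32 example_rfc3686_ctx1_offsets(void)
{
	/* CONTEXT1[255:128] = {NONCE, IV, COUNTER}, i.e. bytes 16..31 */
	u32 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;	/* IV at byte 20 */

	/* the 32-bit counter follows the 8-byte IV, at byte 28 */
	return ctx1_iv_off + CTR_RFC3686_IV_SIZE;
}
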
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+ dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_set_sh_desc(aead);
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
+
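+/*
+ * aead_edesc_alloc() - allocate an extended descriptor for an AEAD request
+ * and DMA-map everything the job needs: the source/destination scatterlists,
+ * the IV (copied into the qi_cache allocation so it is DMAable), the
+ * assoclen word (converted to CAAM endianness) and the qman S/G table that
+ * ties them together. On any failure, previously mapped resources are
+ * unwound before returning.
+ */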
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_request *req_ctx = aead_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct dpaa2_sg_entry *sg_table;
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+ ivsize = crypto_aead_ivsize(aead);
+
+	/*
+	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+	 * The input is never contiguous (it always starts with the
+	 * assoclen entry), so a qman S/G table is always required.
+	 */
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+ CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_nents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (ivsize) {
+ u8 *iv = (u8 *)(sg_table + qm_sg_nents);
+
+ /* Make sure IV is located in a DMAable area */
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, in_len);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table));
+ }
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+ sizeof(*sg_table));
+ }
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
+ return edesc;
+}
+
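+/*
+ * gcm_set_sh_desc() - build the GCM shared descriptors. Unlike the authenc
+ * case there is a single (class 1) key, so the only decision left is
+ * whether it fits inline next to the descriptor commands or has to be
+ * fetched via its DMA address.
+ */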
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
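+/*
+ * rfc4106_setkey() - the key material is laid out as {AES key, 4-byte salt};
+ * the salt stays at the tail of ctx->key and only the AES portion counts
+ * towards cdata.keylen.
+ */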
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4106_set_sh_desc(aead);
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4543 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4543_set_sh_desc(aead);
+}
+
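+/*
+ * skcipher_setkey() - build the skcipher shared descriptors. The key is
+ * always inlined; for CTR-based modes the IV lives in CONTEXT1 at a fixed
+ * offset, and for RFC3686 the trailing nonce is split off the key.
+ */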
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher),
+ struct caam_skcipher_alg, skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	/*
+	 * AES-CTR needs to load the IV into the CONTEXT1 register
+	 * at an offset of 128 bits (16 bytes):
+	 * CONTEXT1[255:128] = IV
+	 */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 * *key = {KEY, NONCE}
+	 */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
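+/*
+ * xts_skcipher_setkey() - XTS consumes two AES keys of equal size, so only
+ * keylen values of 2 * 128 or 2 * 256 bits are accepted here.
+ */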
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ dev_err(dev, "key size mismatch\n");
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* xts_skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
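+/*
+ * skcipher_edesc_alloc() - skcipher analogue of aead_edesc_alloc(): the
+ * qman S/G input table is always the IV followed by the source entries,
+ * and a separate destination table is appended at dst_sg_idx when
+ * req->dst is a different, multi-entry scatterlist.
+ */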
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct skcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ u8 *iv;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->cryptlen);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ qm_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = qm_sg_ents;
+
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx, 0);
+
+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
+ dpaa2_fl_set_len(out_fle, req->cryptlen);
+
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+
+ if (req->src == req->dst) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+ sizeof(*sg_table));
+ } else if (mapped_dst_nents > 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ }
+
+ return edesc;
+}
+
+static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
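+/*
+ * Completion callbacks, invoked when the response for a previously
+ * enqueued job arrives: decode the h/w status word, unmap and free the
+ * extended descriptor, then complete the crypto API request.
+ */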
+static void aead_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static void aead_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+		/*
+		 * Verify that the h/w authentication (ICV) check passed;
+		 * if it did not, report -EBADMSG to the caller.
+		 */
+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
+ JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
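+/*
+ * Request entry points: build the extended descriptor, point the request
+ * at the right flow context and enqueue it. dpaa2_caam_enqueue() returns
+ * -EINPROGRESS on success (completion arrives via the callback); anything
+ * else, except -EBUSY when the request may be backlogged, means the job
+ * was never queued, so the descriptor is torn down here.
+ */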
+static int aead_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
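+/*
+ * RFC4106/RFC4543 carry at least the 8-byte ESP header (SPI and sequence
+ * number) as associated data, hence the assoclen < 8 rejection below.
+ */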
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_decrypt(req);
+}
+
+static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = skcipher_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = skcipher_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
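+/*
+ * caam_cra_init() - a single DMA mapping covers the per-context flow
+ * contexts and the key buffer, which sit contiguously in struct caam_ctx;
+ * flc_dma[i] and key_dma are therefore simple offsets from the base
+ * address.
+ */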
+static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
+{
+ dma_addr_t dma_addr;
+ int i;
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+ ctx->dev = caam->dev;
+ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
+ offsetof(struct caam_ctx, flc_dma),
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
+
+ return 0;
+}
+
+static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ aead);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
+ alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
+ offsetof(struct caam_ctx, flc_dma), ctx->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void caam_cra_exit(struct crypto_skcipher *tfm)
+{
+ caam_exit_common(crypto_skcipher_ctx(tfm));
+}
+
+static void caam_cra_exit_aead(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
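+/*
+ * skcipher algorithm templates registered with the crypto API; the .caam
+ * member carries the CAAM class 1 algorithm selector / AAI bits consumed
+ * by the shared descriptor constructors.
+ */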
+static struct caam_skcipher_alg driver_algs[] = {
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ }
+};
+
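+/*
+ * AEAD algorithm templates: the AES-GCM variants first, then the
+ * single-pass authenc (cipher + HMAC) combinations, with and without IV
+ * generation (echainiv/seqiv).
+ */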
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ }
+ },
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-desi-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+};
+
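+/*
+ * Fill in the crypto API boilerplate shared by all algorithms in the
+ * template arrays above; the per-algorithm fields (names, block size,
+ * setkey/encrypt/decrypt callbacks) come from the templates themselves.
+ */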
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+{
+ struct skcipher_alg *alg = &t_alg->skcipher;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_skcipher;
+ alg->exit = caam_cra_exit;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_aead;
+ alg->exit = caam_cra_exit_aead;
+}
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+/* caam context sizes for hashes: running digest + 8 */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
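+/* Each ahash operation type gets its own shared descriptor (flow context) */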
+enum hash_optype {
+ UPDATE = 0,
+ UPDATE_FIRST,
+ FINALIZE,
+ DIGEST,
+ HASH_NUM_OP
+};
+
+/**
+ * caam_hash_ctx - ahash per-session context
+ * @flc: Flow Contexts array
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @dev: dpseci device
+ * @ctx_len: size of Context Register
+ * @adata: hashing algorithm details
+ */
+struct caam_hash_ctx {
+ struct caam_flc flc[HASH_NUM_OP];
+ dma_addr_t flc_dma[HASH_NUM_OP];
+ struct device *dev;
+ int ctx_len;
+ struct alginfo adata;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ struct caam_request caam_req;
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
+
+struct caam_export_state {
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+ u8 caam_ctx[MAX_CTX_LEN];
+ int buflen;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+};
+
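+/*
+ * Buffer handling is double-buffered: while one buffer holds the bytes
+ * being hashed by the current job, leftover bytes for the next update are
+ * staged in the other; current_buf selects the active one.
+ */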
+static inline void switch_buf(struct caam_hash_state *state)
+{
+ state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_qm_sg(struct device *dev,
+ struct dpaa2_sg_entry *qm_sg,
+ struct caam_hash_state *state)
+{
+ int buflen = *current_buflen(state);
+
+ if (!buflen)
+ return 0;
+
+ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, state->buf_dma)) {
+ dev_err(dev, "unable to map buf\n");
+ state->buf_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
+
+ return 0;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline int ctx_map_to_qm_sg(struct device *dev,
+ struct caam_hash_state *state, int ctx_len,
+ struct dpaa2_sg_entry *qm_sg, u32 flag)
+{
+ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(dev, state->ctx_dma)) {
+ dev_err(dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
+}
+
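+/*
+ * (Re)generate the four shared descriptors (update, first update, finalize,
+ * digest) and sync them to the device; this runs at tfm init time and again
+ * whenever the key changes.
+ */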
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
+ struct caam_flc *flc;
+ u32 *desc;
+
+ /* ahash_update shared descriptor */
+ flc = &ctx->flc[UPDATE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_update_first shared descriptor */
+ flc = &ctx->flc[UPDATE_FIRST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_final shared descriptor */
+ flc = &ctx->flc[FINALIZE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_digest shared descriptor */
+ flc = &ctx->flc[DIGEST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ return 0;
+}
+
+struct split_key_sh_result {
+ struct completion completion;
+ int err;
+ struct device *dev;
+};
+
+static void split_key_sh_done(void *cbk_ctx, u32 err)
+{
+ struct split_key_sh_result *res = cbk_ctx;
+
+ dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+
+ if (err)
+ caam_qi2_strstatus(res->dev, err);
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+/* Digest the key if it is longer than the algorithm's block size */
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+{
+ struct caam_request *req_ctx;
+ u32 *desc;
+ struct split_key_sh_result result;
+ dma_addr_t src_dma, dst_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ int ret = -ENOMEM;
+ struct dpaa2_fl_entry *in_fle, *out_fle;
+
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ if (!req_ctx)
+ return -ENOMEM;
+
+ in_fle = &req_ctx->fd_flt[1];
+ out_fle = &req_ctx->fd_flt[0];
+
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ if (!flc)
+ goto err_flc;
+
+ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, src_dma)) {
+ dev_err(ctx->dev, "unable to map key input memory\n");
+ goto err_src_dma;
+ }
+ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, dst_dma)) {
+ dev_err(ctx->dev, "unable to map key output memory\n");
+ goto err_dst_dma;
+ }
+
+ desc = flc->sh_desc;
+
+ init_sh_desc(desc, 0);
+
+ /* descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, flc_dma)) {
+ dev_err(ctx->dev, "unable to map shared descriptor\n");
+ goto err_flc_dma;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_len(in_fle, *keylen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ result.err = 0;
+ init_completion(&result.completion);
+ result.dev = ctx->dev;
+
+ req_ctx->flc = flc;
+ req_ctx->flc_dma = flc_dma;
+ req_ctx->cbk = split_key_sh_done;
+ req_ctx->ctx = &result;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS) {
+ /* in progress */
+ wait_for_completion(&result.completion);
+ ret = result.err;
+ print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ digestsize, 1);
+ }
+
+ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
+ DMA_TO_DEVICE);
+err_flc_dma:
+ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
+err_dst_dma:
+ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
+err_src_dma:
+ kfree(flc);
+err_flc:
+ kfree(req_ctx);
+
+ *keylen = digestsize;
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ int ret;
+ u8 *hashed_key = NULL;
+
+ dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
+
+ if (keylen > blocksize) {
+ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
+ GFP_KERNEL | GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+ digestsize);
+ if (ret)
+ goto bad_free_key;
+ key = hashed_key;
+ }
+
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ ctx->adata.key_virt = key;
+ ctx->adata.key_inline = true;
+
+ ret = ahash_set_sh_desc(ahash);
+ kfree(hashed_key);
+ return ret;
+bad_free_key:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (edesc->src_nents)
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ if (edesc->dst_dma)
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+ if (edesc->qm_sg_bytes)
+ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+
+ if (state->buf_dma) {
+ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ DMA_TO_DEVICE);
+ state->buf_dma = 0;
+ }
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len,
+ u32 flag)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (state->ctx_dma) {
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ state->ctx_dma = 0;
+ }
+ ahash_unmap(dev, edesc, req, dst_len);
+}
+
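+/*
+ * Completion callbacks: the four variants below differ only in which DMA
+ * mappings they release (final digest vs. running context) and in the
+ * direction the context register was mapped; the two context-destination
+ * variants also flip the double buffer.
+ */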
+static void ahash_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_bi(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
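+/*
+ * Mid-stream update: hash as many block-sized chunks as possible, feeding
+ * the running context in and out of the engine; any remainder is copied to
+ * the alternate buffer for the next update/finalize call.
+ */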
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state), last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ last_buflen = *next_buflen;
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ if (mapped_nents) {
+ sg_to_qm_sg_last(req->src, mapped_nents,
+ sg_table + qm_sg_src_index, 0);
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+ } else {
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
+ true);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
+ req_ctx->cbk = ahash_done_bi;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ qi_cache_free(edesc);
+ return ret;
+}
+
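+/*
+ * Finalize: send the saved context plus any buffered bytes through the
+ * FINALIZE descriptor and let the engine DMA the digest straight into
+ * req->result.
+ */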
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return -ENOMEM;
+
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
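+/* One-shot hash of the whole request, using the INITFINAL (digest) flow */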
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ state->buf_dma = 0;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return ret;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ edesc->src_nents = src_nents;
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
+
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
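+/* Finalize before any data reached the engine: digest the buffered bytes */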
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int buflen = *current_buflen(state);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return ret;
+
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
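+/*
+ * Update while no running context exists yet: once a full block's worth of
+ * data is available it is hashed with the UPDATE_FIRST (init) descriptor
+ * and the request switches over to the *_ctx handlers.
+ */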
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int in_len = *buflen + req->nbytes, to_hash;
+ int qm_sg_bytes, src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - *next_buflen);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap;
+
+ return ret;
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
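+/*
+ * Very first update on a request: hash full blocks with the init descriptor
+ * and hand off to the *_ctx handlers; anything shorter than a block is just
+ * buffered and the request stays contextless.
+ */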
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int to_hash;
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ sg_table = &edesc->sgt[0];
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, to_hash);
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags &
+ CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
+ switch_buf(state);
+ }
+
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+ return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ state->update = ahash_update_first;
+ state->finup = ahash_finup_first;
+ state->final = ahash_final_no_ctx;
+
+ state->ctx_dma = 0;
+ state->current_buf = 0;
+ state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
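+/*
+ * Export/import capture the running digest, the active buffer and the state
+ * handlers, so a partially hashed request can be suspended and resumed.
+ */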
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_export_state *export = out;
+ int len;
+ u8 *buf;
+
+ if (state->current_buf) {
+ buf = state->buf_1;
+ len = state->buflen_1;
+ } else {
+ buf = state->buf_0;
+ len = state->buflen_0;
+ }
+
+ memcpy(export->buf, buf, len);
+ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+ export->buflen = len;
+ export->update = state->update;
+ export->final = state->final;
+ export->finup = state->finup;
+
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ const struct caam_export_state *export = in;
+
+ memset(state, 0, sizeof(*state));
+ memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+ state->buflen_0 = export->buflen;
+ state->update = export->update;
+ state->final = export->final;
+ state->finup = export->finup;
+
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+};
+
+/* ahash algorithm templates */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam-qi2",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam-qi2",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam-qi2",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam-qi2",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam-qi2",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam-qi2",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam-qi2",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam-qi2",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam-qi2",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam-qi2",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam-qi2",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam-qi2",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ }
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ struct device *dev;
+ int alg_type;
+ struct ahash_alg ahash_alg;
+};
+
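+/*
+ * Per-tfm init: map the whole flow context array with a single DMA mapping
+ * and derive the per-operation addresses from it, then pick the running
+ * digest size matching the selected algorithm.
+ */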
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ dma_addr_t dma_addr;
+ int i;
+
+ ctx->dev = caam_hash->dev;
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HASH_NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+
+ /* copy descriptor header template value */
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ return ahash_set_sh_desc(ahash);
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
+ struct caam_hash_template *template, bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->dev = dev;
+
+ return t_alg;
+}
+
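+/* FQ data available notification: kick the NAPI instance of the owning CPU */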
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+
+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
+ napi_schedule_irqoff(&ppriv->napi);
+}
+
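+/*
+ * Attach one DPIO notification context and one dequeue store per {Rx,Tx}
+ * queue pair, each affine to a different online CPU; if no affine DPIO is
+ * available, probing is deferred.
+ */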
+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->priv = priv;
+ nctx = &ppriv->nctx;
+ nctx->is_cdan = 0;
+ nctx->id = ppriv->rsp_fqid;
+ nctx->desired_cpu = cpu;
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (unlikely(err)) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
+ /*
+ * If no affine DPIO for this core, there's probably
+ * none available for next cores either. Signal we want
+ * to retry later, in case the DPIO devices weren't
+ * probed yet.
+ */
+ err = -EPROBE_DEFER;
+ goto err;
+ }
+
+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err:
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ }
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->store)
+ break;
+ dpaa2_io_store_destroy(ppriv->store);
+ }
+
+ return err;
+}
+
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
+ return;
+ }
+}
+
+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
+{
+ struct dpseci_rx_queue_cfg rx_queue_cfg;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i = 0, cpu;
+
+ /* Configure Rx queues */
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+
+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
+ DPSECI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.order_preservation_en = 0;
+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ /*
+ * Rx priority (WQ) doesn't really matter, since we use
+ * pull mode, i.e. volatile dequeues from specific FQs
+ */
+ rx_queue_cfg.dest_cfg.priority = 0;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+
+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
+ err);
+ return err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return err;
+}
+
+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+
+ if (!priv->cscn_mem)
+ return;
+
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ kfree(priv->cscn_mem);
+}
+
+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+
+ dpaa2_dpseci_congestion_free(priv);
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
+ const struct dpaa2_fd *fd)
+{
+ struct caam_request *req;
+ u32 fd_err;
+
+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
+ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
+ return;
+ }
+
+ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
+ if (unlikely(fd_err))
+ dev_err(priv->dev, "FD error: %08x\n", fd_err);
+
+ /*
+ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+ * in FD[ERR] or FD[FRC].
+ */
+ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
+}
+
+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ int err;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
+ if (unlikely(err))
+ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
+
+ return err;
+}
+
+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ struct dpaa2_dq *dq;
+ int cleaned = 0, is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ if (unlikely(!dq)) {
+ if (unlikely(!is_last)) {
+ dev_dbg(ppriv->priv->dev,
+ "FQ %d returned no valid frames\n",
+ ppriv->rsp_fqid);
+ /*
+ * MUST retry until we get some sort of
+ * valid response token (be it "empty dequeue"
+ * or a valid frame).
+ */
+ continue;
+ }
+ break;
+ }
+
+ /* Process FD */
+ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct dpaa2_caam_priv *priv;
+ int err, cleaned = 0, store_cleaned;
+
+ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
+ priv = ppriv->priv;
+
+ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
+ return 0;
+
+ do {
+ store_cleaned = dpaa2_caam_store_consume(ppriv);
+ cleaned += store_cleaned;
+
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
+ break;
+
+ /* Try to dequeue some more */
+ err = dpaa2_caam_pull_fq(ppriv);
+ if (unlikely(err))
+ break;
+ } while (1);
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
+ }
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
+ u16 token)
+{
+ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->dev;
+ int err;
+
+ /*
+ * Congestion group feature supported starting with DPSECI API v5.1
+ * and only when object has been created with this capability.
+ */
+ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
+ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
+ return 0;
+
+ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->cscn_mem)
+ return -ENOMEM;
+
+ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, priv->cscn_dma)) {
+ dev_err(dev, "Error mapping CSCN memory area\n");
+ err = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
+ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
+ cong_notif_cfg.message_ctx = (uintptr_t)priv;
+ cong_notif_cfg.message_iova = priv->cscn_dma;
+ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
+ DPSECI_CGN_MODE_COHERENT_WRITE;
+
+ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
+ &cong_notif_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_congestion_notification failed\n");
+ goto err_set_cong;
+ }
+
+ return 0;
+
+err_set_cong:
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+ kfree(priv->cscn_mem);
+
+ return err;
+}
+
+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_caam_priv *priv;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, cpu;
+ u8 i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpsec_id = ls_dev->obj_desc.id;
+
+	/* Get a handle for the DPSECI this interface is associated with */
+ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_open() failed: %d\n", err);
+ goto err_open;
+ }
+
+ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
+ &priv->minor_ver);
+ if (err) {
+ dev_err(dev, "dpseci_get_api_version() failed\n");
+ goto err_get_vers;
+ }
+
+ dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+
+ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpseci_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_attributes() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->sec_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_sec_attr() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
+ if (err) {
+		dev_err(dev, "dpaa2_dpseci_congestion_setup() failed\n");
+ goto err_get_vers;
+ }
+
+ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
+ priv->dpseci_attr.num_tx_queues);
+ if (priv->num_pairs > num_online_cpus()) {
+ dev_warn(dev, "%d queues won't be used\n",
+ priv->num_pairs - num_online_cpus());
+ priv->num_pairs = num_online_cpus();
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
+ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_rx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
+ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->tx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_tx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
+ priv->rx_queue_attr[i].fqid,
+ priv->tx_queue_attr[i].fqid);
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+ ppriv->prio = i;
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err_get_rx_queue:
+ dpaa2_dpseci_congestion_free(priv);
+err_get_vers:
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+err_open:
+ return err;
+}
+
+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_enable(&ppriv->napi);
+ }
+
+ return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int i, err = 0, enabled;
+
+ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_disable() failed\n");
+ return err;
+ }
+
+ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
+ if (err) {
+ dev_err(dev, "dpseci_is_enabled() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_disable(&ppriv->napi);
+ netif_napi_del(&ppriv->napi);
+ }
+
+ return 0;
+}
+
+static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i, err = 0;
+ bool registered = false;
+
+ /*
+ * There is no way to get CAAM endianness - there is no direct register
+ * space access and MC f/w does not provide this attribute.
+ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
+ * property.
+ */
+ caam_little_end = true;
+
+ caam_imx = false;
+
+ dev = &dpseci_dev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->domain = iommu_get_domain_for_dev(dev);
+
+ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ 0, SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(dev, "Can't allocate SEC cache\n");
+ return -ENOMEM;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ if (err) {
+ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
+ goto err_dma_mask;
+ }
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+
+ goto err_dma_mask;
+ }
+
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPSECI binding to DPIO */
+ err = dpaa2_dpseci_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPSECI enable */
+ err = dpaa2_dpseci_enable(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_enable() failed\n");
+ goto err_bind;
+ }
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (alg_sel == OP_ALG_ALGSEL_3DES ||
+ alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_skcipher_alg_init(t_alg);
+
+ err = crypto_register_skcipher(&t_alg->skcipher);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->skcipher.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
+ c1_alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ c1_alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
+ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->aead.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
+ /* register hash algorithms the device supports */
+ INIT_LIST_HEAD(&hash_list);
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+ if (!priv->sec_attr.md_acc_num)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ struct caam_hash_alg *t_alg;
+ struct caam_hash_template *alg = driver_hash + i;
+
+ /* register hmac version */
+ t_alg = caam_hash_alloc(dev, alg, true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s hash alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(dev, alg, false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+ }
+ if (!list_empty(&hash_list))
+ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
+
+ return err;
+
+err_bind:
+ dpaa2_dpseci_dpio_free(priv);
+err_dpio_setup:
+ dpaa2_dpseci_free(priv);
+err_dpseci_setup:
+ free_percpu(priv->ppriv);
+err_alloc_ppriv:
+ fsl_mc_portal_free(priv->mc_io);
+err_dma_mask:
+ kmem_cache_destroy(qi_cache);
+
+ return err;
+}
+
+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
+ }
+
+ if (hash_list.next) {
+ struct caam_hash_alg *t_hash_alg, *p;
+
+ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+ }
+
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ free_percpu(priv->ppriv);
+ fsl_mc_portal_free(priv->mc_io);
+ kmem_cache_destroy(qi_cache);
+
+ return 0;
+}
+
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ int err = 0, i, id;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (priv->cscn_mem) {
+ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
+ DPAA2_CSCN_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ dev_dbg_ratelimited(dev, "Dropping request\n");
+ return -EBUSY;
+ }
+ }
+
+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, req->fd_flt_dma)) {
+ dev_err(dev, "DMA mapping error for QI enqueue request\n");
+		return -EIO;
+ }
+
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ /*
+ * There is no guarantee that preemption is disabled here,
+ * thus take action.
+ */
+ preempt_disable();
+ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
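+	/*
+	 * Bounded retry: attempt the enqueue up to 2 * num_tx_queues times,
+	 * since the software portal may transiently return -EBUSY under
+	 * load; any other outcome (success or a hard error) ends the loop.
+	 */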
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
+ err = dpaa2_io_service_enqueue_fq(NULL,
+ priv->tx_queue_attr[id].fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ preempt_enable();
+
+ if (unlikely(err)) {
+ dev_err(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
+ return -EINPROGRESS;
+
+err_out:
+ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ return -EIO;
+}
+EXPORT_SYMBOL(dpaa2_caam_enqueue);
+
+static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpseci",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_caam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_caam_probe,
+ .remove = dpaa2_caam_remove,
+ .match_id_table = dpaa2_caam_match_id_table
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
+
+module_fsl_mc_driver(dpaa2_caam_driver);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
new file mode 100644
index 000000000000..9823bdefd029
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _CAAMALG_QI2_H_
+#define _CAAMALG_QI2_H_
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include <linux/threads.h>
+#include "dpseci.h"
+#include "desc_constr.h"
+
+#define DPAA2_CAAM_STORE_SIZE 16
+/* NAPI weight *must* be a multiple of the store size. */
+#define DPAA2_CAAM_NAPI_WEIGHT 64
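+/*
+ * Why the multiple matters: dpaa2_dpseci_poll() pulls another batch only
+ * while cleaned <= budget - DPAA2_CAAM_STORE_SIZE (i.e. at most 48 of a
+ * budget of 64), and each batch adds at most DPAA2_CAAM_STORE_SIZE frames,
+ * so the total processed can never exceed the NAPI budget.
+ */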
+
+/* The congestion entrance threshold was chosen so that on LS2088
+ * we support the maximum throughput for the available memory
+ */
+#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
+#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
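+/*
+ * The exit threshold is 9/10 of the entry threshold (~115 MiB vs 128 MiB),
+ * providing hysteresis: once congested, enqueues are dropped until the
+ * group drains well below the entry point, which avoids rapid oscillation
+ * around a single threshold.
+ */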
+
+/**
+ * dpaa2_caam_priv - driver private data
+ * @dpsec_id: DPSECI object unique ID
+ * @major_ver: DPSECI major version
+ * @minor_ver: DPSECI minor version
+ * @dpseci_attr: DPSECI attributes
+ * @sec_attr: SEC engine attributes
+ * @rx_queue_attr: array of Rx queue attributes
+ * @tx_queue_attr: array of Tx queue attributes
+ * @cscn_mem: pointer to memory region containing the congestion SCN;
+ * its size is larger than DPAA2_CSCN_SIZE to accommodate alignment
+ * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
+ * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
+ * @cscn_dma: dma address used by the QMAN to write CSCN messages
+ * @dev: device associated with the DPSECI object
+ * @mc_io: pointer to MC portal's I/O object
+ * @domain: IOMMU domain
+ * @ppriv: per-CPU pointers to private data
+ */
+struct dpaa2_caam_priv {
+ int dpsec_id;
+
+ u16 major_ver;
+ u16 minor_ver;
+
+ struct dpseci_attr dpseci_attr;
+ struct dpseci_sec_attr sec_attr;
+ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ int num_pairs;
+
+ /* congestion */
+ void *cscn_mem;
+ void *cscn_mem_aligned;
+ dma_addr_t cscn_dma;
+
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct iommu_domain *domain;
+
+ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
+};
+
+/**
+ * dpaa2_caam_priv_per_cpu - per CPU private data
+ * @napi: napi structure
+ * @net_dev: netdev used by napi
+ * @req_fqid: (virtual) request (Tx / enqueue) FQID
+ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
+ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
+ struct net_device net_dev;
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
+};
+
+/*
+ * The CAAM QI hardware constructs a job descriptor which points
+ * to the shared descriptor (as pointed to by context_a of the FQ to CAAM).
+ * When the job descriptor is executed by the DECO, the whole job
+ * descriptor together with the shared descriptor gets loaded into the
+ * DECO buffer, which is 64 words long (each word 32 bits).
+ *
+ * The job descriptor constructed by QI hardware has layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch shared descriptor contents
+ * into deco buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that
+ * get loaded into the DECO buffer is 8 or 11. The remaining words
+ * in the DECO buffer can be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
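+/*
+ * Illustrative check of the bound above (a sketch, assuming the 11-word
+ * job descriptor case, i.e. 64-bit pointers): 64 deco buffer words minus
+ * 11 job descriptor words leaves 53 words for the shared descriptor,
+ * which is the value MAX_SDLEN is expected to evaluate to under those
+ * assumptions.
+ */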
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 512
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table, followed by IV
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * skcipher_edesc - s/w-extended skcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table, followed by IV
+ */
+struct skcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: I/O virtual address of req->result
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @src_nents: number of segments in input scatterlist
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @sgt: pointer to h/w link table
+ */
+struct ahash_edesc {
+ dma_addr_t dst_dma;
+ dma_addr_t qm_sg_dma;
+ int src_nents;
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/**
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
+} ____cacheline_aligned;
+
+enum optype {
+ ENCRYPT = 0,
+ DECRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_request - the request structure the driver application should fill
+ * when submitting a job to the driver.
+ * @fd_flt: Frame list table defining input and output
+ * fd_flt[0] - FLE pointing to output buffer
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
+ * @flc_dma: I/O virtual address of Flow Context
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbitrary context attached to the request by the application
+ * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
+ */
+struct caam_request {
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+ void *edesc;
+};
+
+/**
+ * dpaa2_caam_enqueue() - enqueue a crypto request
+ * @dev: device associated with the DPSECI object
+ * @req: pointer to caam_request
+ */
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
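+
+/*
+ * Minimal usage sketch (illustrative only; my_done and my_ctx are
+ * hypothetical and not part of this API). The caller fills both Frame
+ * List entries, attaches a DMA-mapped Flow Context and a completion
+ * callback, then submits; a return of -EINPROGRESS means the job was
+ * accepted and my_done() will be invoked from the poll path:
+ *
+ *	static void my_done(void *ctx, u32 status)
+ *	{
+ *		... decode status, complete the crypto request ...
+ *	}
+ *
+ *	req->flc = flc;		(DMA-mapped struct caam_flc)
+ *	req->flc_dma = flc_dma;
+ *	req->cbk = my_done;
+ *	req->ctx = my_ctx;
+ *	dpaa2_fl_set_addr(&req->fd_flt[0], dst_dma);	(output FLE)
+ *	dpaa2_fl_set_len(&req->fd_flt[0], dst_len);
+ *	dpaa2_fl_set_addr(&req->fd_flt[1], src_dma);	(input FLE)
+ *	dpaa2_fl_set_len(&req->fd_flt[1], src_len);
+ *	ret = dpaa2_caam_enqueue(dev, req);
+ */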
+
+#endif /* _CAAMALG_QI2_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab5f09c..46924affa0bd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
@@ -62,6 +63,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamhash_desc.h"
#define CAAM_CRA_PRIORITY 3000
@@ -71,14 +73,6 @@
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-/* length of descriptors text */
-#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -235,60 +229,6 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
return 0;
}
-/*
- * For ahash update, final and finup (import_ctx = true)
- * import context, read and write to seqout
- * For ahash firsts and digest (import_ctx = false)
- * read and write to seqout
- */
-static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx,
- int era)
-{
- u32 op = ctx->adata.algtype;
- u32 *skip_key_load;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Append key if it has been set; ahash update excluded */
- if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
- /* Skip key loading if already shared */
- skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- if (era < 6)
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_proto_dkp(desc, &ctx->adata);
-
- set_jump_tgt_here(desc, skip_key_load);
-
- op |= OP_ALG_AAI_HMAC_PRECOMP;
- }
-
- /* If needed, import context from software */
- if (import_ctx)
- append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
- /*
- * Load from buf and/or src and write to req->result or state->context
- * Calculate remaining bytes to read
- */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
-
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -301,8 +241,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -313,8 +253,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -325,8 +265,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -337,8 +277,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
new file mode 100644
index 000000000000..a12f7959a2c3
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamhash_desc.h"
+
+/**
+ * cnstr_shdsc_ahash - ahash shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @import_ctx: true if previous Context Register needs to be restored;
+ * must be true for ahash update and final,
+ * must be false for ahash first and digest
+ * @era: SEC Era
+ */
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era)
+{
+ u32 op = adata->algtype;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Append key if it has been set; ahash update excluded */
+ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
+ u32 *skip_key_load;
+
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ if (era < 6)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, adata);
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
+
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+EXPORT_SYMBOL(cnstr_shdsc_ahash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
+MODULE_AUTHOR("NXP Semiconductors");
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
new file mode 100644
index 000000000000..631fc1ac312c
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#ifndef _CAAMHASH_DESC_H_
+#define _CAAMHASH_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era);
+
+#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index f26d62e5533a..4fc209cbbeab 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index fde07d4ff019..4318b0aa6fb9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for hw_random
*
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 1c71e0cd5098..9604ff7a335e 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
@@ -39,6 +40,7 @@
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 538c01f428c1..3fc793193821 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
new file mode 100644
index 000000000000..8a68531ded0b
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an already created
+ * object; an object may have been declared statically in the DPL
+ * or created dynamically.
+ * This function returns a unique authentication token, associated with the
+ * specific object ID and the specific MC portal; this token must be used in all
+ * subsequent commands for this specific object.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are allowed on the
+ * object without opening a new control session.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+ attr->options = le32_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
+ * Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
+ DEST_TYPE);
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+ attr->order_preservation_en =
+ dpseci_get_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_congestion_notification() - Get congestion group notification
+ * configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.priority = rsp_params->priority;
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
+ CGN_DEST_TYPE);
+ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
new file mode 100644
index 000000000000..4550e134d166
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+#ifndef _DPSECI_H_
+#define _DPSECI_H_
+
+/*
+ * Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx queues per DPSECI object
+ */
+#define DPSECI_MAX_QUEUE_NUM 16
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (u8)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ * @num_tx_queues: num of queues towards the SEC
+ * @num_rx_queues: num of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC;
+ * valid priorities are configured with values 1-8;
+ */
+struct dpseci_cfg {
+ u32 options;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 priorities[DPSECI_MAX_QUEUE_NUM];
+};
+
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ */
+struct dpseci_attr {
+ int id;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u32 options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; the queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue from
+ * the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO,
+ DPSECI_DEST_DPCON
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that channel;
+ * not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue;
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
+ * in 'options'
+ * @dest_cfg: Queue destination parameters; valid only if
+ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ u32 options;
+ int order_preservation_en;
+ u64 user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration on the
+ * queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ u64 user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ u32 fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ u32 fqid;
+ u8 priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC
+ * @major_rev: Major revision number for SEC
+ * @minor_rev: Minor revision number for SEC
+ * @era: SEC Era
+ * @deco_num: The number of copies of the DECO that are implemented in this
+ * version of SEC
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ * @crc_acc_num: The number of copies of the CRC module that are implemented in
+ * this version of SEC
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ * @rng_acc_num: The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ * @des_acc_num: The number of copies of the DES module that are implemented in
+ * this version of SEC
+ * @aes_acc_num: The number of copies of the AES module that are implemented in
+ * this version of SEC
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC
+ */
+struct dpseci_sec_attr {
+ u16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
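+
+/*
+ * Illustrative sketch: reject firmware older than the API this header was
+ * written against; DPSECI_VER_MAJOR/DPSECI_VER_MINOR come from
+ * dpseci_cmd.h.
+ */
+static inline int example_check_api_version(struct fsl_mc_io *mc_io)
+{
+ u16 major, minor;
+ int err;
+
+ err = dpseci_get_api_version(mc_io, 0, &major, &minor);
+ if (err)
+ return err;
+
+ if (major < DPSECI_VER_MAJOR ||
+ (major == DPSECI_VER_MAJOR && minor < DPSECI_VER_MINOR))
+ return -EINVAL;
+
+ return 0;
+}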
+
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * CSCN message is written to message_iova upon entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+
+/**
+ * CSCN message is written to message_iova upon exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
+ */
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE', a CSCN message is sent
+ * to the DPIO/DPCON WQ channel upon entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE', a CSCN message is sent
+ * to the DPIO/DPCON WQ channel upon exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE', when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state;
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned
+ * @dest_cfg: CSCN can be sent to either the DPIO or the DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg);
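+
+/*
+ * Illustrative sketch: request a CSCN write into 'iova' both on entering
+ * and on exiting congestion. The byte thresholds are arbitrary example
+ * values; 'iova' must point to 16B-aligned, DMA-able memory.
+ */
+static inline int example_set_congestion(struct fsl_mc_io *mc_io, u16 token,
+ u64 iova)
+{
+ struct dpseci_congestion_notification_cfg cfg = { 0 };
+
+ cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+ cfg.threshold_entry = 1024 * 1024; /* enter congestion at 1 MiB */
+ cfg.threshold_exit = 512 * 1024; /* exit below 512 KiB */
+ cfg.message_iova = iova;
+ cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT;
+
+ return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
+}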
+
+#endif /* _DPSECI_H_ */
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
new file mode 100644
index 000000000000..6ab77ead6e3d
--- /dev/null
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _DPSECI_CMD_H_
+#define _DPSECI_CMD_H_
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 3
+
+#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
+#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
+
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
+
+#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION)
+
+#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION_V2)
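+
+/*
+ * Worked example of the encoding above: DPSECI_CMDID_OPEN below expands to
+ * DPSECI_CMD_V1(0x809) = (0x809 << 4) | 1 = 0x8091; the low nibble carries
+ * the command version, the upper bits the command id.
+ */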
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
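+
+/*
+ * Illustrative sketch of the field helpers, using the 1-bit ENABLE field
+ * defined below:
+ *
+ *	u8 field = 0;
+ *
+ *	dpseci_set_field(field, ENABLE, 1);   now field == 0x01
+ *	enabled = dpseci_get_field(field, ENABLE);
+ */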
+
+struct dpseci_cmd_open {
+ __le32 dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+ u8 is_enabled;
+};
+
+struct dpseci_rsp_get_attributes {
+ __le32 id;
+ __le32 pad0;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 pad1[6];
+ __le32 options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+ u8 order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ __le32 pad;
+ __le32 fqid;
+ u8 priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ __le16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 pad0[3];
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 pad1;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pad2;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 pad3;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+struct dpseci_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#define DPSECI_CGN_DEST_TYPE_SHIFT 0
+#define DPSECI_CGN_DEST_TYPE_SIZE 4
+#define DPSECI_CGN_UNITS_SHIFT 4
+#define DPSECI_CGN_UNITS_SIZE 2
+
+struct dpseci_cmd_congestion_notification {
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 priority;
+ u8 options;
+ __le64 message_iova;
+ __le64 message_ctx;
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#endif /* _DPSECI_CMD_H_ */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 8da88beb1abb..7e8d690f2827 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -108,6 +108,54 @@ static const struct {
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
+static const struct {
+ u8 value;
+ const char *error_text;
+} qi_error_list[] = {
+ { 0x1F, "Job terminated by FQ or ICID flush" },
+ { 0x20, "FD format error"},
+ { 0x21, "FD command format error"},
+ { 0x23, "FL format error"},
+ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
+ { 0x30, "Max. buffer size too small"},
+ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x33, "Size over/underflow (allocate mode)"},
+ { 0x34, "Size over/underflow (reuse mode)"},
+ { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
+ { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
+ { 0x41, "SBC frame format not supported (allocate mode)"},
+ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
+ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
+ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
+ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
+ { 0x46, "Annotation length exceeds offset (reuse mode)"},
+ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
+ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
+ { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
+ { 0x51, "Unsupported IF reuse mode"},
+ { 0x52, "Unsupported FL use mode"},
+ { 0x53, "Unsupported RJD use mode"},
+ { 0x54, "Unsupported inline descriptor use mode"},
+ { 0xC0, "Table buffer pool 0 depletion"},
+ { 0xC1, "Table buffer pool 1 depletion"},
+ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
+ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
+ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
+ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
+ { 0xD0, "FLC read error"},
+ { 0xD1, "FL read error"},
+ { 0xD2, "FL write error"},
+ { 0xD3, "OF SGT write error"},
+ { 0xD4, "PTA read error"},
+ { 0xD5, "PTA write error"},
+ { 0xD6, "OF SGT F-bit write error"},
+ { 0xD7, "ASA write error"},
+ { 0xE1, "FLC[ICR]=0 ICID error"},
+ { 0xE2, "FLC[ICR]=1 ICID error"},
+ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
+};
+
static const char * const cha_id_list[] = {
"",
"AES",
@@ -236,6 +284,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
status, error, idx_str, idx, err_str, err_err_code);
}
+static void report_qi_status(struct device *qidev, const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
+ if (qi_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
+ err_str = qi_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ dev_err(qidev, "%08x: %s: %s%s\n",
+ status, error, err_str, err_err_code);
+}
+
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
@@ -250,7 +319,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
status, error, __func__);
}
-void caam_jr_strstatus(struct device *jrdev, u32 status)
+void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, "Queue Manager Interface" },
+ { report_qi_status, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
@@ -288,4 +357,8 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
-EXPORT_SYMBOL(caam_jr_strstatus);
+EXPORT_SYMBOL(caam_strstatus);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM error reporting");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 5aa332bac4b0..67ea94079837 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,7 +8,11 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
-void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+
+#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
+#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
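+
+/*
+ * Illustrative usage sketch: a QI2 frame-dequeue callback would report any
+ * non-zero frame status through the new wrapper, e.g.
+ *
+ *	if (unlikely(status))
+ *		caam_qi2_strstatus(qidev, status);
+ */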
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index acdd72016ffe..d50085a03597 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c42c93..b84e6c8b1e13 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -84,13 +84,6 @@ static u64 times_congested;
#endif
/*
- * CPU from where the module initialised. This is required because QMan driver
- * requires CGRs to be removed from same CPU from where they were originally
- * allocated.
- */
-static int mod_init_cpu;
-
-/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-int caam_qi_shutdown(struct device *qidev)
+void caam_qi_shutdown(struct device *qidev)
{
- int i, ret;
+ int i;
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
}
- /*
- * QMan driver requires CGRs to be deleted from same CPU from where they
- * were instantiated. Hence we get the module removal execute from the
- * same CPU from where it was originally inserted.
- */
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
- ret = qman_delete_cgr(&priv->cgr);
- if (ret)
- dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
- else
- qman_release_cgrid(priv->cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr);
+ qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
- /* Now that we're done with the CGRs, restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
-
platform_device_unregister(priv->qi_pdev);
- return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
static struct platform_device_info qi_pdev_info = {
.name = "caam_qi",
.id = PLATFORM_DEVID_NONE
};
- /*
- * QMAN requires CGRs to be removed from same CPU+portal from where it
- * was originally allocated. Hence we need to note down the
- * initialisation CPU and use the same CPU for module exit.
- * We select the first CPU to from the list of portal owning CPUs.
- * Then we pin module init to this CPU.
- */
- mod_init_cpu = cpumask_first(cpus);
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
qi_pdev_info.parent = ctrldev;
qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
return -ENOMEM;
}
- /* Done with the CGRs; restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
&times_congested, &caam_fops_u64_ro);
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f57072..f93c9c7ed430 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -62,7 +62,6 @@ typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
enum optype {
ENCRYPT,
DECRYPT,
- GIVENCRYPT,
NUM_OP
};
@@ -174,7 +173,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-int caam_qi_shutdown(struct device *dev);
+void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4fb91ba39c36..457815f965c0 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -70,22 +70,22 @@
extern bool caam_little_end;
extern bool caam_imx;
-#define caam_to_cpu(len) \
-static inline u##len caam##len ## _to_cpu(u##len val) \
-{ \
- if (caam_little_end) \
- return le##len ## _to_cpu(val); \
- else \
- return be##len ## _to_cpu(val); \
+#define caam_to_cpu(len) \
+static inline u##len caam##len ## _to_cpu(u##len val) \
+{ \
+ if (caam_little_end) \
+ return le##len ## _to_cpu((__force __le##len)val); \
+ else \
+ return be##len ## _to_cpu((__force __be##len)val); \
}
-#define cpu_to_caam(len) \
-static inline u##len cpu_to_caam##len(u##len val) \
-{ \
- if (caam_little_end) \
- return cpu_to_le##len(val); \
- else \
- return cpu_to_be##len(val); \
+#define cpu_to_caam(len) \
+static inline u##len cpu_to_caam##len(u##len val) \
+{ \
+ if (caam_little_end) \
+ return (__force u##len)cpu_to_le##len(val); \
+ else \
+ return (__force u##len)cpu_to_be##len(val); \
}
caam_to_cpu(16)
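+
+/*
+ * The macros above instantiate helpers such as caam16_to_cpu() and
+ * cpu_to_caam16(). The __force casts only quiet sparse: the value really
+ * is little- or big-endian depending on how the CAAM block is wired,
+ * which a plain u##len cannot express.
+ */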
@@ -633,6 +633,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
+#define JRSTA_QIERR_ERROR_MASK 0x00ff
+
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index d000b4df745f..b3e1aaaeffea 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SG_SW_QM_H
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index b5b4c12179df..c9378402a5f8 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -1,35 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2015-2016 Freescale Semiconductor, Inc.
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the names of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SG_SW_QM2_H_
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index b0ba4331944b..ca549c5dc08e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -308,21 +308,11 @@ void do_request_cleanup(struct cpt_vf *cptvf,
}
}
- if (info->scatter_components)
- kzfree(info->scatter_components);
-
- if (info->gather_components)
- kzfree(info->gather_components);
-
- if (info->out_buffer)
- kzfree(info->out_buffer);
-
- if (info->in_buffer)
- kzfree(info->in_buffer);
-
- if (info->completion_addr)
- kzfree((void *)info->completion_addr);
-
+ kzfree(info->scatter_components);
+ kzfree(info->gather_components);
+ kzfree(info->out_buffer);
+ kzfree(info->in_buffer);
+ kzfree((void *)info->completion_addr);
kzfree(info);
}
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index 45b7379e8e30..e12954791673 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -7,3 +7,6 @@ n5pf-objs := nitrox_main.o \
nitrox_hal.o \
nitrox_reqmgr.o \
nitrox_algs.o
+
+n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
+n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 312f72801af6..863143a8336b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -12,32 +12,15 @@ void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
void nitrox_put_device(struct nitrox_device *ndev);
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
-int nitrox_pf_init_isr(struct nitrox_device *ndev);
-
int nitrox_common_sw_init(struct nitrox_device *ndev);
void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
-void pkt_slc_resp_handler(unsigned long data);
+void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
struct skcipher_request *skreq);
void backlog_qflush_work(struct work_struct *work);
-void nitrox_config_emu_unit(struct nitrox_device *ndev);
-void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
-void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
-void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
-void nitrox_config_nps_unit(struct nitrox_device *ndev);
-void nitrox_config_pom_unit(struct nitrox_device *ndev);
-void nitrox_config_rand_unit(struct nitrox_device *ndev);
-void nitrox_config_efl_unit(struct nitrox_device *ndev);
-void nitrox_config_bmi_unit(struct nitrox_device *ndev);
-void nitrox_config_bmo_unit(struct nitrox_device *ndev);
-void nitrox_config_lbc_unit(struct nitrox_device *ndev);
-void invalidate_lbc(struct nitrox_device *ndev);
-void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
-void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
#endif /* __NITROX_COMMON_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 9dcb7fdbe0a7..1ad27b1a87c5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -7,9 +7,16 @@
/* EMU clusters */
#define NR_CLUSTERS 4
+/* Maximum number of cores per cluster;
+ * varies based on the part name
+ */
#define AE_CORES_PER_CLUSTER 20
#define SE_CORES_PER_CLUSTER 16
+#define AE_MAX_CORES (AE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define SE_MAX_CORES (SE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define ZIP_MAX_CORES 5
+
/* BIST registers */
#define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
#define UCD_BIST_STATUS 0x12C0070
@@ -111,6 +118,9 @@
#define LBC_ELM_VF65_128_INT 0x120C000
#define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000
+#define RST_BOOT 0x10C1600
+#define FUS_DAT1 0x10C1408
+
/* PEM registers */
#define PEM0_INT 0x1080428
@@ -1082,4 +1092,105 @@ union lbc_inval_status {
} s;
};
+/**
+ * union rst_boot - RST Boot Register
+ * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
+ * is disabled
+ * @jt_tst_mode: JTAG test mode
+ * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
+ * 0x1 = 1.8V
+ * 0x2 = 2.5V
+ * 0x4 = 3.3V
+ * All other values are reserved
+ * @pnr_mul: clock multiplier
+ * @lboot: last boot cause mask, resets only with PLL_DC_OK
+ * @rboot: determines whether core 0 remains in reset after a
+ * chip cold, warm or soft reset
+ * @rboot_pin: read only access to REMOTE_BOOT pin
+ */
+union rst_boot {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_63 : 1;
+ u64 jtcsrdis : 1;
+ u64 raz_59_61 : 3;
+ u64 jt_tst_mode : 1;
+ u64 raz_40_57 : 18;
+ u64 io_supply : 3;
+ u64 raz_30_36 : 7;
+ u64 pnr_mul : 6;
+ u64 raz_12_23 : 12;
+ u64 lboot : 10;
+ u64 rboot : 1;
+ u64 rboot_pin : 1;
+#else
+ u64 rboot_pin : 1;
+ u64 rboot : 1;
+ u64 lboot : 10;
+ u64 raz_12_23 : 12;
+ u64 pnr_mul : 6;
+ u64 raz_30_36 : 7;
+ u64 io_supply : 3;
+ u64 raz_40_57 : 18;
+ u64 jt_tst_mode : 1;
+ u64 raz_59_61 : 3;
+ u64 jtcsrdis : 1;
+ u64 raz_63 : 1;
+#endif
+ };
+};
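+
+/*
+ * Worked example: nitrox_get_hwinfo() derives the core frequency from this
+ * register as (pnr_mul + 3) * PLL_REF_CLK with a 50 MHz reference clock,
+ * e.g. pnr_mul = 13 gives (13 + 3) * 50 = 800 MHz.
+ */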
+
+/**
+ * union fus_dat1 - Fuse Data 1 Register
+ * @pll_mul: main clock PLL multiplier hardware limit
+ * @pll_half_dis: main clock PLL control
+ * @efus_lck: efuse lockdown
+ * @zip_info: ZIP information
+ * @bar2_sz_conf: when zero, BAR2 size conforms to the
+ * PCIe specification
+ * @efus_ign: efuse ignore
+ * @nozip: ZIP disable
+ * @pll_alt_matrix: select alternate PLL matrix
+ * @pll_bwadj_denom: select CLKF denominator for
+ * BWADJ value
+ * @chip_id: chip ID
+ */
+union fus_dat1 {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_57_63 : 7;
+ u64 pll_mul : 3;
+ u64 pll_half_dis : 1;
+ u64 raz_43_52 : 10;
+ u64 efus_lck : 3;
+ u64 raz_26_39 : 14;
+ u64 zip_info : 5;
+ u64 bar2_sz_conf : 1;
+ u64 efus_ign : 1;
+ u64 nozip : 1;
+ u64 raz_11_17 : 7;
+ u64 pll_alt_matrix : 1;
+ u64 pll_bwadj_denom : 2;
+ u64 chip_id : 8;
+#else
+ u64 chip_id : 8;
+ u64 pll_bwadj_denom : 2;
+ u64 pll_alt_matrix : 1;
+ u64 raz_11_17 : 7;
+ u64 nozip : 1;
+ u64 efus_ign : 1;
+ u64 bar2_sz_conf : 1;
+ u64 zip_info : 5;
+ u64 raz_26_39 : 14;
+ u64 efus_lck : 3;
+ u64 raz_43_52 : 10;
+ u64 pll_half_dis : 1;
+ u64 pll_mul : 3;
+ u64 raz_57_63 : 7;
+#endif
+ };
+};
+
#endif /* __NITROX_CSR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
new file mode 100644
index 000000000000..5f3cd5fafe04
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include "nitrox_csr.h"
+#include "nitrox_dev.h"
+
+static int firmware_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
+ return 0;
+}
+
+static int firmware_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, firmware_show, inode->i_private);
+}
+
+static const struct file_operations firmware_fops = {
+ .owner = THIS_MODULE,
+ .open = firmware_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int device_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d]\n", ndev->idx);
+ seq_printf(s, " Part Name: %s\n", ndev->hw.partname);
+ seq_printf(s, " Frequency: %d MHz\n", ndev->hw.freq);
+ seq_printf(s, " Device ID: 0x%0x\n", ndev->hw.device_id);
+ seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
+ seq_printf(s, " Cores: [AE=%u SE=%u ZIP=%u]\n",
+ ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores);
+
+ return 0;
+}
+
+static int nitrox_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, device_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int stats_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx);
+ seq_printf(s, " Posted: %llu\n",
+ (u64)atomic64_read(&ndev->stats.posted));
+ seq_printf(s, " Completed: %llu\n",
+ (u64)atomic64_read(&ndev->stats.completed));
+ seq_printf(s, " Dropped: %llu\n",
+ (u64)atomic64_read(&ndev->stats.dropped));
+
+ return 0;
+}
+
+static int nitrox_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stats_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+ debugfs_remove_recursive(ndev->debugfs_dir);
+ ndev->debugfs_dir = NULL;
+}
+
+int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ struct dentry *dir, *f;
+
+ dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ ndev->debugfs_dir = dir;
+ f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
+ if (!f)
+ goto err;
+
+ return 0;
+
+err:
+ nitrox_debugfs_exit(ndev);
+ return -ENODEV;
+}
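+
+/*
+ * Once registered, the entries appear under debugfs; illustrative paths,
+ * assuming the n5pf module name from the Makefile:
+ *
+ *	/sys/kernel/debug/n5pf/firmware - loaded microcode version
+ *	/sys/kernel/debug/n5pf/device   - part name, frequency, core counts
+ *	/sys/kernel/debug/n5pf/stats    - posted/completed/dropped requests
+ */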
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index af596455b420..283e252385fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -5,92 +5,123 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/if.h>
#define VERSION_LEN 32
+/**
+ * struct nitrox_cmdq - NITROX command queue
+ * @cmd_qlock: command queue lock
+ * @resp_qlock: response queue lock
+ * @backlog_qlock: backlog queue lock
+ * @ndev: NITROX device
+ * @response_head: submitted request list
+ * @backlog_head: backlog queue
+ * @dbell_csr_addr: doorbell register address for this queue
+ * @compl_cnt_csr_addr: completion count register address of the slc port
+ * @base: command queue base address
+ * @dma: dma address of the base
+ * @pending_count: requests pending at the device
+ * @backlog_count: backlog request count
+ * @write_idx: next write index for the command
+ * @instr_size: command size
+ * @qno: command queue number
+ * @qsize: command queue size
+ * @unalign_base: unaligned base address
+ * @unalign_dma: unaligned dma address
+ */
struct nitrox_cmdq {
- /* command queue lock */
- spinlock_t cmdq_lock;
- /* response list lock */
- spinlock_t response_lock;
- /* backlog list lock */
- spinlock_t backlog_lock;
-
- /* request submitted to chip, in progress */
+ spinlock_t cmd_qlock;
+ spinlock_t resp_qlock;
+ spinlock_t backlog_qlock;
+
+ struct nitrox_device *ndev;
struct list_head response_head;
- /* hw queue full, hold in backlog list */
struct list_head backlog_head;
- /* doorbell address */
u8 __iomem *dbell_csr_addr;
- /* base address of the queue */
- u8 *head;
+ u8 __iomem *compl_cnt_csr_addr;
+ u8 *base;
+ dma_addr_t dma;
- struct nitrox_device *ndev;
- /* flush pending backlog commands */
struct work_struct backlog_qflush;
- /* requests posted waiting for completion */
atomic_t pending_count;
- /* requests in backlog queues */
atomic_t backlog_count;
int write_idx;
- /* command size 32B/64B */
u8 instr_size;
u8 qno;
u32 qsize;
- /* unaligned addresses */
- u8 *head_unaligned;
- dma_addr_t dma_unaligned;
- /* dma address of the base */
- dma_addr_t dma;
+ u8 *unalign_base;
+ dma_addr_t unalign_dma;
};
+/**
+ * struct nitrox_hw - NITROX hardware information
+ * @partname: part name, e.g. CNN55xxx-xxx
+ * @fw_name: firmware version
+ * @freq: NITROX frequency in MHz
+ * @vendor_id: vendor ID
+ * @device_id: device ID
+ * @revision_id: revision ID
+ * @se_cores: number of symmetric cores
+ * @ae_cores: number of asymmetric cores
+ * @zip_cores: number of zip cores
+ */
struct nitrox_hw {
- /* firmware version */
+ char partname[IFNAMSIZ * 2];
char fw_name[VERSION_LEN];
+ int freq;
u16 vendor_id;
u16 device_id;
u8 revision_id;
- /* CNN55XX cores */
u8 se_cores;
u8 ae_cores;
u8 zip_cores;
};
-#define MAX_MSIX_VECTOR_NAME 20
-/**
- * vectors for queues (64 AE, 64 SE and 64 ZIP) and
- * error condition/mailbox.
- */
-#define MAX_MSIX_VECTORS 192
-
-struct nitrox_msix {
- struct msix_entry *entries;
- char **names;
- DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
- u32 nr_entries;
+struct nitrox_stats {
+ atomic64_t posted;
+ atomic64_t completed;
+ atomic64_t dropped;
};
-struct bh_data {
- /* slc port completion count address */
- u8 __iomem *completion_cnt_csr_addr;
+#define IRQ_NAMESZ 32
+
+struct nitrox_q_vector {
+ char name[IRQ_NAMESZ];
+ bool valid;
+ int ring;
+ struct tasklet_struct resp_tasklet;
+ union {
+ struct nitrox_cmdq *cmdq;
+ struct nitrox_device *ndev;
+ };
+};
- struct nitrox_cmdq *cmdq;
- struct tasklet_struct resp_handler;
+/*
+ * NITROX Device states
+ */
+enum ndev_state {
+ __NDEV_NOT_READY,
+ __NDEV_READY,
+ __NDEV_IN_RESET,
};
-struct nitrox_bh {
- struct bh_data *slc;
+/* NITROX support modes for VF(s) */
+enum vf_mode {
+ __NDEV_MODE_PF,
+ __NDEV_MODE_VF16,
+ __NDEV_MODE_VF32,
+ __NDEV_MODE_VF64,
+ __NDEV_MODE_VF128,
};
-/* NITROX-V driver state */
-#define NITROX_UCODE_LOADED 0
-#define NITROX_READY 1
+#define __NDEV_SRIOV_BIT 0
/* command queue size */
#define DEFAULT_CMD_QLEN 2048
@@ -98,7 +129,6 @@ struct nitrox_bh {
#define CMD_TIMEOUT 2000
#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
-#define PF_MODE 0
#define NITROX_CSR_ADDR(ndev, offset) \
((ndev)->bar_addr + (offset))
@@ -108,17 +138,18 @@ struct nitrox_bh {
* @list: pointer to linked list of devices
* @bar_addr: iomap address
* @pdev: PCI device information
- * @status: NITROX status
+ * @state: NITROX device state
+ * @flags: flags to indicate the device features
* @timeout: Request timeout in jiffies
* @refcnt: Device usage count
* @idx: device index (0..N)
* @node: NUMA node id attached
* @qlen: Command queue length
* @nr_queues: Number of command queues
+ * @mode: Device mode PF/VF
* @ctx_pool: DMA pool for crypto context
- * @pkt_cmdqs: SE Command queues
- * @msix: MSI-X information
- * @bh: post processing work
+ * @pkt_inq: Packet input rings
+ * @qvec: MSI-X queue vectors information
* @hw: hardware information
* @debugfs_dir: debugfs directory
*/
@@ -128,7 +159,8 @@ struct nitrox_device {
u8 __iomem *bar_addr;
struct pci_dev *pdev;
- unsigned long status;
+ atomic_t state;
+ unsigned long flags;
unsigned long timeout;
refcount_t refcnt;
@@ -136,13 +168,16 @@ struct nitrox_device {
int node;
u16 qlen;
u16 nr_queues;
+ int num_vfs;
+ enum vf_mode mode;
struct dma_pool *ctx_pool;
- struct nitrox_cmdq *pkt_cmdqs;
+ struct nitrox_cmdq *pkt_inq;
- struct nitrox_msix msix;
- struct nitrox_bh bh;
+ struct nitrox_q_vector *qvec;
+ int num_vecs;
+ struct nitrox_stats stats;
struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *debugfs_dir;
@@ -173,9 +208,22 @@ static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
writeq(value, (ndev->bar_addr + offset));
}
-static inline int nitrox_ready(struct nitrox_device *ndev)
+static inline bool nitrox_ready(struct nitrox_device *ndev)
{
- return test_bit(NITROX_READY, &ndev->status);
+ return atomic_read(&ndev->state) == __NDEV_READY;
}
+#ifdef CONFIG_DEBUG_FS
+int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_exit(struct nitrox_device *ndev);
+#else
+static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ return 0;
+}
+
+static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{ }
+#endif
+
#endif /* __NITROX_DEV_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index ab4ccf2f9e77..a9b82387cf53 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -4,6 +4,8 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
+#define PLL_REF_CLK 50
+
/**
* emu_enable_cores - Enable EMU cluster cores.
* @ndev: N5 device
@@ -117,7 +119,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
union nps_pkt_in_instr_rsize pkt_in_rsize;
u64 offset;
@@ -256,7 +258,7 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
/* disable ILK interface */
core_gbl_vfcfg.value = 0;
core_gbl_vfcfg.s.ilk_disable = 1;
- core_gbl_vfcfg.s.cfg = PF_MODE;
+ core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
/* config input and solicit ports */
nitrox_config_pkt_input_rings(ndev);
@@ -400,3 +402,68 @@ void nitrox_config_lbc_unit(struct nitrox_device *ndev)
offset = LBC_ELM_VF65_128_INT_ENA_W1S;
nitrox_write_csr(ndev, offset, (~0ULL));
}
+
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
+{
+ union nps_core_gbl_vfcfg vfcfg;
+
+ vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
+ vfcfg.s.cfg = mode & 0x7;
+
+ nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
+}
+
+void nitrox_get_hwinfo(struct nitrox_device *ndev)
+{
+ union emu_fuse_map emu_fuse;
+ union rst_boot rst_boot;
+ union fus_dat1 fus_dat1;
+ unsigned char name[IFNAMSIZ * 2] = {};
+ int i, dead_cores;
+ u64 offset;
+
+ /* get core frequency */
+ offset = RST_BOOT;
+ rst_boot.value = nitrox_read_csr(ndev, offset);
+ ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
+
+ for (i = 0; i < NR_CLUSTERS; i++) {
+ offset = EMU_FUSE_MAPX(i);
+ emu_fuse.value = nitrox_read_csr(ndev, offset);
+ if (emu_fuse.s.valid) {
+ dead_cores = hweight32(emu_fuse.s.ae_fuse);
+ ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
+ dead_cores = hweight16(emu_fuse.s.se_fuse);
+ ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
+ }
+ }
+ /* find zip hardware availability */
+ offset = FUS_DAT1;
+ fus_dat1.value = nitrox_read_csr(ndev, offset);
+ if (!fus_dat1.nozip) {
+ dead_cores = hweight8(fus_dat1.zip_info);
+ ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
+ }
+
+ /* determine the part name, e.g. CNN55<cores>-<freq><pincount>-<rev> */
+ if (ndev->hw.ae_cores == AE_MAX_CORES) {
+ switch (ndev->hw.se_cores) {
+ case SE_MAX_CORES:
+ i = snprintf(name, sizeof(name), "CNN5560");
+ break;
+ case 40:
+ i = snprintf(name, sizeof(name), "CNN5560s");
+ break;
+ default:
+ /* avoid using 'i' uninitialized: fall back to the base part name */
+ i = snprintf(name, sizeof(name), "CNN5560");
+ break;
+ }
+ } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
+ i = snprintf(name, sizeof(name), "CNN5530");
+ } else {
+ i = snprintf(name, sizeof(name), "CNN5560i");
+ }
+
+ snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
+ ndev->hw.freq, ndev->hw.revision_id);
+
+ /* copy partname */
+ strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
new file mode 100644
index 000000000000..489ee64c119e
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_HAL_H
+#define __NITROX_HAL_H
+
+#include "nitrox_dev.h"
+
+void nitrox_config_emu_unit(struct nitrox_device *ndev);
+void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
+void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
+void nitrox_config_nps_unit(struct nitrox_device *ndev);
+void nitrox_config_pom_unit(struct nitrox_device *ndev);
+void nitrox_config_rand_unit(struct nitrox_device *ndev);
+void nitrox_config_efl_unit(struct nitrox_device *ndev);
+void nitrox_config_bmi_unit(struct nitrox_device *ndev);
+void nitrox_config_bmo_unit(struct nitrox_device *ndev);
+void nitrox_config_lbc_unit(struct nitrox_device *ndev);
+void invalidate_lbc(struct nitrox_device *ndev);
+void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
+void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
+void nitrox_get_hwinfo(struct nitrox_device *ndev);
+
+#endif /* __NITROX_HAL_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index ee0d70ba25d5..88a77b8fb3fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -6,9 +6,16 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
+#include "nitrox_hal.h"
+/*
+ * One MSI-X vector for each type of ring:
+ * NPS packet ring, AQMQ ring and ZQM ring
+ */
#define NR_RING_VECTORS 3
-#define NPS_CORE_INT_ACTIVE_ENTRY 192
+/* base entry for packet ring/port */
+#define PKT_RING_MSIX_BASE 0
+#define NON_RING_MSIX_BASE 192
/**
* nps_pkt_slc_isr - IRQ handler for NPS solicit port
@@ -17,13 +24,14 @@
*/
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
- struct bh_data *slc = data;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = data;
+ union nps_pkt_slc_cnts slc_cnts;
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
- pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* New packet on SLC output port */
- if (pkt_slc_cnts.s.slc_int)
- tasklet_hi_schedule(&slc->resp_handler);
+ if (slc_cnts.s.slc_int)
+ tasklet_hi_schedule(&qvec->resp_tasklet);
return IRQ_HANDLED;
}
@@ -190,165 +198,92 @@ static void clear_bmi_err_intr(struct nitrox_device *ndev)
dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
}
+static void nps_core_int_tasklet(unsigned long data)
+{
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_device *ndev = qvec->ndev;
+
+ /* if PF mode, do queue recovery */
+ if (ndev->mode == __NDEV_MODE_PF) {
+ } else {
+ /*
+ * if VF(s) are enabled, communicate the error
+ * information to the VF(s)
+ */
+ }
+}
+
/**
- * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
- * @ndev: NITROX device
+ * nps_core_int_isr - interrupt handler for NITROX errors and
+ * mailbox communication
*/
-static void clear_nps_core_int_active(struct nitrox_device *ndev)
+static irqreturn_t nps_core_int_isr(int irq, void *data)
{
- union nps_core_int_active core_int_active;
+ struct nitrox_device *ndev = data;
+ union nps_core_int_active core_int;
- core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
+ core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
- if (core_int_active.s.nps_core)
+ if (core_int.s.nps_core)
clear_nps_core_err_intr(ndev);
- if (core_int_active.s.nps_pkt)
+ if (core_int.s.nps_pkt)
clear_nps_pkt_err_intr(ndev);
- if (core_int_active.s.pom)
+ if (core_int.s.pom)
clear_pom_err_intr(ndev);
- if (core_int_active.s.pem)
+ if (core_int.s.pem)
clear_pem_err_intr(ndev);
- if (core_int_active.s.lbc)
+ if (core_int.s.lbc)
clear_lbc_err_intr(ndev);
- if (core_int_active.s.efl)
+ if (core_int.s.efl)
clear_efl_err_intr(ndev);
- if (core_int_active.s.bmi)
+ if (core_int.s.bmi)
clear_bmi_err_intr(ndev);
/* If more work callback the ISR, set resend */
- core_int_active.s.resend = 1;
- nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
-}
-
-static irqreturn_t nps_core_int_isr(int irq, void *data)
-{
- struct nitrox_device *ndev = data;
-
- clear_nps_core_int_active(ndev);
+ core_int.s.resend = 1;
+ nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
return IRQ_HANDLED;
}
-static int nitrox_enable_msix(struct nitrox_device *ndev)
+void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
- struct msix_entry *entries;
- char **names;
- int i, nr_entries, ret;
-
- /*
- * PF MSI-X vectors
- *
- * Entry 0: NPS PKT ring 0
- * Entry 1: AQMQ ring 0
- * Entry 2: ZQM ring 0
- * Entry 3: NPS PKT ring 1
- * Entry 4: AQMQ ring 1
- * Entry 5: ZQM ring 1
- * ....
- * Entry 192: NPS_CORE_INT_ACTIVE
- */
- nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
- entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
- GFP_KERNEL, ndev->node);
- if (!entries)
- return -ENOMEM;
-
- names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
- if (!names) {
- kfree(entries);
- return -ENOMEM;
- }
-
- /* fill entires */
- for (i = 0; i < (nr_entries - 1); i++)
- entries[i].entry = i;
-
- entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
-
- for (i = 0; i < nr_entries; i++) {
- *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!(*(names + i))) {
- ret = -ENOMEM;
- goto msix_fail;
- }
- }
- ndev->msix.entries = entries;
- ndev->msix.names = names;
- ndev->msix.nr_entries = nr_entries;
-
- ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
- ndev->msix.nr_entries);
- if (ret) {
- dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
- ret);
- goto msix_fail;
- }
- return 0;
-
-msix_fail:
- for (i = 0; i < nr_entries; i++)
- kfree(*(names + i));
-
- kfree(entries);
- kfree(names);
- return ret;
-}
-
-static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- int i;
-
- if (!ndev->bh.slc)
- return;
-
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
-
- tasklet_disable(&bh->resp_handler);
- tasklet_kill(&bh->resp_handler);
- }
- kfree(ndev->bh.slc);
- ndev->bh.slc = NULL;
-}
-
-static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- u32 size;
+ struct pci_dev *pdev = ndev->pdev;
int i;
- size = ndev->nr_queues * sizeof(struct bh_data);
- ndev->bh.slc = kzalloc(size, GFP_KERNEL);
- if (!ndev->bh.slc)
- return -ENOMEM;
+ for (i = 0; i < ndev->num_vecs; i++) {
+ struct nitrox_q_vector *qvec;
+ int vec;
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
- u64 offset;
+ qvec = ndev->qvec + i;
+ if (!qvec->valid)
+ continue;
- offset = NPS_PKT_SLC_CNTSX(i);
- /* pre calculate completion count address */
- bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- bh->cmdq = &ndev->pkt_cmdqs[i];
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ irq_set_affinity_hint(vec, NULL);
+ free_irq(vec, qvec);
- tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
- (unsigned long)bh);
+ tasklet_disable(&qvec->resp_tasklet);
+ tasklet_kill(&qvec->resp_tasklet);
+ qvec->valid = false;
}
-
- return 0;
+ kfree(ndev->qvec);
+ pci_free_irq_vectors(pdev);
}
-static int nitrox_request_irqs(struct nitrox_device *ndev)
+int nitrox_register_interrupts(struct nitrox_device *ndev)
{
struct pci_dev *pdev = ndev->pdev;
- struct msix_entry *msix_ent = ndev->msix.entries;
- int nr_ring_vectors, i = 0, ring, cpu, ret;
- char *name;
+ struct nitrox_q_vector *qvec;
+ int nr_vecs, vec, cpu;
+ int ret, i;
/*
* PF MSI-X vectors
@@ -357,112 +292,76 @@ static int nitrox_request_irqs(struct nitrox_device *ndev)
* Entry 1: AQMQ ring 0
* Entry 2: ZQM ring 0
* Entry 3: NPS PKT ring 1
+ * Entry 4: AQMQ ring 1
+ * Entry 5: ZQM ring 1
* ....
* Entry 192: NPS_CORE_INT_ACTIVE
*/
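+ /*
+ * Illustrative mapping under this layout: packet ring r owns MSI-X
+ * entry r * NR_RING_VECTORS (ring 0 -> entry 0, ring 1 -> entry 3,
+ * ring 63 -> entry 189), and entry 192 handles the core interrupt.
+ */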
- nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
-
- /* request irq for pkt ring/ports only */
- while (i < nr_ring_vectors) {
- name = *(ndev->msix.names + i);
- ring = (i / NR_RING_VECTORS);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
- ndev->idx, ring);
+ nr_vecs = pci_msix_vec_count(pdev);
- ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
- name, &ndev->bh.slc[ring]);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
- return ret;
- }
- cpu = ring % num_online_cpus();
- irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
-
- set_bit(i, ndev->msix.irqs);
- i += NR_RING_VECTORS;
- }
-
- /* Request IRQ for NPS_CORE_INT_ACTIVE */
- name = *(ndev->msix.names + i);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
- ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
return ret;
}
- set_bit(i, ndev->msix.irqs);
+ ndev->num_vecs = nr_vecs;
- return 0;
-}
-
-static void nitrox_disable_msix(struct nitrox_device *ndev)
-{
- struct msix_entry *msix_ent = ndev->msix.entries;
- char **names = ndev->msix.names;
- int i = 0, ring, nr_ring_vectors;
-
- nr_ring_vectors = ndev->msix.nr_entries - 1;
-
- /* clear pkt ring irqs */
- while (i < nr_ring_vectors) {
- if (test_and_clear_bit(i, ndev->msix.irqs)) {
- ring = (i / NR_RING_VECTORS);
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
- }
- i += NR_RING_VECTORS;
+ ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
+ if (!ndev->qvec) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
}
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, ndev);
- clear_bit(i, ndev->msix.irqs);
- kfree(ndev->msix.entries);
- for (i = 0; i < ndev->msix.nr_entries; i++)
- kfree(*(names + i));
+ /* request irqs for packet rings/ports */
+ for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
+ qvec = &ndev->qvec[i];
- kfree(names);
- pci_disable_msix(ndev->pdev);
-}
-
-/**
- * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
- * @ndev: NITROX device
- */
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
-{
- nitrox_disable_msix(ndev);
- nitrox_cleanup_pkt_slc_bh(ndev);
-}
+ qvec->ring = i / NR_RING_VECTORS;
+ if (qvec->ring >= ndev->nr_queues)
+ break;
-/**
- * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
- * @ndev: NITROX device
- *
- * Return: 0 on success, a negative value on failure.
- */
-int nitrox_pf_init_isr(struct nitrox_device *ndev)
-{
- int err;
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
+ qvec->ring);
+ goto irq_fail;
+ }
+ cpu = qvec->ring % num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
- err = nitrox_setup_pkt_slc_bh(ndev);
- if (err)
- return err;
+ tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
+ (unsigned long)qvec);
+ qvec->cmdq = &ndev->pkt_inq[qvec->ring];
+ qvec->valid = true;
+ }
- err = nitrox_enable_msix(ndev);
- if (err)
- goto msix_fail;
+ /* request irqs for non ring vectors */
+ i = NON_RING_MSIX_BASE;
+ qvec = &ndev->qvec[i];
- err = nitrox_request_irqs(ndev);
- if (err)
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
goto irq_fail;
+ }
+ cpu = num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
+ (unsigned long)qvec);
+ qvec->ndev = ndev;
+ qvec->valid = true;
return 0;
irq_fail:
- nitrox_disable_msix(ndev);
-msix_fail:
- nitrox_cleanup_pkt_slc_bh(ndev);
- return err;
+ nitrox_unregister_interrupts(ndev);
+ return ret;
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
new file mode 100644
index 000000000000..63418a6cc52c
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_ISR_H
+#define __NITROX_ISR_H
+
+#include "nitrox_dev.h"
+
+int nitrox_register_interrupts(struct nitrox_device *ndev);
+void nitrox_unregister_interrupts(struct nitrox_device *ndev);
+
+#endif /* __NITROX_ISR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4d31df07777f..2260efa42308 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,30 +17,27 @@
#define CRYPTO_CTX_SIZE 256
-/* command queue alignments */
-#define PKT_IN_ALIGN 16
+/* packet input ring alignments */
+#define PKTIN_Q_ALIGN_BYTES 16
-static int cmdq_common_init(struct nitrox_cmdq *cmdq)
+static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
struct nitrox_device *ndev = cmdq->ndev;
- u32 qsize;
-
- qsize = (ndev->qlen) * cmdq->instr_size;
- cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
- (qsize + PKT_IN_ALIGN),
- &cmdq->dma_unaligned,
- GFP_KERNEL);
- if (!cmdq->head_unaligned)
+
+ cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
+ cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
+ &cmdq->unalign_dma,
+ GFP_KERNEL);
+ if (!cmdq->unalign_base)
return -ENOMEM;
- cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
- cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
- cmdq->qsize = (qsize + PKT_IN_ALIGN);
+ cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
+ cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
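+ /*
+ * Example of the fix-up above with 16-byte alignment: if unalign_dma
+ * ends in ...08, PTR_ALIGN rounds the DMA address up by 8 bytes and
+ * 'base' advances by the same delta, so the CPU and DMA views of the
+ * ring stay in lockstep.
+ */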
cmdq->write_idx = 0;
- spin_lock_init(&cmdq->response_lock);
- spin_lock_init(&cmdq->cmdq_lock);
- spin_lock_init(&cmdq->backlog_lock);
+ spin_lock_init(&cmdq->cmd_qlock);
+ spin_lock_init(&cmdq->resp_qlock);
+ spin_lock_init(&cmdq->backlog_qlock);
INIT_LIST_HEAD(&cmdq->response_head);
INIT_LIST_HEAD(&cmdq->backlog_head);
@@ -51,68 +48,83 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
return 0;
}
-static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
+static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
+{
+ cmdq->write_idx = 0;
+ atomic_set(&cmdq->pending_count, 0);
+ atomic_set(&cmdq->backlog_count, 0);
+}
+
+static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
+ if (!cmdq->unalign_base)
+ return;
+
cancel_work_sync(&cmdq->backlog_qflush);
dma_free_coherent(DEV(ndev), cmdq->qsize,
- cmdq->head_unaligned, cmdq->dma_unaligned);
-
- atomic_set(&cmdq->pending_count, 0);
- atomic_set(&cmdq->backlog_count, 0);
+ cmdq->unalign_base, cmdq->unalign_dma);
+ nitrox_cmdq_reset(cmdq);
cmdq->dbell_csr_addr = NULL;
- cmdq->head = NULL;
+ cmdq->compl_cnt_csr_addr = NULL;
+ cmdq->unalign_base = NULL;
+ cmdq->base = NULL;
+ cmdq->unalign_dma = 0;
cmdq->dma = 0;
cmdq->qsize = 0;
cmdq->instr_size = 0;
}
-static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
+static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
- cmdq_common_cleanup(cmdq);
+ nitrox_cmdq_cleanup(cmdq);
}
- kfree(ndev->pkt_cmdqs);
- ndev->pkt_cmdqs = NULL;
+ kfree(ndev->pkt_inq);
+ ndev->pkt_inq = NULL;
}
-static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
+static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
- int i, err, size;
+ int i, err;
- size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
- ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
- if (!ndev->pkt_cmdqs)
+ ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
+ sizeof(struct nitrox_cmdq),
+ GFP_KERNEL, ndev->node);
+ if (!ndev->pkt_inq)
return -ENOMEM;
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq;
u64 offset;
- cmdq = &ndev->pkt_cmdqs[i];
+ cmdq = &ndev->pkt_inq[i];
cmdq->ndev = ndev;
cmdq->qno = i;
cmdq->instr_size = sizeof(struct nps_pkt_instr);
+ /* packet input ring doorbell address */
offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
- /* SE ring doorbell address for this queue */
cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+ /* packet solicit port completion count address */
+ offset = NPS_PKT_SLC_CNTSX(i);
+ cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- err = cmdq_common_init(cmdq);
+ err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
if (err)
- goto pkt_cmdq_fail;
+ goto pktq_fail;
}
return 0;
-pkt_cmdq_fail:
- nitrox_cleanup_pkt_cmdqs(ndev);
+pktq_fail:
+ nitrox_free_pktin_queues(ndev);
return err;
}
@@ -122,7 +134,7 @@ static int create_crypto_dma_pool(struct nitrox_device *ndev)
/* Crypto context pool, 16 byte aligned */
size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
- ndev->ctx_pool = dma_pool_create("crypto-context",
+ ndev->ctx_pool = dma_pool_create("nitrox-context",
DEV(ndev), size, 16, 0);
if (!ndev->ctx_pool)
return -ENOMEM;
@@ -149,7 +161,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+ vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
if (!vaddr)
return NULL;
@@ -194,7 +206,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_init_pkt_cmdqs(ndev);
+ err = nitrox_alloc_pktin_queues(ndev);
if (err)
destroy_crypto_dma_pool(ndev);
@@ -207,6 +219,6 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
*/
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_cleanup_pkt_cmdqs(ndev);
+ nitrox_free_pktin_queues(ndev);
destroy_crypto_dma_pool(ndev);
}
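
The nitrox_cmdq_init() change above over-allocates by the alignment and then aligns the DMA address and CPU pointer together. A minimal userspace sketch of that arithmetic, assuming malloc() stands in for dma_zalloc_coherent() and all names are illustrative rather than driver code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define Q_ALIGN 16

int main(void)
{
	size_t qlen = 2048, instr_size = 64;
	size_t qsize = qlen * instr_size + Q_ALIGN;

	void *unalign_base = calloc(1, qsize);	/* stand-in for dma_zalloc_coherent() */
	if (!unalign_base)
		return 1;

	/* pretend the allocation's bus address equals its CPU address */
	uintptr_t unalign_dma = (uintptr_t)unalign_base;
	uintptr_t dma = (unalign_dma + Q_ALIGN - 1) & ~(uintptr_t)(Q_ALIGN - 1);
	/* offset the CPU pointer by the same amount so both views stay in sync */
	void *base = (char *)unalign_base + (dma - unalign_dma);

	printf("unaligned %p -> aligned %p (offset %zu)\n",
	       unalign_base, base, (size_t)(dma - unalign_dma));
	free(unalign_base);
	return 0;
}
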
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index fee7cb2ce747..6595c95af9f1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -11,13 +11,15 @@
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
+#include "nitrox_hal.h"
+#include "nitrox_isr.h"
#define CNN55XX_DEV_ID 0x12
#define MAX_PF_QUEUES 64
#define UCODE_HLEN 48
#define SE_GROUP 0
-#define DRIVER_VERSION "1.0"
+#define DRIVER_VERSION "1.1"
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW FW_DIR "cnn55xx_se.fw"
@@ -42,6 +44,15 @@ static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
+#ifdef CONFIG_PCI_IOV
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
+#else
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ return 0;
+}
+#endif
+
/**
* struct ucode - Firmware Header
* @id: microcode ID
@@ -136,9 +147,6 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
write_to_ucd_unit(ndev, ucode);
release_firmware(fw);
- set_bit(NITROX_UCODE_LOADED, &ndev->status);
- /* barrier to sync with other cpus */
- smp_mb__after_atomic();
return 0;
}
@@ -210,7 +218,7 @@ void nitrox_put_device(struct nitrox_device *ndev)
smp_mb__after_atomic();
}
-static int nitrox_reset_device(struct pci_dev *pdev)
+static int nitrox_device_flr(struct pci_dev *pdev)
{
int pos = 0;
@@ -220,15 +228,10 @@ static int nitrox_reset_device(struct pci_dev *pdev)
return -ENOMEM;
}
- pos = pci_pcie_cap(pdev);
- if (!pos)
- return -ENOTTY;
+ /* check FLR support */
+ if (pcie_has_flr(pdev))
+ pcie_flr(pdev);
- if (!pci_wait_for_pending_transaction(pdev))
- dev_err(&pdev->dev, "waiting for pending transaction\n");
-
- pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- msleep(100);
pci_restore_state(pdev);
return 0;
@@ -242,7 +245,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_pf_init_isr(ndev);
+ err = nitrox_register_interrupts(ndev);
if (err)
nitrox_common_sw_cleanup(ndev);
@@ -251,7 +254,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_pf_cleanup_isr(ndev);
+ nitrox_unregister_interrupts(ndev);
nitrox_common_sw_cleanup(ndev);
}
@@ -284,26 +287,6 @@ static int nitrox_bist_check(struct nitrox_device *ndev)
return 0;
}
-static void nitrox_get_hwinfo(struct nitrox_device *ndev)
-{
- union emu_fuse_map emu_fuse;
- u64 offset;
- int i;
-
- for (i = 0; i < NR_CLUSTERS; i++) {
- u8 dead_cores;
-
- offset = EMU_FUSE_MAPX(i);
- emu_fuse.value = nitrox_read_csr(ndev, offset);
- if (emu_fuse.s.valid) {
- dead_cores = hweight32(emu_fuse.s.ae_fuse);
- ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
- dead_cores = hweight16(emu_fuse.s.se_fuse);
- ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
- }
- }
-}
-
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
int err;
@@ -336,135 +319,6 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
return 0;
}
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static int registers_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
- u64 offset;
-
- /* NPS DMA stats */
- offset = NPS_STATS_PKT_DMA_RD_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = NPS_STATS_PKT_DMA_WR_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- /* BMI/BMO stats */
- offset = BMI_NPS_PKT_CNT;
- seq_printf(s, "BMI_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = BMO_NPS_SLC_PKT_CNT;
- seq_printf(s, "BMO_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- return 0;
-}
-
-static int registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, registers_show, inode->i_private);
-}
-
-static const struct file_operations register_fops = {
- .owner = THIS_MODULE,
- .open = registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int firmware_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
- return 0;
-}
-
-static int firmware_open(struct inode *inode, struct file *file)
-{
- return single_open(file, firmware_show, inode->i_private);
-}
-
-static const struct file_operations firmware_fops = {
- .owner = THIS_MODULE,
- .open = firmware_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int nitrox_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
- seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
- seq_printf(s, " Cores [AE: %u SE: %u]\n",
- ndev->hw.ae_cores, ndev->hw.se_cores);
- seq_printf(s, " Number of Queues: %u\n", ndev->nr_queues);
- seq_printf(s, " Queue length: %u\n", ndev->qlen);
- seq_printf(s, " Node: %u\n", ndev->node);
-
- return 0;
-}
-
-static int nitrox_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nitrox_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
- debugfs_remove_recursive(ndev->debugfs_dir);
- ndev->debugfs_dir = NULL;
-}
-
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- struct dentry *dir, *f;
-
- dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!dir)
- return -ENOMEM;
-
- ndev->debugfs_dir = dir;
- f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
- if (!f)
- goto err;
-
- return 0;
-
-err:
- nitrox_debugfs_exit(ndev);
- return -ENODEV;
-}
-#else
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- return 0;
-}
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
-}
-#endif
-
/**
* nitrox_probe - NITROX Initialization function.
* @pdev: PCI device information struct
@@ -487,7 +341,7 @@ static int nitrox_probe(struct pci_dev *pdev,
return err;
/* do FLR */
- err = nitrox_reset_device(pdev);
+ err = nitrox_device_flr(pdev);
if (err) {
dev_err(&pdev->dev, "FLR failed\n");
pci_disable_device(pdev);
@@ -555,7 +409,12 @@ static int nitrox_probe(struct pci_dev *pdev,
if (err)
goto pf_hw_fail;
- set_bit(NITROX_READY, &ndev->status);
+ /* clear the statistics */
+ atomic64_set(&ndev->stats.posted, 0);
+ atomic64_set(&ndev->stats.completed, 0);
+ atomic64_set(&ndev->stats.dropped, 0);
+
+ atomic_set(&ndev->state, __NDEV_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
@@ -567,7 +426,7 @@ static int nitrox_probe(struct pci_dev *pdev,
crypto_fail:
nitrox_debugfs_exit(ndev);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
pf_hw_fail:
@@ -602,11 +461,16 @@ static void nitrox_remove(struct pci_dev *pdev)
dev_info(DEV(ndev), "Removing Device %x:%x\n",
ndev->hw.vendor_id, ndev->hw.device_id);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
nitrox_remove_from_devlist(ndev);
+
+#ifdef CONFIG_PCI_IOV
+ /* disable SR-IOV */
+ nitrox_sriov_configure(pdev, 0);
+#endif
nitrox_crypto_unregister();
nitrox_debugfs_exit(ndev);
nitrox_pf_sw_cleanup(ndev);
@@ -632,6 +496,9 @@ static struct pci_driver nitrox_driver = {
.probe = nitrox_probe,
.remove = nitrox_remove,
.shutdown = nitrox_shutdown,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = nitrox_sriov_configure,
+#endif
};
module_pci_driver(nitrox_driver);
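
The nitrox_sriov_configure() hunk above uses the usual conditional-stub idiom: a real declaration when CONFIG_PCI_IOV is set, otherwise an inline no-op so callers compile either way. A small standalone sketch of the same pattern; FEATURE_SRIOV and the function name are hypothetical:

#include <stdio.h>

#ifdef FEATURE_SRIOV
int sriov_configure(int num_vfs);	/* real implementation elsewhere */
#else
static inline int sriov_configure(int num_vfs)
{
	(void)num_vfs;
	return 0;			/* feature absent: succeed as a no-op */
}
#endif

int main(void)
{
	printf("sriov_configure(0) -> %d\n", sriov_configure(0));
	return 0;
}
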
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4a362fc22f62..3987cd84c033 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -382,11 +382,11 @@ static inline void backlog_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->backlog);
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_add_tail(&sr->backlog, &cmdq->backlog_head);
atomic_inc(&cmdq->backlog_count);
atomic_set(&sr->status, REQ_BACKLOG);
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
}
static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +394,17 @@ static inline void response_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->response);
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_add_tail(&sr->response, &cmdq->response_head);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static inline void response_list_del(struct nitrox_softreq *sr,
struct nitrox_cmdq *cmdq)
{
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_del(&sr->response);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static struct nitrox_softreq *
@@ -439,11 +439,11 @@ static void post_se_instr(struct nitrox_softreq *sr,
int idx;
u8 *ent;
- spin_lock_bh(&cmdq->cmdq_lock);
+ spin_lock_bh(&cmdq->cmd_qlock);
idx = cmdq->write_idx;
/* copy the instruction */
- ent = cmdq->head + (idx * cmdq->instr_size);
+ ent = cmdq->base + (idx * cmdq->instr_size);
memcpy(ent, &sr->instr, cmdq->instr_size);
atomic_set(&sr->status, REQ_POSTED);
@@ -459,7 +459,10 @@ static void post_se_instr(struct nitrox_softreq *sr,
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
- spin_unlock_bh(&cmdq->cmdq_lock);
+ spin_unlock_bh(&cmdq->cmd_qlock);
+
+ /* increment the posted command count */
+ atomic64_inc(&ndev->stats.posted);
}
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
@@ -471,7 +474,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
if (!atomic_read(&cmdq->backlog_count))
return 0;
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
struct skcipher_request *skreq;
@@ -494,7 +497,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* backlog requests are posted, wakeup with -EINPROGRESS */
skcipher_request_complete(skreq, -EINPROGRESS);
}
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
return ret;
}
@@ -508,8 +511,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
post_backlog_cmds(cmdq);
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
- if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ /* increment drop count */
+ atomic64_inc(&ndev->stats.dropped);
return -ENOSPC;
+ }
/* add to backlog list */
backlog_list_add(sr, cmdq);
return -EBUSY;
@@ -572,7 +578,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* select the queue */
qno = smp_processor_id() % ndev->nr_queues;
- sr->cmdq = &ndev->pkt_cmdqs[qno];
+ sr->cmdq = &ndev->pkt_inq[qno];
/*
* 64-Byte Instruction Format
@@ -694,6 +700,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
READ_ONCE(sr->resp.orh));
}
atomic_dec(&cmdq->pending_count);
+ atomic64_inc(&ndev->stats.completed);
/* sync with other cpus */
smp_mb__after_atomic();
/* remove from response list */
@@ -714,18 +721,18 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
}
/**
- * pkt_slc_resp_handler - post processing of SE responses
+ * pkt_slc_resp_tasklet - post processing of SE responses
*/
-void pkt_slc_resp_handler(unsigned long data)
+void pkt_slc_resp_tasklet(unsigned long data)
{
- struct bh_data *bh = (void *)(uintptr_t)(data);
- struct nitrox_cmdq *cmdq = bh->cmdq;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
+ union nps_pkt_slc_cnts slc_cnts;
/* read completion count */
- pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* resend the interrupt if more work to do */
- pkt_slc_cnts.s.resend = 1;
+ slc_cnts.s.resend = 1;
process_response_list(cmdq);
@@ -733,7 +740,7 @@ void pkt_slc_resp_handler(unsigned long data)
* clear the interrupt with resend bit enabled,
* MSI-X interrupt generates if Completion count > Threshold
*/
- writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
+ writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
/* order the writes */
mmiowb();
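
The reqmgr hunks above add posted/completed/dropped accounting around enqueue and completion. A userspace sketch of the same bookkeeping, assuming C11 atomics in place of the kernel's atomic64_t; the struct and function names are illustrative only:

#include <stdatomic.h>
#include <stdio.h>

struct dev_stats {
	atomic_ulong posted;
	atomic_ulong completed;
	atomic_ulong dropped;
};

static void post_request(struct dev_stats *s, int queue_full, int may_backlog)
{
	if (queue_full && !may_backlog) {
		atomic_fetch_add(&s->dropped, 1);	/* caller would see -ENOSPC */
		return;
	}
	atomic_fetch_add(&s->posted, 1);
	/* ... hardware completes the command later ... */
	atomic_fetch_add(&s->completed, 1);
}

int main(void)
{
	struct dev_stats s = { 0 };

	post_request(&s, 0, 0);		/* normal post */
	post_request(&s, 1, 0);		/* full queue, no backlog: dropped */
	printf("posted=%lu completed=%lu dropped=%lu\n",
	       atomic_load(&s.posted), atomic_load(&s.completed),
	       atomic_load(&s.dropped));
	return 0;
}
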
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
new file mode 100644
index 000000000000..30c0aa874583
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_hal.h"
+#include "nitrox_common.h"
+#include "nitrox_isr.h"
+
+static inline bool num_vfs_valid(int num_vfs)
+{
+ bool valid = false;
+
+ switch (num_vfs) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ valid = true;
+ break;
+ }
+
+ return valid;
+}
+
+static inline enum vf_mode num_vfs_to_mode(int num_vfs)
+{
+ enum vf_mode mode = 0;
+
+ switch (num_vfs) {
+ case 0:
+ mode = __NDEV_MODE_PF;
+ break;
+ case 16:
+ mode = __NDEV_MODE_VF16;
+ break;
+ case 32:
+ mode = __NDEV_MODE_VF32;
+ break;
+ case 64:
+ mode = __NDEV_MODE_VF64;
+ break;
+ case 128:
+ mode = __NDEV_MODE_VF128;
+ break;
+ }
+
+ return mode;
+}
+
+static void pf_sriov_cleanup(struct nitrox_device *ndev)
+{
+ /* PF has no queues in SR-IOV mode */
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
+ /* unregister crypto algorithms */
+ nitrox_crypto_unregister();
+
+ /* cleanup PF resources */
+ nitrox_unregister_interrupts(ndev);
+ nitrox_common_sw_cleanup(ndev);
+}
+
+static int pf_sriov_init(struct nitrox_device *ndev)
+{
+ int err;
+
+ /* allocate resources for PF */
+ err = nitrox_common_sw_init(ndev);
+ if (err)
+ return err;
+
+ err = nitrox_register_interrupts(ndev);
+ if (err) {
+ nitrox_common_sw_cleanup(ndev);
+ return err;
+ }
+
+ /* configure the packet queues */
+ nitrox_config_pkt_input_rings(ndev);
+ nitrox_config_pkt_solicit_ports(ndev);
+
+ /* set device to ready state */
+ atomic_set(&ndev->state, __NDEV_READY);
+
+ /* register crypto algorithms */
+ return nitrox_crypto_register();
+}
+
+static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs_valid(num_vfs)) {
+ dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
+ return -EINVAL;
+ }
+
+ if (pci_num_vf(pdev) == num_vfs)
+ return num_vfs;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
+ return err;
+ }
+ dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
+
+ ndev->num_vfs = num_vfs;
+ ndev->mode = num_vfs_to_mode(num_vfs);
+ /* set bit in flags */
+ set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ /* cleanup PF resources */
+ pf_sriov_cleanup(ndev);
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return num_vfs;
+}
+
+static int nitrox_sriov_disable(struct pci_dev *pdev)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+
+ if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
+ return 0;
+
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
+ return -EPERM;
+ }
+ pci_disable_sriov(pdev);
+ /* clear bit in flags */
+ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ ndev->num_vfs = 0;
+ ndev->mode = __NDEV_MODE_PF;
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return pf_sriov_init(ndev);
+}
+
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (!num_vfs)
+ return nitrox_sriov_disable(pdev);
+
+ return nitrox_sriov_enable(pdev, num_vfs);
+}
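
The validation logic in nitrox_sriov.c accepts only 16/32/64/128 VFs and maps each count to a VF mode. A quick standalone check of that mapping, with illustrative enum values:

#include <stdbool.h>
#include <stdio.h>

enum vf_mode { MODE_PF, MODE_VF16, MODE_VF32, MODE_VF64, MODE_VF128 };

static bool num_vfs_valid(int num_vfs)
{
	switch (num_vfs) {
	case 16: case 32: case 64: case 128:
		return true;
	}
	return false;
}

static enum vf_mode num_vfs_to_mode(int num_vfs)
{
	switch (num_vfs) {
	case 16:  return MODE_VF16;
	case 32:  return MODE_VF32;
	case 64:  return MODE_VF64;
	case 128: return MODE_VF128;
	}
	return MODE_PF;			/* 0 VFs: plain PF mode */
}

int main(void)
{
	int counts[] = { 0, 8, 16, 128 };

	for (unsigned i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("num_vfs=%d valid=%d mode=%d\n", counts[i],
		       num_vfs_valid(counts[i]), num_vfs_to_mode(counts[i]));
	return 0;
}
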
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 94b5bcf5b628..ca4630b8395f 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -102,7 +102,7 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+ return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
@@ -151,12 +151,13 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
(ctx->u.aes.key_len != AES_KEYSIZE_256))
fallback = 1;
if (fallback) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
+ ctx->u.aes.tfm_skcipher);
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -203,12 +204,12 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *fallback_tfm;
+ struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+ fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
@@ -226,7 +227,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
+ crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b9fd090c46c2..28819e11db96 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -88,7 +88,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_skcipher *tfm_skcipher;
+ struct crypto_sync_skcipher *tfm_skcipher;
/* Cipher used to generate CMAC K1/K2 keys */
struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 72790d88236d..d64a78ccc03e 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -31,8 +31,9 @@
((psp_master->api_major) >= _maj && \
(psp_master->api_minor) >= _min)
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -423,7 +424,7 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
static int sev_get_api_version(void)
{
struct sev_user_data_status *status;
- int error, ret;
+ int error = 0, ret;
status = &psp_master->status_cmd_buf;
ret = sev_platform_status(status, &error);
@@ -440,6 +441,41 @@ static int sev_get_api_version(void)
return 0;
}
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+ * Fall back to the generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
+
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
@@ -449,9 +485,10 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
- ret = request_firmware(&firmware, SEV_FW_FILE, dev);
- if (ret < 0)
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
return -1;
+ }
/*
* SEV FW expects the physical address given to it to be 32
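
The sev_get_firmware() addition above tries a family/model-specific name, then a model-subset name, then the generic file. A userspace sketch of that lookup order, assuming fopen() stands in for firmware_request_nowarn() and the family/model values are made up for illustration:

#include <stdio.h>

#define FW_NAME_SIZE 64

static FILE *get_firmware(unsigned family, unsigned model)
{
	char specific[FW_NAME_SIZE], subset[FW_NAME_SIZE];
	const char *names[3];

	snprintf(specific, sizeof(specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin", family, model);
	snprintf(subset, sizeof(subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 family, (model & 0xf0) >> 4);

	names[0] = specific;		/* most specific first */
	names[1] = subset;
	names[2] = "amd/sev.fw";	/* generic fallback */

	for (int i = 0; i < 3; i++) {
		FILE *fw = fopen(names[i], "rb");
		if (fw) {
			printf("loaded %s\n", names[i]);
			return fw;
		}
	}
	return NULL;			/* caller treats this like -ENOENT */
}

int main(void)
{
	FILE *fw = get_firmware(0x17, 0x01);

	if (!fw)
		printf("no SEV firmware file present\n");
	else
		fclose(fw);
	return 0;
}
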
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 71734f254fd1..b75dc7db2d4a 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -33,8 +33,31 @@ struct sp_platform {
unsigned int irq_count;
};
-static const struct acpi_device_id sp_acpi_match[];
-static const struct of_device_id sp_of_match[];
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3_platform,
+#endif
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
@@ -201,32 +224,6 @@ static int sp_platform_resume(struct platform_device *pdev)
}
#endif
-static const struct sp_dev_vdata dev_vdata[] = {
- {
- .bar = 0,
-#ifdef CONFIG_CRYPTO_DEV_SP_CCP
- .ccp_vdata = &ccpv3_platform,
-#endif
- },
-};
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id sp_acpi_match[] = {
- { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id sp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a",
- .data = (const void *)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
-
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index a091ae57f902..45985b955d2c 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
- enum drv_cipher_mode mode)
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
@@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
- enum drv_crypto_direction mode)
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
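
Both ccree setters above OR a FIELD_PREP()-packed value into word[4] of the descriptor. A userspace sketch of that packing: shift the value to the mask's lowest set bit and mask it, as a plain-C stand-in for the kernel macro. The mask value here is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define WORD4_CIPHER_MODE  0x000f0000u	/* hypothetical field mask */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	unsigned shift = 0;

	while (!((mask >> shift) & 1))	/* find the field's low bit */
		shift++;
	return (val << shift) & mask;
}

int main(void)
{
	uint32_t word4 = 0;

	word4 |= field_prep(WORD4_CIPHER_MODE, 0x3);	/* mode = 3 */
	printf("word4 = 0x%08x\n", word4);		/* prints 0x00030000 */
	return 0;
}
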
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 010bbf607797..db203f8be429 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -673,7 +673,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
return min(srclen, dstlen);
}
-static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
u32 flags,
struct scatterlist *src,
struct scatterlist *dst,
@@ -683,9 +683,9 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
{
int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
- skcipher_request_set_tfm(subreq, cipher);
+ skcipher_request_set_sync_tfm(subreq, cipher);
skcipher_request_set_callback(subreq, flags, NULL, NULL);
skcipher_request_set_crypt(subreq, src, dst,
nbytes, iv);
@@ -856,13 +856,14 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
- crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+ crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ablkctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
return err;
}
@@ -1337,8 +1338,7 @@ static int chcr_device_init(struct chcr_context *ctx)
}
ctx->dev = u_ctx->dev;
adap = padap(ctx->dev);
- ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
- adap->vres.ncrypto_fc);
+ ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
@@ -1369,8 +1369,8 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1399,8 +1399,8 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
* cannot be used as fallback in chcr_handle_cipher_response
*/
- ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1415,7 +1415,7 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_skcipher(ablkctx->sw_cipher);
+ crypto_free_sync_skcipher(ablkctx->sw_cipher);
if (ablkctx->aes_generic)
crypto_free_cipher(ablkctx->aes_generic);
}
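
The chcr conversion above (and the ccp, mxs-dcp, omap, picoxcell, and qce ones like it) all follow one pattern: allocate a crypto_sync_skcipher, build an on-stack request, and run the software fallback synchronously. A kernel-context sketch of that pattern, mirroring only calls visible in this diff; it is illustrative, not driver code, and won't build outside a kernel tree:

#include <crypto/skcipher.h>

static int run_sw_fallback(struct crypto_sync_skcipher *sw_cipher,
			   struct scatterlist *src, struct scatterlist *dst,
			   unsigned int nbytes, void *iv, u32 flags)
{
	/* on-stack request sized for a sync (no async callback) tfm */
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, sw_cipher);
	int err;

	skcipher_request_set_sync_tfm(subreq, sw_cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe key material off the stack */
	return err;
}
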
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 62249d4ed373..2c472e3c6aeb 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -43,7 +43,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
- .ntxq = MAX_ULD_QSETS,
+ /* Max ntxq will be derived from fw config file */
.rxq_size = 1024,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 0d2c70c344f3..d37ef41f9ebe 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -170,7 +170,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
}
struct ablk_ctx {
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
struct crypto_cipher *aes_generic;
__be32 key_ctx_hdr;
unsigned int enckey_len;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 0997e166ea57..20209e29f814 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -234,8 +234,7 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
return;
out:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void release_tcp_port(struct sock *sk)
@@ -406,12 +405,10 @@ static int wait_for_states(struct sock *sk, unsigned int states)
int chtls_disconnect(struct sock *sk, int flags)
{
- struct chtls_sock *csk;
struct tcp_sock *tp;
int err;
tp = tcp_sk(sk);
- csk = rcu_dereference_sk_user_data(sk);
chtls_purge_recv_queue(sk);
chtls_purge_receive_queue(sk);
chtls_purge_write_queue(sk);
@@ -1014,7 +1011,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
- const struct tcphdr *tcph;
struct inet_sock *newinet;
const struct iphdr *iph;
struct net_device *ndev;
@@ -1036,7 +1032,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (!dst)
goto free_sk;
- tcph = (struct tcphdr *)(iph + 1);
n = dst_neigh_lookup(dst, &iph->saddr);
if (!n)
goto free_sk;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f59b044ebd25..f472c51abe56 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -272,8 +272,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
kfree_skb(cdev->rspq_skb_cache[i]);
kfree(cdev->lldi);
- if (cdev->askb)
- kfree_skb(cdev->askb);
+ kfree_skb(cdev->askb);
kfree(cdev);
}
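
The two chtls hunks above drop "if (x) kfree_skb(x)" because kfree_skb(), like kfree(), is NULL-safe, so callers need no guard. A userspace analogue of the same convention; the names are illustrative:

#include <stdlib.h>

struct buf { char *data; };

static void buf_free(struct buf *b)
{
	if (!b)			/* accept NULL so callers need no guard */
		return;
	free(b->data);
	free(b);
}

int main(void)
{
	buf_free(NULL);		/* safe: no check required at the call site */
	return 0;
}
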
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 56bd28174f52..4e6ff32f8a7e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -28,9 +28,24 @@
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
+#define DCP_SHA_PAY_SZ 64
#define DCP_ALIGNMENT 64
+/*
+ * Null hashes to align with hw behavior on imx6sl and imx6ull;
+ * these are flipped for consistency with hw output.
+ */
+static const uint8_t sha1_null_hash[] =
+ "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
+ "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
+
+static const uint8_t sha256_null_hash[] =
+ "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
+ "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
+ "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
+ "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
+
/* DCP DMA descriptor. */
struct dcp_dma_desc {
uint32_t next_cmd_addr;
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
uint8_t aes_in_buf[DCP_BUF_SZ];
uint8_t aes_out_buf[DCP_BUF_SZ];
uint8_t sha_in_buf[DCP_BUF_SZ];
+ uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
uint8_t aes_key[2 * AES_KEYSIZE_128];
@@ -84,7 +100,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -99,6 +115,11 @@ struct dcp_sha_req_ctx {
unsigned int fini:1;
};
+struct dcp_export_state {
+ struct dcp_sha_req_ctx req_ctx;
+ struct dcp_async_ctx async_ctx;
+};
+
/*
* There can even be only one instance of the MXS DCP due to the
* design of Linux Crypto API.
@@ -209,6 +230,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
+ if (actx->fill % AES_BLOCK_SIZE) {
+ dev_err(sdcp->dev, "Invalid block size!\n");
+ ret = -EINVAL;
+ goto aes_done_run;
+ }
+
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@@ -238,6 +265,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
+aes_done_run:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,13 +292,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
uint32_t dst_off = 0;
+ uint32_t last_out_len = 0;
uint8_t *key = sdcp->coh->aes_key;
int ret = 0;
int split = 0;
- unsigned int i, len, clen, rem = 0;
+ unsigned int i, len, clen, rem = 0, tlen = 0;
int init = 0;
+ bool limit_hit = false;
actx->fill = 0;
@@ -289,6 +319,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
for_each_sg(req->src, src, nents, i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
+ tlen += len;
+ limit_hit = tlen > req->nbytes;
+
+ if (limit_hit)
+ len = req->nbytes - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -305,13 +340,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* If we filled the buffer or this is the last SG,
* submit the buffer.
*/
- if (actx->fill == out_off || sg_is_last(src)) {
+ if (actx->fill == out_off || sg_is_last(src) ||
+ limit_hit) {
ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;
out_tmp = out_buf;
+ last_out_len = actx->fill;
while (dst && actx->fill) {
if (!split) {
dst_buf = sg_virt(dst);
@@ -334,6 +371,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
}
}
} while (len);
+
+ if (limit_hit)
+ break;
+ }
+
+ /* Copy the last ciphertext block as the next IV for CBC chaining */
+ if (!rctx->ecb) {
+ if (rctx->enc)
+ memcpy(req->info, out_buf + (last_out_len - AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
+ else
+ memcpy(req->info, in_buf + (last_out_len - AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
}
return ret;
@@ -380,10 +430,10 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -464,16 +514,16 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(actx->fallback,
+ crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(actx->fallback, key, len);
+ ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
if (!ret)
return 0;
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -482,11 +532,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -499,7 +548,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(actx->fallback);
+ crypto_free_sync_skcipher(actx->fallback);
}
/*
@@ -513,8 +562,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t digest_phys = 0;
@@ -536,10 +583,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
desc->payload = 0;
desc->status = 0;
+ /*
+ * Align driver with hw behavior when generating null hashes
+ */
+ if (rctx->init && rctx->fini && desc->size == 0) {
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+ const uint8_t *sha_buf =
+ (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
+ sha1_null_hash : sha256_null_hash;
+ memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
+ ret = 0;
+ goto done_run;
+ }
+
/* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) {
- digest_phys = dma_map_single(sdcp->dev, req->result,
- halg->digestsize, DMA_FROM_DEVICE);
+ digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+ DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}
@@ -547,9 +607,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
ret = mxs_dcp_start_dma(actx);
if (rctx->fini)
- dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+ dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
DMA_FROM_DEVICE);
+done_run:
dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
return ret;
@@ -567,6 +628,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
+ uint8_t *out_buf = sdcp->coh->sha_out_buf;
uint8_t *src_buf;
@@ -621,11 +683,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
actx->fill = 0;
- /* For some reason, the result is flipped. */
- for (i = 0; i < halg->digestsize / 2; i++) {
- swap(req->result[i],
- req->result[halg->digestsize - i - 1]);
- }
+ /* The hardware emits the digest byte-reversed; flip it into result */
+ for (i = 0; i < halg->digestsize; i++)
+ req->result[i] = out_buf[halg->digestsize - i - 1];
}
return 0;
@@ -766,14 +826,32 @@ static int dcp_sha_digest(struct ahash_request *req)
return dcp_sha_finup(req);
}
-static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+static int dcp_sha_import(struct ahash_request *req, const void *in)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ const struct dcp_export_state *export = in;
+
+ memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+ memset(actx, 0, sizeof(struct dcp_async_ctx));
+ memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+ memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
-static int dcp_sha_noexport(struct ahash_request *req, void *out)
+static int dcp_sha_export(struct ahash_request *req, void *out)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+ struct dcp_export_state *export = out;
+
+ memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+ memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
@@ -846,10 +924,11 @@ static struct ahash_alg dcp_sha1_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-dcp",
@@ -872,10 +951,11 @@ static struct ahash_alg dcp_sha256_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-dcp",
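
The dcp_sha_export()/dcp_sha_import() pair above replaces the -ENOSYS stubs by flattening the whole request context and transform context into one dcp_export_state struct and copying it back verbatim on import. A userspace sketch of that scheme; the struct layouts here are invented for illustration:

#include <stdio.h>
#include <string.h>

struct req_ctx   { unsigned len; unsigned fini; };
struct async_ctx { unsigned alg; unsigned fill; };
struct export_state {
	struct req_ctx   req_ctx;
	struct async_ctx async_ctx;
};

static void sha_export(const struct req_ctx *r, const struct async_ctx *a,
		       struct export_state *out)
{
	memcpy(&out->req_ctx, r, sizeof(*r));
	memcpy(&out->async_ctx, a, sizeof(*a));
}

static void sha_import(struct req_ctx *r, struct async_ctx *a,
		       const struct export_state *in)
{
	/* zero first, then restore, as the driver does */
	memset(r, 0, sizeof(*r));
	memset(a, 0, sizeof(*a));
	memcpy(r, &in->req_ctx, sizeof(*r));
	memcpy(a, &in->async_ctx, sizeof(*a));
}

int main(void)
{
	struct req_ctx r = { 40, 0 };
	struct async_ctx a = { 1, 8 };
	struct export_state st;
	struct req_ctx r2;
	struct async_ctx a2;

	sha_export(&r, &a, &st);
	sha_import(&r2, &a2, &st);
	printf("len=%u alg=%u\n", r2.len, a2.alg);
	return 0;
}
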
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9019f6b67986..a553ffddb11b 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -522,9 +522,9 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_CBC));
if (req->nbytes < aes_fallback_sz) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -564,11 +564,11 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
return 0;
@@ -613,11 +613,10 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -667,7 +666,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback)
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index fc3b46a85809..7e02920ef6f8 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -101,7 +101,7 @@ struct omap_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct crypto_skcipher *ctr;
};
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 321d5e2ac833..a28f1d18fe01 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
* fallback to a software version.
*/
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -799,17 +799,17 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- crypto_skcipher_clear_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher,
cipher->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+ err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
if (err)
@@ -914,7 +914,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
/*
@@ -922,7 +922,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
* the ciphering has completed, put the old transform back into the
* request.
*/
- skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -1020,9 +1020,8 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_skcipher(
- alg->cra_name, 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_sync_skcipher(
+ alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->cra_name);
@@ -1041,7 +1040,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->sw_cipher);
+ crypto_free_sync_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct ablkcipher_request *req)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1138e41d6805..d2698299896f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -113,6 +113,13 @@ struct qat_alg_aead_ctx {
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ };
+ char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+ char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_ablkcipher_ctx {
@@ -148,37 +155,32 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- char ipad[block_size];
- char opad[block_size];
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
+ memset(ctx->ipad, 0, block_size);
+ memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ipad);
+ auth_keylen, ctx->ipad);
if (ret)
return ret;
- memcpy(opad, ipad, digest_size);
+ memcpy(ctx->opad, ctx->ipad, digest_size);
} else {
- memcpy(ipad, auth_key, auth_keylen);
- memcpy(opad, auth_key, auth_keylen);
+ memcpy(ctx->ipad, auth_key, auth_keylen);
+ memcpy(ctx->opad, auth_key, auth_keylen);
}
for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ipad + i;
- char *opad_ptr = opad + i;
+ char *ipad_ptr = ctx->ipad + i;
+ char *opad_ptr = ctx->opad + i;
*ipad_ptr ^= HMAC_IPAD_VALUE;
*opad_ptr ^= HMAC_OPAD_VALUE;
}
@@ -186,7 +188,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, ipad, block_size))
+ if (crypto_shash_update(shash, ctx->ipad, block_size))
return -EFAULT;
hash_state_out = (__be32 *)hash->sha.state1;
@@ -194,22 +196,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
@@ -218,7 +220,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, opad, block_size))
+ if (crypto_shash_update(shash, ctx->opad, block_size))
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
@@ -227,28 +229,28 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
}
- memzero_explicit(ipad, block_size);
- memzero_explicit(opad, block_size);
+ memzero_explicit(ctx->ipad, block_size);
+ memzero_explicit(ctx->opad, block_size);
return 0;
}
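
qat_alg_do_precomputes() above builds the HMAC pads: the key is zero-padded (or hashed down) to the block size, then XORed with the standard 0x36/0x5c constants to form ipad and opad. A userspace sketch of just that precomputation, with no real hashing; the block size and key are illustrative:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64		/* SHA-1/SHA-256 block size */
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c

int main(void)
{
	const char key[] = "example-auth-key";	/* assume key <= BLOCK_SIZE */
	char ipad[BLOCK_SIZE] = { 0 }, opad[BLOCK_SIZE] = { 0 };

	memcpy(ipad, key, sizeof(key) - 1);
	memcpy(opad, key, sizeof(key) - 1);

	for (int i = 0; i < BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
	/* ipad/opad would now prefix the inner and outer hash inputs */
	printf("ipad[0]=0x%02x opad[0]=0x%02x\n",
	       (unsigned char)ipad[0], (unsigned char)opad[0]);
	return 0;
}
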
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ea4d96bf47e8..585e1cab9ae3 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
fallback:
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -212,9 +212,9 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -245,9 +245,8 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
memset(ctx, 0, sizeof(*ctx));
tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
@@ -255,7 +254,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
struct qce_ablkcipher_def {
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 2b0278bb6e92..ee055bfe98a0 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
/**
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index faa282074e5a..0064be0e3941 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -249,8 +249,8 @@ struct s5p_aes_reqctx {
struct s5p_aes_ctx {
struct s5p_aes_dev *dev;
- uint8_t aes_key[AES_MAX_KEY_SIZE];
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
+ u8 aes_key[AES_MAX_KEY_SIZE];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
int keylen;
};
@@ -475,9 +475,9 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
}
/* Calls the completion. Cannot be called with dev->lock hold. */
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_aes_complete(struct ablkcipher_request *req, int err)
{
- dev->req->base.complete(&dev->req->base, err);
+ req->base.complete(&req->base, err);
}
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -491,7 +491,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
}
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
- struct scatterlist **dst)
+ struct scatterlist **dst)
{
void *pages;
int len;
@@ -518,46 +518,28 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
+ return -ENOMEM;
dev->sg_dst = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
+ return -ENOMEM;
dev->sg_src = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
/*
@@ -655,14 +637,14 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+ struct ablkcipher_request *req;
int err_dma_tx = 0;
int err_dma_rx = 0;
int err_dma_hx = 0;
bool tx_end = false;
bool hx_end = false;
unsigned long flags;
- uint32_t status;
- u32 st_bits;
+ u32 status, st_bits;
int err;
spin_lock_irqsave(&dev->lock, flags);
@@ -727,7 +709,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, 0);
+ s5p_aes_complete(dev->req, 0);
/* Device is still busy */
tasklet_schedule(&dev->tasklet);
} else {
@@ -752,11 +734,12 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
error:
s5p_sg_done(dev);
dev->busy = false;
+ req = dev->req;
if (err_dma_hx == 1)
s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
hash_irq_end:
/*
@@ -1830,7 +1813,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
};
static void s5p_set_aes(struct s5p_aes_dev *dev,
- const uint8_t *key, const uint8_t *iv,
+ const u8 *key, const u8 *iv, const u8 *ctr,
unsigned int keylen)
{
void __iomem *keystart;
@@ -1838,6 +1821,9 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
if (iv)
memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ if (ctr)
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
+
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
else if (keylen == AES_KEYSIZE_192)
@@ -1887,7 +1873,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1916,11 +1902,12 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
- uint32_t aes_control;
+ u32 aes_control;
unsigned long flags;
int err;
- u8 *iv;
+ u8 *iv, *ctr;
+ /* This sets bits [13:12] to 00, which selects the 128-bit counter */
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;
@@ -1928,11 +1915,14 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
iv = req->info;
+ ctr = NULL;
} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
- iv = req->info;
+ iv = NULL;
+ ctr = req->info;
} else {
iv = NULL; /* AES_ECB */
+ ctr = NULL;
}
if (dev->ctx->keylen == AES_KEYSIZE_192)
@@ -1964,7 +1954,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
s5p_set_dma_indata(dev, dev->sg_src);
s5p_set_dma_outdata(dev, dev->sg_dst);
@@ -1983,7 +1973,7 @@ indata_error:
s5p_sg_done(dev);
dev->busy = false;
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
}
static void s5p_tasklet_cb(unsigned long data)
@@ -2024,7 +2014,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
err = ablkcipher_enqueue_request(&dev->queue, req);
if (dev->busy) {
spin_unlock_irqrestore(&dev->lock, flags);
- goto exit;
+ return err;
}
dev->busy = true;
@@ -2032,7 +2022,6 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
tasklet_schedule(&dev->tasklet);
-exit:
return err;
}
@@ -2043,7 +2032,8 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct s5p_aes_dev *dev = ctx->dev;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+ ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
}
@@ -2054,7 +2044,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
}
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
- const uint8_t *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2090,6 +2080,11 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
+static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+{
+ return s5p_aes_crypt(req, FLAGS_AES_CTR);
+}
+
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2144,6 +2139,28 @@ static struct crypto_alg algs[] = {
.decrypt = s5p_aes_cbc_decrypt,
}
},
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-s5p",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ctr_crypt,
+ .decrypt = s5p_aes_ctr_crypt,
+ }
+ },
};
static int s5p_aes_probe(struct platform_device *pdev)
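
With the "ctr(aes)" entry registered, in-kernel users reach this hardware through the generic API; the crypto core picks the highest-priority provider, which may or may not be ctr-aes-s5p (priority 100). A hedged sketch, with key/keylen as placeholders and error handling trimmed:

	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_skcipher_setkey(tfm, key, keylen);
	/* Build an skcipher_request whose IV is the 16-byte initial counter
	 * block. Because CTR is a stream mode, cryptlen need not be a
	 * multiple of AES_BLOCK_SIZE, which is why the alignment check in
	 * s5p_aes_crypt() above now exempts FLAGS_AES_CTR. */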
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index e7540a5b8197..bbf166a97ad3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -149,7 +149,7 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct sahara_aes_reqctx {
@@ -621,14 +621,14 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback.
*/
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -666,9 +666,9 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -688,9 +688,9 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -710,9 +710,9 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -732,9 +732,9 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -752,8 +752,7 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
@@ -769,7 +768,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index b71895871be3..c5c5ff82b52e 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -32,7 +32,7 @@
#include "aesp8-ppc.h"
struct p8_aes_cbc_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
@@ -40,11 +40,11 @@ struct p8_aes_cbc_ctx {
static int p8_aes_cbc_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
@@ -53,7 +53,7 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -66,7 +66,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -86,7 +86,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -100,8 +100,8 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
@@ -139,8 +139,8 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_decrypt(req);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index cd777c75291d..8a2fe092cb8e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -32,18 +32,18 @@
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -51,7 +51,7 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -64,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -83,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -119,8 +119,8 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index e9954a7d4694..ecd64e5cc5bb 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -33,7 +33,7 @@
#include "aesp8-ppc.h"
struct p8_aes_xts_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct aes_key tweak_key;
@@ -42,11 +42,11 @@ struct p8_aes_xts_ctx {
static int p8_aes_xts_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -54,7 +54,7 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -67,7 +67,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm)
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -92,7 +92,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -109,8 +109,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index e1fa6baf4e8e..bb3096bf2cc6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -559,7 +559,12 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
memset(result + size, 0, JOURNAL_MAC_SIZE - size);
} else {
- __u8 digest[size];
+ __u8 digest[HASH_MAX_DIGESTSIZE];
+
+ if (WARN_ON(size > sizeof(digest))) {
+ dm_integrity_io_error(ic, "digest_size", -EINVAL);
+ goto err;
+ }
r = crypto_shash_final(desc, digest);
if (unlikely(r)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -1324,7 +1329,7 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[ic->tag_size + extra_space];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
unsigned sectors_to_process = dio->range.n_sectors;
sector_t sector = dio->range.logical_sector;
@@ -1333,8 +1338,14 @@ static void integrity_metadata(struct work_struct *w)
checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
- if (!checksums)
+ if (!checksums) {
checksums = checksums_onstack;
+ if (WARN_ON(extra_space &&
+ digest_size > sizeof(checksums_onstack))) {
+ r = -EINVAL;
+ goto error;
+ }
+ }
__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
unsigned pos;
@@ -1546,7 +1557,7 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -1596,7 +1607,7 @@ retry_kmap:
if (ic->internal_hash) {
unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
if (unlikely(digest_size > ic->tag_size)) {
- char checksums_onstack[digest_size];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
@@ -2023,7 +2034,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
unlikely(from_replay) &&
#endif
ic->internal_hash) {
- char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
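
The dm-integrity hunks above are all one pattern: a variable-length array sized by crypto_shash_digestsize() (or the tag size) becomes a fixed HASH_MAX_DIGESTSIZE buffer plus a runtime check. Distilled, with tfm as a placeholder shash handle:

	u8 digest[HASH_MAX_DIGESTSIZE];	/* fixed bound replaces the VLA */
	unsigned int size = crypto_shash_digestsize(tfm);

	/* The crypto core caps digests at HASH_MAX_DIGESTSIZE, so this
	 * should be unreachable; the WARN_ON documents the assumption. */
	if (WARN_ON(size > sizeof(digest)))
		return -EINVAL;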
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 684af08d0747..0ce04e5b4afb 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -212,12 +212,15 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_fec_io *fio = fec_io(io);
u64 block, ileaved;
u8 *bbuf, *rs_block;
- u8 want_digest[v->digest_size];
+ u8 want_digest[HASH_MAX_DIGESTSIZE];
unsigned n, k;
if (neras)
*neras = 0;
+ if (WARN_ON(v->digest_size > sizeof(want_digest)))
+ return -EINVAL;
+
/*
* read each of the rsn data blocks that are part of the RS block, and
* interleave contents to available bufs
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 267322693ed5..9a6065a3fa46 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -520,10 +520,20 @@ setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
if (!txq_info)
return -ENOMEM;
+ if (uld_type == CXGB4_ULD_CRYPTO) {
+ i = min_t(int, adap->vres.ncrypto_fc,
+ num_online_cpus());
+ txq_info->ntxq = rounddown(i, adap->params.nports);
+ if (txq_info->ntxq <= 0) {
+ dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
+ kfree(txq_info);
+ return -EINVAL;
+ }
- i = min_t(int, uld_info->ntxq, num_online_cpus());
- txq_info->ntxq = roundup(i, adap->params.nports);
-
+ } else {
+ i = min_t(int, uld_info->ntxq, num_online_cpus());
+ txq_info->ntxq = roundup(i, adap->params.nports);
+ }
txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
GFP_KERNEL);
if (!txq_info->uldtxq) {
@@ -546,11 +556,14 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int tx_uld_type = TX_ULD(uld_type);
+ struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
lli->rxq_ids = rxq_info->rspq_id;
lli->nrxq = rxq_info->nrxq;
lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
lli->nciq = rxq_info->nciq;
+ lli->ntxq = txq_info->ntxq;
}
int t4_uld_mem_alloc(struct adapter *adap)
@@ -634,7 +647,6 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
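
The crypto path rounds down where the generic ULD path rounds up because its queue count is bounded by a hardware resource, adap->vres.ncrypto_fc. Worked with illustrative numbers (not from the patch): 4 ports, 16 online CPUs and ncrypto_fc = 10 give

	i    = min(10, 16)      = 10
	ntxq = rounddown(10, 4) = 8	/* stays within the 10 channels */

whereas roundup(10, 4) = 12 would oversubscribe the crypto resource. rounddown() can also reach zero, which the new -EINVAL check above catches.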
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index a205750b431b..7ccdc62c6052 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -95,7 +95,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_skcipher *arc4;
+ struct crypto_sync_skcipher *arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,15 +155,15 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
struct scatterlist sg_in[1], sg_out[1];
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_skcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
+ state->keylen);
sg_init_table(sg_in, 1);
sg_init_table(sg_out, 1);
setup_sg(sg_in, state->sha1_digest, state->keylen);
@@ -181,7 +181,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->session_key,
+ state->keylen);
skcipher_request_zero(req);
}
@@ -203,7 +204,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(state->arc4)) {
state->arc4 = NULL;
goto out_free;
@@ -250,7 +251,7 @@ out_free:
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
}
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -266,7 +267,7 @@ static void mppe_free(void *arg)
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
}
}
@@ -366,7 +367,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
int err;
struct scatterlist sg_in[1], sg_out[1];
@@ -426,7 +427,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
err = crypto_skcipher_encrypt(req);
@@ -480,7 +481,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
struct scatterlist sg_in[1], sg_out[1];
@@ -615,7 +616,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
if (crypto_skcipher_decrypt(req)) {
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 9b17f72349ed..321a92613a7e 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -310,6 +310,37 @@ int dpaa2_io_service_rearm(struct dpaa2_io *d,
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
/**
+ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_fq(&pd, fqid);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
* dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
* @d: the given DPIO service.
* @channelid: the given channel id.
@@ -342,6 +373,33 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
+/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
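
Together, the two new exports give callers a direct frame-queue path alongside the existing channel-based one. A hedged usage sketch; fqid, fd and dev are placeholders, and the store helpers are the existing dpaa2_io_store API:

	struct dpaa2_io_store *s;
	struct dpaa2_dq *dq;
	int err, is_last = 0;

	s = dpaa2_io_store_create(16, dev);	/* room for 16 dequeue entries */
	if (!s)
		return -ENOMEM;

	/* Passing a NULL service lets service_select() pick a DPIO. */
	err = dpaa2_io_service_enqueue_fq(NULL, fqid, &fd);

	err = dpaa2_io_service_pull_fq(NULL, fqid, s);
	while (!err && !is_last) {
		dq = dpaa2_io_store_next(s, &is_last);
		if (dq)	/* NULL: result not written yet, keep polling */
			consume_fd(dpaa2_dq_fd(dq));	/* consume_fd() is hypothetical */
	}
	dpaa2_io_store_destroy(s);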
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 9f18be14dda6..f38f1f74fcd6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -49,9 +49,9 @@ struct rtllib_tkip_data {
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16];
@@ -66,8 +66,7 @@ static void *rtllib_tkip_init(int key_idx)
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->tx_tfm_arc4 = NULL;
@@ -81,8 +80,7 @@ static void *rtllib_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->rx_tfm_arc4 = NULL;
@@ -100,9 +98,9 @@ static void *rtllib_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -116,9 +114,9 @@ static void rtllib_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -337,7 +335,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -349,8 +347,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -420,7 +418,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if ((iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
@@ -447,8 +445,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
@@ -664,9 +662,9 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct rtllib_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index b3343a5d0fd6..d11ec39171d5 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -27,8 +27,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -41,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->rx_tfm = NULL;
@@ -61,8 +61,8 @@ static void *prism2_wep_init(int keyidx)
fail:
if (priv) {
- crypto_free_skcipher(priv->tx_tfm);
- crypto_free_skcipher(priv->rx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
@@ -74,8 +74,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -135,7 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -146,8 +146,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
err = crypto_skcipher_encrypt(req);
@@ -199,11 +199,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 1088fa0aee0e..829fa4bd253c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -53,9 +53,9 @@ struct ieee80211_tkip_data {
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
@@ -71,8 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -88,8 +87,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -110,9 +108,9 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -126,9 +124,9 @@ static void ieee80211_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -340,7 +338,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -348,9 +346,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -418,7 +416,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
@@ -440,10 +438,10 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
@@ -663,9 +661,9 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index b9f86be9e52b..d4a1bf0caa7a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -32,8 +32,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
return NULL;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm))
goto free_priv;
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm))
goto free_tx;
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
return priv;
free_tx:
- crypto_free_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
free_priv:
kfree(priv);
return NULL;
@@ -70,8 +70,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -128,7 +128,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -138,10 +138,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
@@ -193,12 +193,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index aff50eb09ca9..68ddee86a886 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -189,7 +189,7 @@ struct wusb_mac_scratch {
* NOTE: blen is not aligned to a block size; we pad with zeros, which
* is what sg[4] is for. Maybe there is a smarter way to do this.
*/
-static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_sync_skcipher *tfm_cbc,
struct crypto_cipher *tfm_aes,
struct wusb_mac_scratch *scratch,
void *mic,
@@ -198,7 +198,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
size_t blen)
{
int result = 0;
- SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -224,7 +224,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
if (!dst_buf)
goto error_dst_buf;
- iv = kzalloc(crypto_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
+ iv = kzalloc(crypto_sync_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
if (!iv)
goto error_iv;
@@ -251,7 +251,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
sg_init_one(&sg_dst, dst_buf, dst_size);
- skcipher_request_set_tfm(req, tfm_cbc);
+ skcipher_request_set_sync_tfm(req, tfm_cbc);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
result = crypto_skcipher_encrypt(req);
@@ -298,19 +298,19 @@ ssize_t wusb_prf(void *out, size_t out_size,
{
ssize_t result, bytes = 0, bitr;
struct aes_ccm_nonce n = *_n;
- struct crypto_skcipher *tfm_cbc;
+ struct crypto_sync_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
- tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm_cbc = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
if (IS_ERR(tfm_cbc)) {
result = PTR_ERR(tfm_cbc);
printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
goto error_alloc_cbc;
}
- result = crypto_skcipher_setkey(tfm_cbc, key, 16);
+ result = crypto_sync_skcipher_setkey(tfm_cbc, key, 16);
if (result < 0) {
printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
goto error_setkey_cbc;
@@ -351,7 +351,7 @@ error_setkey_aes:
crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
- crypto_free_skcipher(tfm_cbc);
+ crypto_free_sync_skcipher(tfm_cbc);
error_alloc_cbc:
return result;
}
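
For orientation: the MAC computed here is a CBC-MAC, i.e. the last ciphertext block of a zero-IV cbc(aes) pass over the zero-padded input, which wusb_ccm_mac() then combines and truncates per the WUSB CCM rules. Conceptually (names hypothetical):

	u8 iv[AES_BLOCK_SIZE] = {};	/* CBC-MAC uses a zero IV */

	/* ... build the request over the padded message as above ... */
	err = crypto_skcipher_encrypt(req);
	/* The MAC is the final ciphertext block. */
	memcpy(mac, dst_buf + dst_size - AES_BLOCK_SIZE, AES_BLOCK_SIZE);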