path: root/drivers
author	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-28 16:04:56 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-28 16:04:56 -0700
commit	6159c49e12284b4880fd60e0575a71a40556a67e (patch)
tree	b969ffe7cd182d77052f91ec3d221e6726f0457c /drivers
parent	31e798fd6f0ff0acdc49c1a358b581730936a09a (diff)
parent	9f38b678ffc4e2ccf167a1131c0403dc4f5e1bb7 (diff)
download	linux-6159c49e12284b4880fd60e0575a71a40556a67e.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Algorithms:
   - Fix rmmod crash with x86/curve25519
   - Add ECDH NIST P384
   - Generate assembly files at build-time with perl scripts on arm
   - Switch to HMAC SHA512 DRBG as default DRBG

  Drivers:
   - Add sl3516 crypto engine
   - Add ECDH NIST P384 support in hisilicon/hpre
   - Add {ofb,cfb,ctr} over {aes,sm4} in hisilicon/sec
   - Add {ccm,gcm} over {aes,sm4} in hisilicon/sec
   - Enable omap hwrng driver for TI K3 family
   - Add support for AEAD algorithms in qce"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (142 commits)
  crypto: sl3516 - depends on HAS_IOMEM
  crypto: hisilicon/qm - implement for querying hardware tasks status.
  crypto: sl3516 - Fix build warning without CONFIG_PM
  MAINTAINERS: update caam crypto driver maintainers list
  crypto: nx - Fix numerous sparse byte-order warnings
  crypto: nx - Fix RCU warning in nx842_OF_upd_status
  crypto: api - Move crypto attr definitions out of crypto.h
  crypto: nx - Fix memcpy() over-reading in nonce
  crypto: hisilicon/sec - Fix spelling mistake "fallbcak" -> "fallback"
  crypto: sa2ul - Remove unused auth_len variable
  crypto: sl3516 - fix duplicated inclusion
  crypto: hisilicon/zip - adds the max shaper type rate
  crypto: hisilicon/hpre - adds the max shaper type rate
  crypto: hisilicon/sec - adds the max shaper type rate
  crypto: hisilicon/qm - supports to inquiry each function's QoS
  crypto: hisilicon/qm - add pf ping single vf function
  crypto: hisilicon/qm - merges the work initialization process into a single function
  crypto: hisilicon/qm - add the "alg_qos" file node
  crypto: hisilicon/qm - supports writing QoS int the host
  crypto: api - remove CRYPTOA_U32 and related functions
  ...
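For context on how the providers added by this merge are reached, the sketch below (illustrative only, not part of the patch; the helper name example_ecb_aes_one_block is hypothetical) shows a kernel caller requesting "ecb(aes)" through the generic skcipher API. The crypto core picks the highest-priority registered implementation, so on a Gemini SoC with the new sl3516 engine loaded (cra_priority 400, driver name "ecb-aes-sl3516") the request is routed to the hardware; otherwise a software implementation serves it.

  #include <crypto/aes.h>
  #include <crypto/skcipher.h>
  #include <linux/crypto.h>
  #include <linux/scatterlist.h>
  #include <linux/slab.h>

  /* Hypothetical helper: encrypt one AES block in place through whatever
   * "ecb(aes)" provider the crypto core selects (e.g. ecb-aes-sl3516).
   */
  static int example_ecb_aes_one_block(void)
  {
  	u8 key[AES_KEYSIZE_128] = { };	/* illustrative all-zero key */
  	struct crypto_skcipher *tfm;
  	struct skcipher_request *req;
  	DECLARE_CRYPTO_WAIT(wait);
  	struct scatterlist sg;
  	u8 *buf;
  	int err;

  	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
  	if (IS_ERR(tfm))
  		return PTR_ERR(tfm);

  	/* heap buffer: hardware offloaders DMA-map src/dst scatterlists */
  	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
  	if (!buf) {
  		err = -ENOMEM;
  		goto out_free_tfm;
  	}

  	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
  	if (err)
  		goto out_free_buf;

  	req = skcipher_request_alloc(tfm, GFP_KERNEL);
  	if (!req) {
  		err = -ENOMEM;
  		goto out_free_buf;
  	}

  	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
  	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
  				      crypto_req_done, &wait);
  	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, NULL);

  	/* async hardware drivers complete via the callback; wait for it */
  	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

  	skcipher_request_free(req);
  out_free_buf:
  	kfree(buf);
  out_free_tfm:
  	crypto_free_skcipher(tfm);
  	return err;
  }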
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/char/hw_random/Kconfig | 10
-rw-r--r-- drivers/char/hw_random/amd-rng.c | 2
-rw-r--r-- drivers/char/hw_random/core.c | 38
-rw-r--r-- drivers/char/hw_random/exynos-trng.c | 7
-rw-r--r-- drivers/char/hw_random/ks-sa-rng.c | 3
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 6
-rw-r--r-- drivers/crypto/Kconfig | 41
-rw-r--r-- drivers/crypto/Makefile | 1
-rw-r--r-- drivers/crypto/cavium/cpt/cptpf_main.c | 2
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 10
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_isr.c | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 21
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_mbx.c | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 16
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-dmaengine.c | 3
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 4
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 6
-rw-r--r-- drivers/crypto/gemini/Makefile | 2
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-cipher.c | 387
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-core.c | 535
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-rng.c | 61
-rw-r--r-- drivers/crypto/gemini/sl3516-ce.h | 347
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 185
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 256
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 1843
-rw-r--r-- drivers/crypto/hisilicon/qm.h | 17
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec.h | 23
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 1036
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 193
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 100
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 99
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 413
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.h | 2
-rw-r--r-- drivers/crypto/marvell/octeontx2/Makefile | 13
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.c | 93
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.h | 36
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 23
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h | 16
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 9
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 10
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 160
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 32
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h | 8
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf.h | 3
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 49
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 43
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 17
-rw-r--r-- drivers/crypto/nx/nx-842-pseries.c | 31
-rw-r--r-- drivers/crypto/nx/nx-aes-cbc.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-ccm.c | 4
-rw-r--r-- drivers/crypto/nx/nx-aes-ctr.c | 4
-rw-r--r-- drivers/crypto/nx/nx-aes-ecb.c | 2
-rw-r--r-- drivers/crypto/nx/nx-aes-gcm.c | 2
-rw-r--r-- drivers/crypto/nx/nx-common-powernv.c | 4
-rw-r--r-- drivers/crypto/nx/nx-sha256.c | 19
-rw-r--r-- drivers/crypto/nx/nx-sha512.c | 19
-rw-r--r-- drivers/crypto/nx/nx_csbcpb.h | 4
-rw-r--r-- drivers/crypto/omap-des.c | 9
-rw-r--r-- drivers/crypto/omap-sham.c | 4
-rw-r--r-- drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h | 2
-rw-r--r-- drivers/crypto/qat/qat_common/qat_hal.c | 14
-rw-r--r-- drivers/crypto/qat/qat_common/qat_uclo.c | 12
-rw-r--r-- drivers/crypto/qce/Makefile | 1
-rw-r--r-- drivers/crypto/qce/aead.c | 847
-rw-r--r-- drivers/crypto/qce/aead.h | 56
-rw-r--r-- drivers/crypto/qce/common.c | 196
-rw-r--r-- drivers/crypto/qce/common.h | 9
-rw-r--r-- drivers/crypto/qce/core.c | 4
-rw-r--r-- drivers/crypto/qce/skcipher.c | 19
-rw-r--r-- drivers/crypto/sa2ul.c | 50
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 1
-rw-r--r-- drivers/soc/ixp4xx/ixp4xx-npe.c | 7
75 files changed, 6511 insertions, 1005 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1fe006f3f12f..c11f12d4ab53 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -165,17 +165,17 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
default HW_RANDOM
help
- This driver provides kernel-side support for the Random Number
+ This driver provides kernel-side support for the Random Number
Generator hardware found on OMAP16xx, OMAP2/3/4/5, AM33xx/AM43xx
multimedia processors, and Marvell Armada 7k/8k SoCs.
To compile this driver as a module, choose M here: the
module will be called omap-rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_OMAP3_ROM
tristate "OMAP3 ROM Random Number Generator support"
@@ -485,13 +485,13 @@ config HW_RANDOM_NPCM
depends on ARCH_NPCM || COMPILE_TEST
default HW_RANDOM
help
- This driver provides support for the Random Number
+ This driver provides support for the Random Number
Generator hardware available in Nuvoton NPCM SoCs.
To compile this driver as a module, choose M here: the
module will be called npcm-rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_KEYSTONE
depends on ARCH_KEYSTONE || COMPILE_TEST
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 9959c762da2f..d8d4ef5214a1 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -126,7 +126,7 @@ static struct hwrng amd_rng = {
static int __init mod_init(void)
{
- int err = -ENODEV;
+ int err;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
u32 pmbase;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index adb3c2bd7783..a3db27916256 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -319,11 +319,11 @@ static int enable_best_rng(void)
return ret;
}
-static ssize_t hwrng_attr_current_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t rng_current_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
- int err = -ENODEV;
+ int err;
struct hwrng *rng, *old_rng, *new_rng;
err = mutex_lock_interruptible(&rng_mutex);
@@ -354,9 +354,9 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
return err ? : len;
}
-static ssize_t hwrng_attr_current_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_current_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
ssize_t ret;
struct hwrng *rng;
@@ -371,9 +371,9 @@ static ssize_t hwrng_attr_current_show(struct device *dev,
return ret;
}
-static ssize_t hwrng_attr_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int err;
struct hwrng *rng;
@@ -392,22 +392,16 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return strlen(buf);
}
-static ssize_t hwrng_attr_selected_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rng_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
-static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
- hwrng_attr_current_show,
- hwrng_attr_current_store);
-static DEVICE_ATTR(rng_available, S_IRUGO,
- hwrng_attr_available_show,
- NULL);
-static DEVICE_ATTR(rng_selected, S_IRUGO,
- hwrng_attr_selected_show,
- NULL);
+static DEVICE_ATTR_RW(rng_current);
+static DEVICE_ATTR_RO(rng_available);
+static DEVICE_ATTR_RO(rng_selected);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 8e1fe3f8dd2d..9cc3d542dd0f 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -132,7 +132,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
return PTR_ERR(trng->mem);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Could not get runtime PM.\n");
goto err_pm_get;
@@ -165,7 +165,7 @@ err_register:
clk_disable_unprepare(trng->clk);
err_clock:
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
err_pm_get:
pm_runtime_disable(&pdev->dev);
@@ -196,10 +196,9 @@ static int __maybe_unused exynos_trng_resume(struct device *dev)
{
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Could not get runtime PM.\n");
- pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 8f1d47ff9799..2f2f21f1b659 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -241,10 +241,9 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable SA power-domain\n");
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index cede9f159102..00ff96703dd2 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -454,10 +454,9 @@ static int omap_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
- pm_runtime_put_noidle(&pdev->dev);
goto err_ioremap;
}
@@ -543,10 +542,9 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
struct omap_rng_dev *priv = dev_get_drvdata(dev);
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "Failed to runtime_get device: %d\n", ret);
- pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9a4c275a1335..ebcec460c045 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -266,6 +266,27 @@ config CRYPTO_DEV_NIAGARA2
Group, which can perform encryption, decryption, hashing,
checksumming, and raw copies.
+config CRYPTO_DEV_SL3516
+ tristate "Stormlink SL3516 crypto offloader"
+ depends on HAS_IOMEM
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_ECB
+ select CRYPTO_AES
+ select HW_RANDOM
+ depends on PM
+ help
+ This option allows you to have support for SL3516 crypto offloader.
+
+config CRYPTO_DEV_SL3516_DEBUG
+ bool "Enable SL3516 stats"
+ depends on CRYPTO_DEV_SL3516
+ depends on DEBUG_FS
+ help
+ Say y to enable SL3516 debug stats.
+ This will create /sys/kernel/debug/sl3516/stats for displaying
+ the number of requests per algorithm and other internal stats.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_LIB_DES
@@ -325,6 +346,11 @@ config CRYPTO_DEV_TALITOS2
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_LIB_DES
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -627,6 +653,12 @@ config CRYPTO_DEV_QCE_SHA
select CRYPTO_SHA1
select CRYPTO_SHA256
+config CRYPTO_DEV_QCE_AEAD
+ bool
+ depends on CRYPTO_DEV_QCE
+ select CRYPTO_AUTHENC
+ select CRYPTO_LIB_DES
+
choice
prompt "Algorithms enabled for QCE acceleration"
default CRYPTO_DEV_QCE_ENABLE_ALL
@@ -647,6 +679,7 @@ choice
bool "All supported algorithms"
select CRYPTO_DEV_QCE_SKCIPHER
select CRYPTO_DEV_QCE_SHA
+ select CRYPTO_DEV_QCE_AEAD
help
Enable all supported algorithms:
- AES (CBC, CTR, ECB, XTS)
@@ -672,6 +705,14 @@ choice
- SHA1, HMAC-SHA1
- SHA256, HMAC-SHA256
+ config CRYPTO_DEV_QCE_ENABLE_AEAD
+ bool "AEAD algorithms only"
+ select CRYPTO_DEV_QCE_AEAD
+ help
+ Enable AEAD algorithms only:
+ - authenc()
+ - ccm(aes)
+ - rfc4309(ccm(aes))
endchoice
config CRYPTO_DEV_QCE_SW_MAX_LEN
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fa22cb19e242..1fe5120eb966 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ee42e8a245..8c32d0eb8fcf 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -401,7 +401,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
}
-/**
+/*
* Ensure all cores are disengaged from all groups by
* calling cpt_disable_all_cores() before calling this
* function.
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 4fe7898c8561..153004bdfb5c 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -9,8 +9,8 @@
/**
* get_free_pending_entry - get free entry from pending queue
- * @param pqinfo: pending_qinfo structure
- * @param qno: queue number
+ * @q: pending queue
+ * @qlen: queue length
*/
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
int qlen)
@@ -244,11 +244,7 @@ static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
memcpy(ent, (void *)cmd, qinfo->cmd_size);
if (++queue->idx >= queue->qhead->size / 64) {
- struct hlist_node *node;
-
- hlist_for_each(node, &queue->chead) {
- chunk = hlist_entry(node, struct command_chunk,
- nextchunk);
+ hlist_for_each_entry(chunk, &queue->chead, nextchunk) {
if (chunk == queue->qhead) {
continue;
} else {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index c288c4b51783..f19e520da6d0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -307,6 +307,10 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
* Entry 192: NPS_CORE_INT_ACTIVE
*/
nr_vecs = pci_msix_vec_count(pdev);
+ if (nr_vecs < 0) {
+ dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
+ return nr_vecs;
+ }
/* Enable MSI-X */
ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index d385daf2c71c..96bc7b5c6532 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -35,7 +35,7 @@ static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;
-/**
+/*
* nitrox_pci_tbl - PCI Device ID Table
*/
static const struct pci_device_id nitrox_pci_tbl[] = {
@@ -65,7 +65,7 @@ struct ucode {
u64 code[];
};
-/**
+/*
* write_to_ucd_unit - Write Firmware to NITROX UCD unit
*/
static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
@@ -424,8 +424,7 @@ static int nitrox_probe(struct pci_dev *pdev,
err = nitrox_device_flr(pdev);
if (err) {
dev_err(&pdev->dev, "FLR failed\n");
- pci_disable_device(pdev);
- return err;
+ goto flr_fail;
}
if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
@@ -434,16 +433,13 @@ static int nitrox_probe(struct pci_dev *pdev,
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "DMA configuration failed\n");
- pci_disable_device(pdev);
- return err;
+ goto flr_fail;
}
}
err = pci_request_mem_regions(pdev, nitrox_driver_name);
- if (err) {
- pci_disable_device(pdev);
- return err;
- }
+ if (err)
+ goto flr_fail;
pci_set_master(pdev);
ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
@@ -479,7 +475,7 @@ static int nitrox_probe(struct pci_dev *pdev,
err = nitrox_pf_sw_init(ndev);
if (err)
- goto ioremap_err;
+ goto pf_sw_fail;
err = nitrox_pf_hw_init(ndev);
if (err)
@@ -509,12 +505,15 @@ crypto_fail:
smp_mb__after_atomic();
pf_hw_fail:
nitrox_pf_sw_cleanup(ndev);
+pf_sw_fail:
+ iounmap(ndev->bar_addr);
ioremap_err:
nitrox_remove_from_devlist(ndev);
kfree(ndev);
pci_set_drvdata(pdev, NULL);
ndev_fail:
pci_release_mem_regions(pdev);
+flr_fail:
pci_disable_device(pdev);
return err;
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index c1af9d4fca6e..2e9c0d214363 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -8,7 +8,7 @@
#define RING_TO_VFNO(_x, _y) ((_x) / (_y))
-/**
+/*
* mbx_msg_type - Mailbox message types
*/
enum mbx_msg_type {
@@ -18,7 +18,7 @@ enum mbx_msg_type {
MBX_MSG_TYPE_NACK,
};
-/**
+/*
* mbx_msg_opcode - Mailbox message opcodes
*/
enum mbx_msg_opcode {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index df95ba26b414..55c18da4a500 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -19,7 +19,7 @@
#define REQ_BACKLOG 2
#define REQ_POSTED 3
-/**
+/*
* Response codes from SE microcode
* 0x00 - Success
* Completion with no error
@@ -159,7 +159,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
struct se_crypto_request *req)
{
struct device *dev = DEV(sr->ndev);
- struct scatterlist *sg = req->src;
+ struct scatterlist *sg;
int i, nents, ret = 0;
nents = dma_map_sg(dev, req->src, sg_nents(req->src),
@@ -279,6 +279,7 @@ static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
/**
* post_se_instr - Post SE instruction to Packet Input ring
* @sr: Request structure
+ * @cmdq: Command queue structure
*
* Returns 0 if successful or a negative error code,
* if no space in ring.
@@ -369,9 +370,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
}
/**
- * nitrox_se_request - Send request to SE core
+ * nitrox_process_se_request - Send request to SE core
* @ndev: NITROX device
* @req: Crypto request
+ * @callback: Completion callback
+ * @cb_arg: Completion callback arguments
*
* Returns 0 on success, or a negative error code.
*/
@@ -526,9 +529,8 @@ static bool sr_completed(struct nitrox_softreq *sr)
}
/**
- * process_request_list - process completed requests
- * @ndev: N5 device
- * @qno: queue to operate
+ * process_response_list - process completed requests
+ * @cmdq: Command queue structure
*
* Returns the number of responses processed.
*/
@@ -578,7 +580,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
}
}
-/**
+/*
* pkt_slc_resp_tasklet - post processing of SE responses
*/
void pkt_slc_resp_tasklet(unsigned long data)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index a553ac65f324..248b4fff1c72 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -20,7 +20,7 @@ struct nitrox_cipher {
enum flexi_cipher value;
};
-/**
+/*
* supported cipher list
*/
static const struct nitrox_cipher flexi_cipher_table[] = {
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 6777582aa1ce..9ce4b68e9c48 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -470,7 +470,7 @@ int ccp_cmd_queue_thread(void *data)
/**
* ccp_alloc_struct - allocate and initialize the ccp_device struct
*
- * @dev: device struct of the CCP
+ * @sp: sp_device struct of the CCP
*/
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 0770a83bf1a5..d718db224be4 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -307,8 +307,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
spin_lock_irqsave(&chan->lock, flags);
cookie = dma_cookie_assign(tx_desc);
- list_del(&desc->entry);
- list_add_tail(&desc->entry, &chan->pending);
+ list_move_tail(&desc->entry, &chan->pending);
spin_unlock_irqrestore(&chan->lock, flags);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 3506b2050fb8..91808402e0bf 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -43,6 +43,10 @@ static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
+
static bool psp_dead;
static int psp_timeout;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f468594ef8af..6fb6ba35f89d 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -222,7 +222,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
ret);
- goto e_err;
+ goto free_irqs;
}
}
@@ -230,10 +230,12 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = sp_init(sp);
if (ret)
- goto e_err;
+ goto free_irqs;
return 0;
+free_irqs:
+ sp_free_irqs(sp);
e_err:
dev_notice(dev, "initialization failed\n");
return ret;
diff --git a/drivers/crypto/gemini/Makefile b/drivers/crypto/gemini/Makefile
new file mode 100644
index 000000000000..c73c8b69260d
--- /dev/null
+++ b/drivers/crypto/gemini/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SL3516) += sl3516-ce.o
+sl3516-ce-y += sl3516-ce-core.o sl3516-ce-cipher.o sl3516-ce-rng.o
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
new file mode 100644
index 000000000000..b41c2f5fc495
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sl3516-ce-cipher.c - hardware cryptographic offloader for Stormlink SL3516 SoC
+ *
+ * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
+ *
+ * This file adds support for AES cipher with 128,192,256 bits keysize in
+ * ECB mode.
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include "sl3516-ce.h"
+
+/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
+static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_dev *ce = op->ce;
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ struct scatterlist *sg;
+
+ if (areq->cryptlen == 0 || areq->cryptlen % 16) {
+ ce->fallback_mod16++;
+ return true;
+ }
+
+ /*
+ * check if we have enough descriptors for TX
+ * Note: TX need one control desc for each SG
+ */
+ if (sg_nents(areq->src) > MAXDESC / 2) {
+ ce->fallback_sg_count_tx++;
+ return true;
+ }
+ /* check if we have enough descriptors for RX */
+ if (sg_nents(areq->dst) > MAXDESC) {
+ ce->fallback_sg_count_rx++;
+ return true;
+ }
+
+ sg = areq->src;
+ while (sg) {
+ if ((sg->length % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if ((sg_dma_len(sg) % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ ce->fallback_align16++;
+ return true;
+ }
+ sg = sg_next(sg);
+ }
+ sg = areq->dst;
+ while (sg) {
+ if ((sg->length % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if ((sg_dma_len(sg) % 16) != 0) {
+ ce->fallback_mod16++;
+ return true;
+ }
+ if (!IS_ALIGNED(sg->offset, 16)) {
+ ce->fallback_align16++;
+ return true;
+ }
+ sg = sg_next(sg);
+ }
+
+ /* need same numbers of SG (with same length) for source and destination */
+ in_sg = areq->src;
+ out_sg = areq->dst;
+ while (in_sg && out_sg) {
+ if (in_sg->length != out_sg->length) {
+ ce->fallback_not_same_len++;
+ return true;
+ }
+ in_sg = sg_next(in_sg);
+ out_sg = sg_next(out_sg);
+ }
+ if (in_sg || out_sg)
+ return true;
+
+ return false;
+}
+
+static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sl3516_ce_alg_template *algt;
+ int err;
+
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt->stat_fb++;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->op_dir == CE_DECRYPTION)
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ return err;
+}
+
+static int sl3516_ce_cipher(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_dev *ce = op->ce;
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sl3516_ce_alg_template *algt;
+ struct scatterlist *sg;
+ unsigned int todo, len;
+ struct pkt_control_ecb *ecb;
+ int nr_sgs = 0;
+ int nr_sgd = 0;
+ int err = 0;
+ int i;
+
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+
+ dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
+ crypto_tfm_alg_name(areq->base.tfm),
+ areq->cryptlen,
+ rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
+ op->keylen);
+
+ algt->stat_req++;
+
+ if (areq->src == areq->dst) {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = nr_sgs;
+ } else {
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+ nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->src;
+ while (i < nr_sgs && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgs_next;
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_src[i].len = todo;
+ dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+sgs_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ len = areq->cryptlen;
+ i = 0;
+ sg = areq->dst;
+ while (i < nr_sgd && sg && len) {
+ if (sg_dma_len(sg) == 0)
+ goto sgd_next;
+ rctx->t_dst[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_dst[i].len = todo;
+ dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
+ areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
+ len -= todo;
+ i++;
+
+sgd_next:
+ sg = sg_next(sg);
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+
+ switch (algt->mode) {
+ case ECB_AES:
+ rctx->pctrllen = sizeof(struct pkt_control_ecb);
+ ecb = (struct pkt_control_ecb *)ce->pctrl;
+
+ rctx->tqflag = TQ0_TYPE_CTRL;
+ rctx->tqflag |= TQ1_CIPHER;
+ ecb->control.op_mode = rctx->op_dir;
+ ecb->control.cipher_algorithm = ECB_AES;
+ ecb->cipher.header_len = 0;
+ ecb->cipher.algorithm_len = areq->cryptlen;
+ cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
+ rctx->h = &ecb->cipher;
+
+ rctx->tqflag |= TQ4_KEY0;
+ rctx->tqflag |= TQ5_KEY4;
+ rctx->tqflag |= TQ6_KEY6;
+ ecb->control.aesnk = op->keylen / 4;
+ break;
+ }
+
+ rctx->nr_sgs = nr_sgs;
+ rctx->nr_sgd = nr_sgd;
+ err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));
+
+theend_sgs:
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
+ }
+
+theend:
+
+ return err;
+}
+
+static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+{
+ int err;
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+
+ err = sl3516_ce_cipher(breq);
+ crypto_finalize_skcipher_request(engine, breq, err);
+
+ return 0;
+}
+
+int sl3516_ce_skdecrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+
+ memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
+ rctx->op_dir = CE_DECRYPTION;
+
+ if (sl3516_ce_need_fallback(areq))
+ return sl3516_ce_cipher_fallback(areq);
+
+ engine = op->ce->engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sl3516_ce_skencrypt(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_engine *engine;
+
+ memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
+ rctx->op_dir = CE_ENCRYPTION;
+
+ if (sl3516_ce_need_fallback(areq))
+ return sl3516_ce_cipher_fallback(areq);
+
+ engine = op->ce->engine;
+
+ return crypto_transfer_skcipher_request_to_engine(engine, areq);
+}
+
+int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct sl3516_ce_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
+ int err;
+
+ memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));
+
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ op->ce = algt->ce;
+
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
+ dev_info(op->ce->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&sktfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
+
+ op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ err = pm_runtime_get_sync(op->ce->dev);
+ if (err < 0)
+ goto error_pm;
+
+ return 0;
+error_pm:
+ pm_runtime_put_noidle(op->ce->dev);
+ crypto_free_skcipher(op->fallback_tfm);
+ return err;
+}
+
+void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+ kfree_sensitive(op->key);
+ crypto_free_skcipher(op->fallback_tfm);
+ pm_runtime_put_sync_suspend(op->ce->dev);
+}
+
+int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sl3516_ce_dev *ce = op->ce;
+
+ switch (keylen) {
+ case 128 / 8:
+ break;
+ case 192 / 8:
+ break;
+ case 256 / 8:
+ break;
+ default:
+ dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
+ return -EINVAL;
+ }
+ kfree_sensitive(op->key);
+ op->keylen = keylen;
+ op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!op->key)
+ return -ENOMEM;
+
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
+}
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
new file mode 100644
index 000000000000..da6cd529a6c0
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sl3516-ce-core.c - hardware cryptographic offloader for Stormlink SL3516 SoC
+ *
+ * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * Core file which registers crypto algorithms supported by the CryptoEngine
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sl3516-ce.h"
+
+static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
+{
+ const size_t sz = sizeof(struct descriptor) * MAXDESC;
+ int i;
+
+ ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
+ if (!ce->tx)
+ return -ENOMEM;
+ ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
+ if (!ce->rx)
+ goto err_rx;
+
+ for (i = 0; i < MAXDESC; i++) {
+ ce->tx[i].frame_ctrl.bits.own = CE_CPU;
+ ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
+ }
+ ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;
+
+ for (i = 0; i < MAXDESC; i++) {
+ ce->rx[i].frame_ctrl.bits.own = CE_CPU;
+ ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
+ }
+ ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;
+
+ ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
+ &ce->dctrl, GFP_KERNEL);
+ if (!ce->pctrl)
+ goto err_pctrl;
+
+ return 0;
+err_pctrl:
+ dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
+err_rx:
+ dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
+ return -ENOMEM;
+}
+
+static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
+{
+ const size_t sz = sizeof(struct descriptor) * MAXDESC;
+
+ dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
+ dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
+ dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
+ ce->dctrl);
+}
+
+static void start_dma_tx(struct sl3516_ce_dev *ce)
+{
+ u32 v;
+
+ v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE | \
+ TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;
+
+ writel(v, ce->base + IPSEC_TXDMA_CTRL);
+}
+
+static void start_dma_rx(struct sl3516_ce_dev *ce)
+{
+ u32 v;
+
+ v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE | \
+ RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH | \
+ RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR | \
+ RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;
+
+ writel(v, ce->base + IPSEC_RXDMA_CTRL);
+}
+
+static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
+{
+ struct descriptor *dd;
+
+ dd = &ce->tx[ce->ctx];
+ ce->ctx++;
+ if (ce->ctx >= MAXDESC)
+ ce->ctx = 0;
+ return dd;
+}
+
+static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
+{
+ struct descriptor *rdd;
+
+ rdd = &ce->rx[ce->crx];
+ ce->crx++;
+ if (ce->crx >= MAXDESC)
+ ce->crx = 0;
+ return rdd;
+}
+
+int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
+ const char *name)
+{
+ struct descriptor *dd, *rdd = NULL;
+ u32 v;
+ int i, err = 0;
+
+ ce->stat_req++;
+
+ reinit_completion(&ce->complete);
+ ce->status = 0;
+
+ for (i = 0; i < rctx->nr_sgd; i++) {
+ dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
+ i, rctx->nr_sgd, rctx->t_dst[i].len);
+ rdd = get_desc_rx(ce);
+ rdd->buf_adr = rctx->t_dst[i].addr;
+ rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
+ rdd->frame_ctrl.bits.own = CE_DMA;
+ }
+ rdd->next_desc.bits.eofie = 1;
+
+ for (i = 0; i < rctx->nr_sgs; i++) {
+ dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
+ i, rctx->nr_sgs, rctx->t_src[i].len);
+ rctx->h->algorithm_len = rctx->t_src[i].len;
+
+ dd = get_desc_tx(ce);
+ dd->frame_ctrl.raw = 0;
+ dd->flag_status.raw = 0;
+ dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
+ dd->buf_adr = ce->dctrl;
+ dd->flag_status.tx_flag.tqflag = rctx->tqflag;
+ dd->next_desc.bits.eofie = 0;
+ dd->next_desc.bits.dec = 0;
+ dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
+ dd->frame_ctrl.bits.own = CE_DMA;
+
+ dd = get_desc_tx(ce);
+ dd->frame_ctrl.raw = 0;
+ dd->flag_status.raw = 0;
+ dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
+ dd->buf_adr = rctx->t_src[i].addr;
+ dd->flag_status.tx_flag.tqflag = 0;
+ dd->next_desc.bits.eofie = 0;
+ dd->next_desc.bits.dec = 0;
+ dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
+ dd->frame_ctrl.bits.own = CE_DMA;
+ start_dma_tx(ce);
+ start_dma_rx(ce);
+ }
+ wait_for_completion_interruptible_timeout(&ce->complete,
+ msecs_to_jiffies(5000));
+ if (ce->status == 0) {
+ dev_err(ce->dev, "DMA timeout for %s\n", name);
+ err = -EFAULT;
+ }
+ v = readl(ce->base + IPSEC_STATUS_REG);
+ if (v & 0xFFF) {
+ dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+static irqreturn_t ce_irq_handler(int irq, void *data)
+{
+ struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
+ u32 v;
+
+ ce->stat_irq++;
+
+ v = readl(ce->base + IPSEC_DMA_STATUS);
+ writel(v, ce->base + IPSEC_DMA_STATUS);
+
+ if (v & DMA_STATUS_TS_DERR)
+ dev_err(ce->dev, "AHB bus Error While Tx !!!\n");
+ if (v & DMA_STATUS_TS_PERR)
+ dev_err(ce->dev, "Tx Descriptor Protocol Error !!!\n");
+ if (v & DMA_STATUS_RS_DERR)
+ dev_err(ce->dev, "AHB bus Error While Rx !!!\n");
+ if (v & DMA_STATUS_RS_PERR)
+ dev_err(ce->dev, "Rx Descriptor Protocol Error !!!\n");
+
+ if (v & DMA_STATUS_TS_EOFI)
+ ce->stat_irq_tx++;
+ if (v & DMA_STATUS_RS_EOFI) {
+ ce->status = 1;
+ complete(&ce->complete);
+ ce->stat_irq_rx++;
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sl3516_ce_alg_template ce_algs[] = {
+{
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .mode = ECB_AES,
+ .alg.skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sl3516",
+ .cra_priority = 400,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0xf,
+ .cra_init = sl3516_ce_cipher_init,
+ .cra_exit = sl3516_ce_cipher_exit,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sl3516_ce_aes_setkey,
+ .encrypt = sl3516_ce_skencrypt,
+ .decrypt = sl3516_ce_skdecrypt,
+ }
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct sl3516_ce_dev *ce = seq->private;
+ unsigned int i;
+
+ seq_printf(seq, "HWRNG %lu %lu\n",
+ ce->hwrng_stat_req, ce->hwrng_stat_bytes);
+ seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
+ seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
+ seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
+ seq_printf(seq, "nreq %lu\n", ce->stat_req);
+ seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
+ seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
+ seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
+ seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
+ seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ ce_algs[i].alg.skcipher.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ }
+ }
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
+#endif
+
+static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ ce_algs[i].ce = ce;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ce->dev, "DEBUG: Register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ if (err) {
+ dev_err(ce->dev, "Fail to register %s\n",
+ ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].ce = NULL;
+ return err;
+ }
+ break;
+ default:
+ ce_algs[i].ce = NULL;
+ dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
+ }
+ }
+ return 0;
+}
+
+static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
+ if (!ce_algs[i].ce)
+ continue;
+ switch (ce_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.skcipher.base.cra_name);
+ crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ break;
+ }
+ }
+}
+
+static void sl3516_ce_start(struct sl3516_ce_dev *ce)
+{
+ ce->ctx = 0;
+ ce->crx = 0;
+ writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
+ writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
+ writel(0, ce->base + IPSEC_DMA_STATUS);
+}
+
+/*
+ * Power management strategy: The device is suspended unless a TFM exists for
+ * one of the algorithms proposed by this driver.
+ */
+static int sl3516_ce_pm_suspend(struct device *dev)
+{
+ struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
+
+ reset_control_assert(ce->reset);
+ clk_disable_unprepare(ce->clks);
+ return 0;
+}
+
+static int sl3516_ce_pm_resume(struct device *dev)
+{
+ struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(ce->clks);
+ if (err) {
+ dev_err(ce->dev, "Cannot prepare_enable\n");
+ goto error;
+ }
+ err = reset_control_deassert(ce->reset);
+ if (err) {
+ dev_err(ce->dev, "Cannot deassert reset control\n");
+ goto error;
+ }
+
+ sl3516_ce_start(ce);
+
+ return 0;
+error:
+ sl3516_ce_pm_suspend(dev);
+ return err;
+}
+
+static const struct dev_pm_ops sl3516_ce_pm_ops = {
+ SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
+};
+
+static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
+{
+ int err;
+
+ pm_runtime_use_autosuspend(ce->dev);
+ pm_runtime_set_autosuspend_delay(ce->dev, 2000);
+
+ err = pm_runtime_set_suspended(ce->dev);
+ if (err)
+ return err;
+ pm_runtime_enable(ce->dev);
+ return err;
+}
+
+static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
+{
+ pm_runtime_disable(ce->dev);
+}
+
+static int sl3516_ce_probe(struct platform_device *pdev)
+{
+ struct sl3516_ce_dev *ce;
+ int err, irq;
+ u32 v;
+
+ ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ ce->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ce);
+
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ce->base))
+ return PTR_ERR(ce->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
+ if (err) {
+ dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
+ return err;
+ }
+
+ ce->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(ce->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
+ "No reset control found\n");
+ ce->clks = devm_clk_get(ce->dev, NULL);
+ if (IS_ERR(ce->clks)) {
+ err = PTR_ERR(ce->clks);
+ dev_err(ce->dev, "Cannot get clock err=%d\n", err);
+ return err;
+ }
+
+ err = sl3516_ce_desc_init(ce);
+ if (err)
+ return err;
+
+ err = sl3516_ce_pm_init(ce);
+ if (err)
+ goto error_pm;
+
+ init_completion(&ce->complete);
+
+ ce->engine = crypto_engine_alloc_init(ce->dev, true);
+ if (!ce->engine) {
+ dev_err(ce->dev, "Cannot allocate engine\n");
+ err = -ENOMEM;
+ goto error_engine;
+ }
+
+ err = crypto_engine_start(ce->engine);
+ if (err) {
+ dev_err(ce->dev, "Cannot start engine\n");
+ goto error_engine;
+ }
+
+ err = sl3516_ce_register_algs(ce);
+ if (err)
+ goto error_alg;
+
+ err = sl3516_ce_rng_register(ce);
+ if (err)
+ goto error_rng;
+
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
+ goto error_pmuse;
+
+ v = readl(ce->base + IPSEC_ID);
+ dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
+ v & GENMASK(31, 4),
+ v & GENMASK(3, 0));
+ v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
+ dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
+ v & GENMASK(15, 4),
+ v & GENMASK(3, 0));
+
+ pm_runtime_put_sync(ce->dev);
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ /* Ignore error of debugfs */
+ ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
+ ce->dbgfs_stats = debugfs_create_file("stats", 0444,
+ ce->dbgfs_dir, ce,
+ &sl3516_ce_debugfs_fops);
+#endif
+
+ return 0;
+error_pmuse:
+ sl3516_ce_rng_unregister(ce);
+error_rng:
+ sl3516_ce_unregister_algs(ce);
+error_alg:
+ crypto_engine_exit(ce->engine);
+error_engine:
+ sl3516_ce_pm_exit(ce);
+error_pm:
+ sl3516_ce_free_descs(ce);
+ return err;
+}
+
+static int sl3516_ce_remove(struct platform_device *pdev)
+{
+ struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);
+
+ sl3516_ce_rng_unregister(ce);
+ sl3516_ce_unregister_algs(ce);
+ crypto_engine_exit(ce->engine);
+ sl3516_ce_pm_exit(ce);
+ sl3516_ce_free_descs(ce);
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ debugfs_remove_recursive(ce->dbgfs_dir);
+#endif
+
+ return 0;
+}
+
+static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
+ { .compatible = "cortina,sl3516-crypto"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
+
+static struct platform_driver sl3516_ce_driver = {
+ .probe = sl3516_ce_probe,
+ .remove = sl3516_ce_remove,
+ .driver = {
+ .name = "sl3516-crypto",
+ .pm = &sl3516_ce_pm_ops,
+ .of_match_table = sl3516_ce_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sl3516_ce_driver);
+
+MODULE_DESCRIPTION("SL3516 cryptographic offloader");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
diff --git a/drivers/crypto/gemini/sl3516-ce-rng.c b/drivers/crypto/gemini/sl3516-ce-rng.c
new file mode 100644
index 000000000000..76931ec1cec5
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce-rng.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sl3516-ce-rng.c - hardware cryptographic offloader for SL3516 SoC.
+ *
+ * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handle the RNG found in the SL3516 crypto engine
+ */
+#include "sl3516-ce.h"
+#include <linux/pm_runtime.h>
+#include <linux/hw_random.h>
+
+static int sl3516_ce_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct sl3516_ce_dev *ce;
+ u32 *data = buf;
+ size_t read = 0;
+ int err;
+
+ ce = container_of(rng, struct sl3516_ce_dev, trng);
+
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ ce->hwrng_stat_req++;
+ ce->hwrng_stat_bytes += max;
+#endif
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ce->dev);
+ return err;
+ }
+
+ while (read < max) {
+ *data = readl(ce->base + IPSEC_RAND_NUM_REG);
+ data++;
+ read += 4;
+ }
+
+ pm_runtime_put(ce->dev);
+
+ return read;
+}
+
+int sl3516_ce_rng_register(struct sl3516_ce_dev *ce)
+{
+ int ret;
+
+ ce->trng.name = "SL3516 Crypto Engine RNG";
+ ce->trng.read = sl3516_ce_rng_read;
+ ce->trng.quality = 700;
+
+ ret = hwrng_register(&ce->trng);
+ if (ret)
+ dev_err(ce->dev, "Fail to register the RNG\n");
+ return ret;
+}
+
+void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce)
+{
+ hwrng_unregister(&ce->trng);
+}
diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h
new file mode 100644
index 000000000000..4c0ec6c920d1
--- /dev/null
+++ b/drivers/crypto/gemini/sl3516-ce.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sl3516-ce.h - hardware cryptographic offloader for cortina/gemini SoC
+ *
+ * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
+ *
+ * General notes on this driver:
+ * Called either Crypto Acceleration Engine Module, Security Acceleration Engine
+ * or IPSEC module in the datasheet, it will be called Crypto Engine for short
+ * in this driver.
+ * The CE was designed to handle IPSEC and wifi(TKIP WEP) protocol.
+ * It can handle AES, DES, 3DES, MD5, WEP, TKIP, SHA1, HMAC(MD5), HMAC(SHA1),
+ * Michael cipher/digest suites.
+ * It acts the same as a network hw, with both RX and TX chained descriptors.
+ */
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/hw_random.h>
+
+#define TQ0_TYPE_DATA 0
+#define TQ0_TYPE_CTRL BIT(0)
+#define TQ1_CIPHER BIT(1)
+#define TQ2_AUTH BIT(2)
+#define TQ3_IV BIT(3)
+#define TQ4_KEY0 BIT(4)
+#define TQ5_KEY4 BIT(5)
+#define TQ6_KEY6 BIT(6)
+#define TQ7_AKEY0 BIT(7)
+#define TQ8_AKEY2 BIT(8)
+#define TQ9_AKEY2 BIT(9)
+
+#define ECB_AES 0x2
+
+#define DESC_LAST 0x01
+#define DESC_FIRST 0x02
+
+#define IPSEC_ID 0x0000
+#define IPSEC_STATUS_REG 0x00a8
+#define IPSEC_RAND_NUM_REG 0x00ac
+#define IPSEC_DMA_DEVICE_ID 0xff00
+#define IPSEC_DMA_STATUS 0xff04
+#define IPSEC_TXDMA_CTRL 0xff08
+#define IPSEC_TXDMA_FIRST_DESC 0xff0c
+#define IPSEC_TXDMA_CURR_DESC 0xff10
+#define IPSEC_RXDMA_CTRL 0xff14
+#define IPSEC_RXDMA_FIRST_DESC 0xff18
+#define IPSEC_RXDMA_CURR_DESC 0xff1c
+#define IPSEC_TXDMA_BUF_ADDR 0xff28
+#define IPSEC_RXDMA_BUF_ADDR 0xff38
+#define IPSEC_RXDMA_BUF_SIZE 0xff30
+
+#define CE_ENCRYPTION 0x01
+#define CE_DECRYPTION 0x03
+
+#define MAXDESC 6
+
+#define DMA_STATUS_RS_EOFI BIT(22)
+#define DMA_STATUS_RS_PERR BIT(24)
+#define DMA_STATUS_RS_DERR BIT(25)
+#define DMA_STATUS_TS_EOFI BIT(27)
+#define DMA_STATUS_TS_PERR BIT(29)
+#define DMA_STATUS_TS_DERR BIT(30)
+
+#define TXDMA_CTRL_START BIT(31)
+#define TXDMA_CTRL_CONTINUE BIT(30)
+#define TXDMA_CTRL_CHAIN_MODE BIT(29)
+/* the burst value is not documented in the datasheet */
+#define TXDMA_CTRL_BURST_UNK BIT(22)
+#define TXDMA_CTRL_INT_FAIL BIT(17)
+#define TXDMA_CTRL_INT_PERR BIT(16)
+
+#define RXDMA_CTRL_START BIT(31)
+#define RXDMA_CTRL_CONTINUE BIT(30)
+#define RXDMA_CTRL_CHAIN_MODE BIT(29)
+/* the burst value is not documented in the datasheet */
+#define RXDMA_CTRL_BURST_UNK BIT(22)
+#define RXDMA_CTRL_INT_FINISH BIT(18)
+#define RXDMA_CTRL_INT_FAIL BIT(17)
+#define RXDMA_CTRL_INT_PERR BIT(16)
+#define RXDMA_CTRL_INT_EOD BIT(15)
+#define RXDMA_CTRL_INT_EOF BIT(14)
+
+#define CE_CPU 0
+#define CE_DMA 1
+
+/*
+ * struct sl3516_ce_descriptor - descriptor for CE operations
+ * @frame_ctrl: Information for the current descriptor
+ * @flag_status: For send packet, describe flag of operations.
+ * @buf_adr: pointer to a send/recv buffer for data packet
+ * @next_desc: control linking to other descriptors
+ */
+struct descriptor {
+ union {
+ u32 raw;
+ /*
+ * struct desc_frame_ctrl - Information for the current descriptor
+ * @buffer_size: the size of buffer at buf_adr
+ * @desc_count: Upon completion of a DMA operation, DMA
+ * write the number of descriptors used
+ * for the current frame
+ * @checksum: unknown
+ * @authcomp: unknown
+ * @perr: Protocol error during processing this descriptor
+ * @derr: Data error during processing this descriptor
+ * @own: 0 if owned by CPU, 1 for DMA
+ */
+ struct desc_frame_ctrl {
+ u32 buffer_size :16;
+ u32 desc_count :6;
+ u32 checksum :6;
+ u32 authcomp :1;
+ u32 perr :1;
+ u32 derr :1;
+ u32 own :1;
+ } bits;
+ } frame_ctrl;
+
+ union {
+ u32 raw;
+ /*
+ * struct desc_flag_status - flag for this descriptor
+ * @tqflag: list of flag describing the type of operation
+ * to be performed.
+ */
+ struct desc_tx_flag_status {
+ u32 tqflag :10;
+ u32 unused :22;
+ } tx_flag;
+ } flag_status;
+
+ u32 buf_adr;
+
+ union {
+ u32 next_descriptor;
+ /*
+ * struct desc_next - describe chaining of descriptors
+ * @sof_eof: does the descriptor is first (0x11),
+ * the last (0x01), middle of a chan (0x00)
+ * or the only one (0x11)
+ * @dec: AHB bus address increase (0), decrease (1)
+ * @eofie: End of frame interrupt enable
+ * @ndar: Next descriptor address
+ */
+ struct desc_next {
+ u32 sof_eof :2;
+ u32 dec :1;
+ u32 eofie :1;
+ u32 ndar :28;
+ } bits;
+ } next_desc;
+};
+
+/*
+ * struct control - The value of this register is used to set the
+ * operation mode of the IPSec Module.
+ * @process_id: Used to identify the process. The number will be copied
+ * to the descriptor status of the received packet.
+ * @auth_check_len: Number of 32-bit words to be checked or appended by the
+ * authentication module
+ * @auth_algorithm:
+ * @auth_mode: 0:append 1:Check Authentication Result
+ * @fcs_stream_copy: 0:enable 1:disable authentication stream copy
+ * @mix_key_sel: 0:use rCipherKey0-3 1:use Key Mixer
+ * @aesnk: AES Key Size
+ * @cipher_algorithm: choice of CBC/ECE and AES/DES/3DES
+ * @op_mode: Operation Mode for the IPSec Module
+ */
+struct pkt_control_header {
+ u32 process_id :8;
+ u32 auth_check_len :3;
+ u32 un1 :1;
+ u32 auth_algorithm :3;
+ u32 auth_mode :1;
+ u32 fcs_stream_copy :1;
+ u32 un2 :2;
+ u32 mix_key_sel :1;
+ u32 aesnk :4;
+ u32 cipher_algorithm :3;
+ u32 un3 :1;
+ u32 op_mode :4;
+};
+
+struct pkt_control_cipher {
+ u32 algorithm_len :16;
+ u32 header_len :16;
+};
+
+/*
+ * struct pkt_control_ecb - control packet for ECB
+ */
+struct pkt_control_ecb {
+ struct pkt_control_header control;
+ struct pkt_control_cipher cipher;
+ unsigned char key[AES_MAX_KEY_SIZE];
+};
+
+/*
+ * struct sl3516_ce_dev - main container for all this driver information
+ * @base: base address
+ * @clks: clocks used
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @engine: ptr to the crypto/crypto_engine
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @dtx: base DMA address for TX descriptors
+ * @tx: base address of TX descriptors
+ * @drx: base DMA address for RX descriptors
+ * @rx: base address of RX descriptors
+ * @ctx: currently used TX descriptor
+ * @crx: currently used RX descriptor
+ * @trng: hw_random structure for the RNG
+ * @hwrng_stat_req: number of HWRNG requests
+ * @hwrng_stat_bytes: total number of bytes generated by the RNG
+ * @stat_irq: number of IRQs handled by the CE
+ * @stat_irq_tx: number of TX IRQs handled by the CE
+ * @stat_irq_rx: number of RX IRQs handled by the CE
+ * @stat_req: number of requests handled by the CE
+ * @fallback_sg_count_tx: number of fallbacks due to destination SG count
+ * @fallback_sg_count_rx: number of fallbacks due to source SG count
+ * @fallback_not_same_len: number of fallbacks due to difference in SG length
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sl3516_ce_dev {
+ void __iomem *base;
+ struct clk *clks;
+ struct reset_control *reset;
+ struct device *dev;
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+ dma_addr_t dtx;
+ struct descriptor *tx;
+ dma_addr_t drx;
+ struct descriptor *rx;
+ int ctx;
+ int crx;
+ struct hwrng trng;
+ unsigned long hwrng_stat_req;
+ unsigned long hwrng_stat_bytes;
+ unsigned long stat_irq;
+ unsigned long stat_irq_tx;
+ unsigned long stat_irq_rx;
+ unsigned long stat_req;
+ unsigned long fallback_sg_count_tx;
+ unsigned long fallback_sg_count_rx;
+ unsigned long fallback_not_same_len;
+ unsigned long fallback_mod16;
+ unsigned long fallback_align16;
+#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
+#endif
+ void *pctrl;
+ dma_addr_t dctrl;
+};
+
+struct sginfo {
+ u32 addr;
+ u32 len;
+};
+
+/*
+ * struct sl3516_ce_cipher_req_ctx - context for a skcipher request
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @pctrllen: the length of the ctrl packet
+ * @tqflag: the TQflag to set in data packet
+ * @h: pointer to the pkt_control_cipher header
+ * @nr_sgs: number of source SGs
+ * @nr_sgd: number of destination SGs
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
+ */
+struct sl3516_ce_cipher_req_ctx {
+ struct sginfo t_src[MAXDESC];
+ struct sginfo t_dst[MAXDESC];
+ u32 op_dir;
+ unsigned int pctrllen;
+ u32 tqflag;
+ struct pkt_control_cipher *h;
+ int nr_sgs;
+ int nr_sgd;
+ struct skcipher_request fallback_req; // keep at the end
+};
+
+/*
+ * struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @key: pointer to key data
+ * @keylen: len of the key
+ * @ce: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ *
+ * enginectx must be the first element
+ */
+struct sl3516_ce_cipher_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ u32 *key;
+ u32 keylen;
+ struct sl3516_ce_dev *ce;
+ struct crypto_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sl3516_ce_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @mode: value to be used in control packet for this algorithm
+ * @ce: pointer to the sl3516_ce_dev structure associated with
+ * this template
+ * @alg: one of the sub structs must be used
+ * @stat_req: number of requests done on this template
+ * @stat_fb: number of requests which used the fallback
+ * @stat_bytes: total data size processed by this template
+ */
+struct sl3516_ce_alg_template {
+ u32 type;
+ u32 mode;
+ struct sl3516_ce_dev *ce;
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+ unsigned long stat_req;
+ unsigned long stat_fb;
+ unsigned long stat_bytes;
+};
+
+int sl3516_ce_enqueue(struct crypto_async_request *areq, u32 type);
+
+int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sl3516_ce_cipher_init(struct crypto_tfm *tfm);
+void sl3516_ce_cipher_exit(struct crypto_tfm *tfm);
+int sl3516_ce_skdecrypt(struct skcipher_request *areq);
+int sl3516_ce_skencrypt(struct skcipher_request *areq);
+
+int sl3516_ce_run_task(struct sl3516_ce_dev *ce,
+ struct sl3516_ce_cipher_req_ctx *rctx, const char *name);
+
+int sl3516_ce_rng_register(struct sl3516_ce_dev *ce);
+void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce);
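The exported helpers above are wired into the crypto API through sl3516_ce_alg_template entries. A hypothetical ecb(aes) entry is shown below purely to illustrate how the pieces fit together; it is not the driver's actual table, and the priority/flags values are assumptions.

#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"

static struct sl3516_ce_alg_template ce_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.mode = 0,	/* placeholder: ECB opcode for the control packet */
	.alg.skcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sl3516_ce_aes_setkey,
		.encrypt	= sl3516_ce_skencrypt,
		.decrypt	= sl3516_ce_skdecrypt,
		.base = {
			.cra_name		= "ecb(aes)",
			.cra_driver_name	= "ecb-aes-sl3516",
			.cra_priority		= 400,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize		= sizeof(struct sl3516_ce_cipher_tfm_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= sl3516_ce_cipher_init,
			.cra_exit		= sl3516_ce_cipher_exit,
		}
	}
},
};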
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a380087c83f7..a032c192ef1d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -5,6 +5,7 @@
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
+#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
@@ -30,7 +31,6 @@ struct hpre_ctx;
#define HPRE_DH_G_FLAG 0x02
#define HPRE_TRY_SEND_TIMES 100
#define HPRE_INVLD_REQ_ID (-1)
-#define HPRE_DEV(ctx) (&((ctx)->qp->qm->pdev->dev))
#define HPRE_SQE_ALG_BITS 5
#define HPRE_SQE_DONE_SHIFT 30
@@ -39,12 +39,17 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+/* due to nist p521 */
+#define HPRE_ECC_MAX_KSZ 66
+
/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE 24
#define HPRE_ECC_NIST_P256_N_SIZE 32
+#define HPRE_ECC_NIST_P384_N_SIZE 48
/* size in bytes */
#define HPRE_ECC_HW256_KSZ_B 32
+#define HPRE_ECC_HW384_KSZ_B 48
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
@@ -102,6 +107,7 @@ struct hpre_curve25519_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
+ struct device *dev;
struct hpre_asym_request **req_list;
struct hpre *hpre;
spinlock_t req_lock;
@@ -214,8 +220,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len,
int is_src, dma_addr_t *tmp)
{
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = hpre_req->ctx->dev;
enum dma_data_direction dma_dir;
if (is_src) {
@@ -239,7 +244,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
void *ptr;
int shift;
@@ -293,11 +298,13 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
struct scatterlist *dst,
struct scatterlist *src)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
struct hpre_sqe *sqe = &req->req;
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, tmp)))
+ return;
if (src) {
if (req->src)
@@ -307,6 +314,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, tmp)))
+ return;
if (req->dst) {
if (dst)
@@ -321,16 +330,15 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
- struct device *dev = HPRE_DEV(ctx);
struct hpre_asym_request *req;
unsigned int err, done, alg;
int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
-#define HREE_HW_ERR_MASK 0x7ff
-#define HREE_SQE_DONE_MASK 0x3
-#define HREE_ALG_TYPE_MASK 0x1f
+#define HREE_HW_ERR_MASK GENMASK(10, 0)
+#define HREE_SQE_DONE_MASK GENMASK(1, 0)
+#define HREE_ALG_TYPE_MASK GENMASK(4, 0)
id = (int)le16_to_cpu(sqe->tag);
req = ctx->req_list[id];
hpre_rm_req_from_ctx(req);
@@ -346,7 +354,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
return 0;
alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
- dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
+ dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
alg, done, err);
return -EINVAL;
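The switch from open-coded hex masks to GENMASK() in this hunk (and in hpre_main.c below) is behavior-preserving. A quick compile-time sanity check, written only as an illustration and not part of the patch:

#include <linux/bits.h>
#include <linux/build_bug.h>

static_assert(GENMASK(10, 0) == 0x7ff);		/* HREE_HW_ERR_MASK */
static_assert(GENMASK(1, 0)  == 0x3);		/* HREE_SQE_DONE_MASK */
static_assert(GENMASK(4, 0)  == 0x1f);		/* HREE_ALG_TYPE_MASK */
static_assert(GENMASK(21, 0) == 0x3fffff);	/* HPRE_CORE_INT_DISABLE */
static_assert(GENMASK(31, 1) == 0xfffffffe);	/* HPRE_QM_USR_CFG_MASK */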
@@ -361,6 +369,7 @@ static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ ctx->dev = &qp->qm->pdev->dev;
hpre = container_of(ctx->qp->qm, struct hpre, qm);
ctx->hpre = hpre;
@@ -524,6 +533,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
}
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
@@ -618,14 +629,14 @@ static int hpre_is_dh_params_length_valid(unsigned int key_sz)
case _HPRE_DH_GRP15:
case _HPRE_DH_GRP16:
return 0;
+ default:
+ return -EINVAL;
}
-
- return -EINVAL;
}
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz;
if (params->p_size > HPRE_DH_MAX_P_SZ)
@@ -664,7 +675,7 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
if (is_clear_all)
@@ -877,18 +888,18 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
if (!hpre_rsa_key_size_is_support(ctx->key_sz))
return 0;
- ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
&ctx->rsa.dma_pubkey,
GFP_KERNEL);
if (!ctx->rsa.pubkey)
return -ENOMEM;
if (private) {
- ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
+ ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
&ctx->rsa.dma_prikey,
GFP_KERNEL);
if (!ctx->rsa.prikey) {
- dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
+ dma_free_coherent(ctx->dev, vlen << 1,
ctx->rsa.pubkey,
ctx->rsa.dma_pubkey);
ctx->rsa.pubkey = NULL;
@@ -950,7 +961,7 @@ static int hpre_crt_para_get(char *para, size_t para_sz,
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
unsigned int hlf_ksz = ctx->key_sz >> 1;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
u64 offset;
int ret;
@@ -1008,7 +1019,7 @@ free_key:
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
unsigned int half_key_sz = ctx->key_sz >> 1;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
@@ -1179,7 +1190,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
bool is_ecdh)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
unsigned int shift = sz << 1;
@@ -1202,12 +1213,21 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
hpre_ctx_clear(ctx, is_clear_all);
}
+/*
+ * HPRE supports key widths of 192/224/256/384/521 bits and rounds them
+ * up to a hardware width as follows:
+ * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
+ * If the parameter bit width is smaller, the high-order bits are
+ * zero-filled in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
+ */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
switch (id) {
case ECC_CURVE_NIST_P192:
case ECC_CURVE_NIST_P256:
return HPRE_ECC_HW256_KSZ_B;
+ case ECC_CURVE_NIST_P384:
+ return HPRE_ECC_HW384_KSZ_B;
default:
break;
}
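As a worked check of the 0x3/0x5/0x8 values quoted above, assuming task_len1 is encoded as (hardware key bytes / 8) - 1, i.e. that HPRE_BITS_2_BYTES_SHIFT is 3 in the hpre_msg_request_set() expression shown earlier: a 256-bit hardware width is 32 bytes and 32 / 8 - 1 = 0x3; 384 bits is 48 bytes and 48 / 8 - 1 = 0x5; 576 bits is 72 bytes and 72 / 8 - 1 = 0x8.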
@@ -1272,6 +1292,8 @@ static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
return HPRE_ECC_NIST_P192_N_SIZE;
case ECC_CURVE_NIST_P256:
return HPRE_ECC_NIST_P256_N_SIZE;
+ case ECC_CURVE_NIST_P384:
+ return HPRE_ECC_NIST_P384_N_SIZE;
default:
break;
}
@@ -1281,7 +1303,7 @@ static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz, shift, curve_sz;
int ret;
@@ -1328,11 +1350,32 @@ static bool hpre_key_is_zero(char *key, unsigned short key_sz)
return true;
}
+static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
+{
+ struct device *dev = ctx->dev;
+ int ret;
+
+ ret = crypto_get_default_rng();
+ if (ret) {
+ dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
+ return ret;
+ }
+
+ ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
+ params->key_size);
+ crypto_put_default_rng();
+ if (ret)
+ dev_err(dev, "failed to get rng, ret = %d!\n", ret);
+
+ return ret;
+}
+
static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
+ char key[HPRE_ECC_MAX_KSZ];
unsigned int sz, sz_shift;
struct ecdh params;
int ret;
@@ -1342,6 +1385,15 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
+ /* Use stdrng to generate private key */
+ if (!params.key || !params.key_size) {
+ params.key = key;
+ params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
+ ret = ecdh_gen_privkey(ctx, &params);
+ if (ret)
+ return ret;
+ }
+
if (hpre_key_is_zero(params.key, params.key_size)) {
dev_err(dev, "Invalid hpre key!\n");
return -EINVAL;
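A minimal sketch of how a caller could exercise this new path through the generic kpp API, passing an empty key so that the driver generates the private key from stdrng itself. It is illustrative only (error handling trimmed) and assumes the ecdh helper accepts a zero-length key, which is what the code above expects to receive.

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>

static int ecdh_set_empty_key(void)
{
	struct ecdh p = { .key = NULL, .key_size = 0 };
	struct crypto_kpp *tfm;
	unsigned int len;
	char *buf;
	int ret;

	tfm = crypto_alloc_kpp("ecdh-nist-p384", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	len = crypto_ecdh_key_len(&p);
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		crypto_free_kpp(tfm);
		return -ENOMEM;
	}

	/* encode the (empty) ecdh parameters and let the driver pick a key */
	ret = crypto_ecdh_encode_key(buf, len, &p);
	if (!ret)
		ret = crypto_kpp_set_secret(tfm, buf, len);

	kfree(buf);
	crypto_free_kpp(tfm);
	return ret;
}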
@@ -1367,16 +1419,20 @@ static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
struct scatterlist *dst,
struct scatterlist *src)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
struct hpre_sqe *sqe = &req->req;
dma_addr_t dma;
dma = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (src && req->src)
dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
dma = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (req->dst)
dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
@@ -1431,6 +1487,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
h_req->areq.ecdh = req;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->key = cpu_to_le64(ctx->ecdh.dma_p);
msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
@@ -1450,7 +1508,7 @@ static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int tmpshift;
dma_addr_t dma = 0;
void *ptr;
@@ -1480,8 +1538,8 @@ static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
- dma_addr_t dma = 0;
+ struct device *dev = ctx->dev;
+ dma_addr_t dma;
if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
dev_err(dev, "data or data length is illegal!\n");
@@ -1503,7 +1561,7 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
@@ -1568,6 +1626,15 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}
+static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P384;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
@@ -1609,7 +1676,7 @@ static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
unsigned int len)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
unsigned int shift = sz << 1;
@@ -1634,7 +1701,7 @@ static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
int ret = -EINVAL;
if (len != CURVE25519_KEY_SIZE ||
@@ -1662,16 +1729,20 @@ static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
struct scatterlist *dst,
struct scatterlist *src)
{
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
struct hpre_sqe *sqe = &req->req;
dma_addr_t dma;
dma = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (src && req->src)
dma_free_coherent(dev, ctx->key_sz, req->src, dma);
dma = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;
if (req->dst)
dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
@@ -1722,6 +1793,8 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
h_req->areq.curve25519 = req;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->key = cpu_to_le64(ctx->curve25519.dma_p);
msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
@@ -1752,7 +1825,7 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
u8 p[CURVE25519_KEY_SIZE] = { 0 };
const struct ecc_curve *curve;
dma_addr_t dma = 0;
@@ -1790,8 +1863,12 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
* When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
* we get its modulus to p, and then use it.
*/
- if (memcmp(ptr, p, ctx->key_sz) >= 0)
+ if (memcmp(ptr, p, ctx->key_sz) == 0) {
+ dev_err(dev, "gx is p!\n");
+ return -EINVAL;
+ } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
hpre_curve25519_src_modulo_p(ptr);
+ }
hpre_req->src = ptr;
msg->in = cpu_to_le64(dma);
@@ -1807,8 +1884,8 @@ static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = HPRE_DEV(ctx);
- dma_addr_t dma = 0;
+ struct device *dev = ctx->dev;
+ dma_addr_t dma;
if (!data || !sg_is_last(data) || len != ctx->key_sz) {
dev_err(dev, "data or data length is illegal!\n");
@@ -1830,7 +1907,7 @@ static int hpre_curve25519_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = HPRE_DEV(ctx);
+ struct device *dev = ctx->dev;
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
@@ -1940,7 +2017,7 @@ static struct kpp_alg ecdh_nist_p192 = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
.cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh",
+ .cra_driver_name = "hpre-ecdh-nist-p192",
.cra_module = THIS_MODULE,
},
};
@@ -1957,7 +2034,24 @@ static struct kpp_alg ecdh_nist_p256 = {
.cra_ctxsize = sizeof(struct hpre_ctx),
.cra_priority = HPRE_CRYPTO_ALG_PRI,
.cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh",
+ .cra_driver_name = "hpre-ecdh-nist-p256",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg ecdh_nist_p384 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p384_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "hpre-ecdh-nist-p384",
.cra_module = THIS_MODULE,
},
};
@@ -1989,16 +2083,25 @@ static int hpre_register_ecdh(void)
return ret;
ret = crypto_register_kpp(&ecdh_nist_p256);
- if (ret) {
- crypto_unregister_kpp(&ecdh_nist_p192);
- return ret;
- }
+ if (ret)
+ goto unregister_ecdh_p192;
+
+ ret = crypto_register_kpp(&ecdh_nist_p384);
+ if (ret)
+ goto unregister_ecdh_p256;
return 0;
+
+unregister_ecdh_p256:
+ crypto_unregister_kpp(&ecdh_nist_p256);
+unregister_ecdh_p192:
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ return ret;
}
static void hpre_unregister_ecdh(void)
{
+ crypto_unregister_kpp(&ecdh_nist_p384);
crypto_unregister_kpp(&ecdh_nist_p256);
crypto_unregister_kpp(&ecdh_nist_p192);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 046bc962c8b2..8b0640fb04be 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -36,7 +36,7 @@
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
#define HPRE_CORE_INT_ENABLE 0
-#define HPRE_CORE_INT_DISABLE 0x003fffff
+#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@@ -50,6 +50,7 @@
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
+#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0
#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
@@ -57,7 +58,6 @@
#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT 0x301a04
#define HPRE_HAC_ECC2_CNT 0x301a08
-#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
@@ -69,13 +69,17 @@
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define HPRE_PCI_DEVICE_ID 0xa258
#define HPRE_PCI_VF_DEVICE_ID 0xa259
-#define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
-#define HPRE_QM_USR_CFG_MASK 0xfffffffe
-#define HPRE_QM_AXI_CFG_MASK 0xffff
-#define HPRE_QM_VFG_AX_MASK 0xff
-#define HPRE_BD_USR_MASK 0x3
-#define HPRE_CLUSTER_CORE_MASK_V2 0xf
-#define HPRE_CLUSTER_CORE_MASK_V3 0xff
+#define HPRE_QM_USR_CFG_MASK GENMASK(31, 1)
+#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
+#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
+#define HPRE_BD_USR_MASK GENMASK(1, 0)
+#define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0)
+#define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0)
+#define HPRE_PREFETCH_CFG 0x301130
+#define HPRE_SVA_PREFTCH_DFX 0x30115C
+#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
+#define HPRE_PREFETCH_DISABLE BIT(30)
+#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
@@ -88,11 +92,7 @@
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)
-#define HPRE_CLUSTERS_NUM(qm) \
- (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : HPRE_CLUSTERS_NUM_V2)
-#define HPRE_CLUSTER_CORE_MASK(qm) \
- (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTER_CORE_MASK_V3 :\
- HPRE_CLUSTER_CORE_MASK_V2)
+#define HPRE_SHAPER_TYPE_RATE 128
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
@@ -123,21 +123,49 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
- { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
- { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
- { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
- { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
- { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
- { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
- { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
- { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
- { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
- { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
- { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
- { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
- { .int_msk = BIT(22), .msg = "pt_rng_timeout_int_set"},
- { .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set"},
{
+ .int_msk = BIT(0),
+ .msg = "core_ecc_1bit_err_int_set"
+ }, {
+ .int_msk = BIT(1),
+ .msg = "core_ecc_2bit_err_int_set"
+ }, {
+ .int_msk = BIT(2),
+ .msg = "dat_wb_poison_int_set"
+ }, {
+ .int_msk = BIT(3),
+ .msg = "dat_rd_poison_int_set"
+ }, {
+ .int_msk = BIT(4),
+ .msg = "bd_rd_poison_int_set"
+ }, {
+ .int_msk = BIT(5),
+ .msg = "ooo_ecc_2bit_err_int_set"
+ }, {
+ .int_msk = BIT(6),
+ .msg = "cluster1_shb_timeout_int_set"
+ }, {
+ .int_msk = BIT(7),
+ .msg = "cluster2_shb_timeout_int_set"
+ }, {
+ .int_msk = BIT(8),
+ .msg = "cluster3_shb_timeout_int_set"
+ }, {
+ .int_msk = BIT(9),
+ .msg = "cluster4_shb_timeout_int_set"
+ }, {
+ .int_msk = GENMASK(15, 10),
+ .msg = "ooo_rdrsp_err_int_set"
+ }, {
+ .int_msk = GENMASK(21, 16),
+ .msg = "ooo_wrrsp_err_int_set"
+ }, {
+ .int_msk = BIT(22),
+ .msg = "pt_rng_timeout_int_set"
+ }, {
+ .int_msk = BIT(23),
+ .msg = "sva_fsm_timeout_int_set"
+ }, {
/* sentinel */
}
};
@@ -224,6 +252,18 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+static inline int hpre_cluster_num(struct hisi_qm *qm)
+{
+ return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
+ HPRE_CLUSTERS_NUM_V2;
+}
+
+static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
+{
+ return (qm->ver >= QM_HW_V3) ?
+ HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
+}
+
struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
@@ -290,8 +330,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
static int hpre_set_cluster(struct hisi_qm *qm)
{
- u32 cluster_core_mask = HPRE_CLUSTER_CORE_MASK(qm);
- u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ u32 cluster_core_mask = hpre_cluster_core_mask(qm);
+ u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
unsigned long offset;
u32 val = 0;
@@ -302,10 +342,10 @@ static int hpre_set_cluster(struct hisi_qm *qm)
/* clusters initiating */
writel(cluster_core_mask,
- HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
- writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
- ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
- HPRE_CORE_INI_STATUS), val,
+ qm->io_base + offset + HPRE_CORE_ENB);
+ writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
+ ret = readl_relaxed_poll_timeout(qm->io_base + offset +
+ HPRE_CORE_INI_STATUS, val,
((val & cluster_core_mask) ==
cluster_core_mask),
HPRE_REG_RD_INTVRL_US,
@@ -329,11 +369,52 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
{
u32 val;
- val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
val |= HPRE_QM_PM_FLR;
- writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
- writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+ writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
+}
+
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
+ val &= HPRE_PREFETCH_ENABLE;
+ writel(val, qm->io_base + HPRE_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+ val, !(val & HPRE_PREFETCH_DISABLE),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
+ val |= HPRE_PREFETCH_DISABLE;
+ writel(val, qm->io_base + HPRE_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+ val, !(val & HPRE_SVA_DISABLE_READY),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
@@ -342,33 +423,33 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
u32 val;
int ret;
- writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
- writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
- writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
+ writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
/* HPRE need more time, we close this interrupt */
- val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+ val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
- writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
+ writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);
if (qm->ver >= QM_HW_V3)
writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
- HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ qm->io_base + HPRE_TYPES_ENB);
else
- writel(HPRE_RSA_ENB, HPRE_ADDR(qm, HPRE_TYPES_ENB));
-
- writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
- writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
- writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
- writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
- writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
- writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
-
- writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
- writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
- writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
- ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
- val & BIT(0),
+ writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);
+
+ writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
+ writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
+ writel(0x0, qm->io_base + HPRE_INT_MASK);
+ writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
+ writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
+ writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
+
+ writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
+ writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
+ writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
+ val & BIT(0),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret) {
@@ -397,7 +478,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
- u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ u8 clusters_num = hpre_cluster_num(qm);
unsigned long offset;
int i;
@@ -413,36 +494,49 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hpre_hw_error_disable(struct hisi_qm *qm)
+static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
- u32 val;
+ u32 val1, val2;
+
+ val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ if (enable) {
+ val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ val2 = HPRE_HAC_RAS_NFE_ENABLE;
+ } else {
+ val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ val2 = 0x0;
+ }
+
+ if (qm->ver > QM_HW_V2)
+ writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+
+ writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+}
+static void hpre_hw_error_disable(struct hisi_qm *qm)
+{
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
- /* disable HPRE block master OOO when m-bit error occur */
- val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
- val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
+ hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 val;
-
/* clear HPRE hw error source if having */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
- /* enable hpre hw error interrupts */
- writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
+ /* configure error type */
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
- /* enable HPRE block master OOO when m-bit error occur */
- val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
- val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
+ hpre_master_ooo_ctrl(qm, true);
+
+ /* enable hpre hw error interrupts */
+ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -650,7 +744,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
- u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
@@ -788,7 +882,7 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
- return readl(qm->io_base + HPRE_HAC_INT_STATUS);
+ return readl(qm->io_base + HPRE_INT_STATUS);
}
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
@@ -802,9 +896,9 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_err_info_init(struct hisi_qm *qm)
@@ -829,6 +923,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
+ .open_sva_prefetch = hpre_open_sva_prefetch,
+ .close_sva_prefetch = hpre_close_sva_prefetch,
.err_info_init = hpre_err_info_init,
};
@@ -841,6 +937,8 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
+ hpre_open_sva_prefetch(qm);
+
qm->err_ini = &hpre_err_ini;
qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
@@ -850,6 +948,7 @@ static int hpre_pf_probe_init(struct hpre *hpre)
static int hpre_probe_init(struct hpre *hpre)
{
+ u32 type_rate = HPRE_SHAPER_TYPE_RATE;
struct hisi_qm *qm = &hpre->qm;
int ret;
@@ -857,6 +956,11 @@ static int hpre_probe_init(struct hpre *hpre)
ret = hpre_pf_probe_init(hpre);
if (ret)
return ret;
+ /* Enable shaper type 0 */
+ if (qm->ver >= QM_HW_V3) {
+ type_rate |= QM_SHAPER_ENABLE;
+ qm->type_rate = type_rate;
+ }
}
return 0;
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ce439a0c66c9..1d67f94a1d56 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -25,9 +25,11 @@
#define QM_IRQ_NUM_V1 1
#define QM_IRQ_NUM_PF_V2 4
#define QM_IRQ_NUM_VF_V2 2
+#define QM_IRQ_NUM_VF_V3 3
#define QM_EQ_EVENT_IRQ_VECTOR 0
#define QM_AEQ_EVENT_IRQ_VECTOR 1
+#define QM_CMD_EVENT_IRQ_VECTOR 2
#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
/* mailbox */
@@ -39,6 +41,8 @@
#define QM_MB_CMD_CQC_BT 0x5
#define QM_MB_CMD_SQC_VFT_V2 0x6
#define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_SRC 0xc
+#define QM_MB_CMD_DST 0xd
#define QM_MB_CMD_SEND_BASE 0x300
#define QM_MB_EVENT_SHIFT 8
@@ -46,6 +50,9 @@
#define QM_MB_OP_SHIFT 14
#define QM_MB_CMD_DATA_ADDR_L 0x304
#define QM_MB_CMD_DATA_ADDR_H 0x308
+#define QM_MB_PING_ALL_VFS 0xffff
+#define QM_MB_CMD_DATA_SHIFT 32
+#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@@ -95,6 +102,7 @@
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
#define QM_QUE_ISO_CFG_V 0x0030
+#define QM_PAGE_SIZE 0x0034
#define QM_QUE_ISO_EN 0x100154
#define QM_CAPBILITY 0x100158
#define QM_QP_NUN_MASK GENMASK(10, 0)
@@ -155,11 +163,15 @@
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
+#define QM_OOO_SHUTDOWN_SEL 0x1040f8
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
#define ACC_VENDOR_ID_VALUE 0x5a5a
#define QM_PEH_DFX_INFO0 0x1000fc
+#define QM_PEH_DFX_INFO1 0x100100
+#define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
+#define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
@@ -170,6 +182,31 @@
#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_MSI_CAP_ENABLE BIT(16)
+
+/* interfunction communication */
+#define QM_IFC_READY_STATUS 0x100128
+#define QM_IFC_C_STS_M 0x10012C
+#define QM_IFC_INT_SET_P 0x100130
+#define QM_IFC_INT_CFG 0x100134
+#define QM_IFC_INT_SOURCE_P 0x100138
+#define QM_IFC_INT_SOURCE_V 0x0020
+#define QM_IFC_INT_MASK 0x0024
+#define QM_IFC_INT_STATUS 0x0028
+#define QM_IFC_INT_SET_V 0x002C
+#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
+#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
+#define QM_IFC_INT_SOURCE_MASK BIT(0)
+#define QM_IFC_INT_DISABLE BIT(0)
+#define QM_IFC_INT_STATUS_MASK BIT(0)
+#define QM_IFC_INT_SET_MASK BIT(0)
+#define QM_WAIT_DST_ACK 10
+#define QM_MAX_PF_WAIT_COUNT 10
+#define QM_MAX_VF_WAIT_COUNT 40
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_TIMEOUT_US \
+ (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
#define QM_DFX_MB_CNT_VF 0x104010
#define QM_DFX_DB_CNT_VF 0x104020
@@ -205,6 +242,33 @@
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
#define QM_RESETTING 2
+#define QM_QOS_PARAM_NUM 2
+#define QM_QOS_VAL_NUM 1
+#define QM_QOS_BDF_PARAM_NUM 4
+#define QM_QOS_MAX_VAL 1000
+#define QM_QOS_RATE 100
+#define QM_QOS_EXPAND_RATE 1000
+#define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
+#define QM_SHAPER_CIR_U_MASK GENMASK(10, 8)
+#define QM_SHAPER_CIR_S_MASK GENMASK(14, 11)
+#define QM_SHAPER_FACTOR_CIR_U_SHIFT 8
+#define QM_SHAPER_FACTOR_CIR_S_SHIFT 11
+#define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
+#define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
+#define QM_SHAPER_CBS_B 1
+#define QM_SHAPER_CBS_S 16
+#define QM_SHAPER_VFT_OFFSET 6
+#define WAIT_FOR_QOS_VF 100
+#define QM_QOS_MIN_ERROR_RATE 5
+#define QM_QOS_TYPICAL_NUM 8
+#define QM_SHAPER_MIN_CBS_S 8
+#define QM_QOS_TICK 0x300U
+#define QM_QOS_DIVISOR_CLK 0x1f40U
+#define QM_QOS_MAX_CIR_B 200
+#define QM_QOS_MIN_CIR_B 100
+#define QM_QOS_MAX_CIR_U 6
+#define QM_QOS_MAX_CIR_S 11
+#define QM_QOS_VAL_MAX_LEN 32
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
@@ -245,6 +309,7 @@
enum vft_type {
SQC_VFT = 0,
CQC_VFT,
+ SHAPER_VFT,
};
enum acc_err_result {
@@ -253,6 +318,23 @@ enum acc_err_result {
ACC_ERR_RECOVERED,
};
+enum qm_alg_type {
+ ALG_TYPE_0,
+ ALG_TYPE_1,
+};
+
+enum qm_mb_cmd {
+ QM_PF_FLR_PREPARE = 0x01,
+ QM_PF_SRST_PREPARE,
+ QM_PF_RESET_DONE,
+ QM_VF_PREPARE_DONE,
+ QM_VF_PREPARE_FAIL,
+ QM_VF_START_DONE,
+ QM_VF_START_FAIL,
+ QM_PF_SET_QOS,
+ QM_VF_GET_QOS,
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -351,6 +433,9 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*stop_qp)(struct hisi_qp *qp);
+ int (*set_msi)(struct hisi_qm *qm, bool set);
+ int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
+ int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};
struct qm_dfx_item {
@@ -412,6 +497,11 @@ static const char * const qp_s[] = {
"none", "init", "start", "stop", "close",
};
+static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000,
+ 10000, 25000, 50000, 100000};
+static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16,
+ 17, 18, 19};
+
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
enum qm_state curr = atomic_read(&qm->status.flags);
@@ -491,6 +581,18 @@ static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
return avail;
}
+static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
+ u64 base, u16 queue, bool op)
+{
+ mailbox->w0 = cpu_to_le16((cmd) |
+ ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
+ (0x1 << QM_MB_BUSY_SHIFT));
+ mailbox->queue_num = cpu_to_le16(queue);
+ mailbox->base_l = cpu_to_le32(lower_32_bits(base));
+ mailbox->base_h = cpu_to_le32(upper_32_bits(base));
+ mailbox->rsvd = 0;
+}
+
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
@@ -523,44 +625,42 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
: "memory");
}
-static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
- bool op)
+static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
- struct qm_mailbox mailbox;
- int ret = 0;
-
- dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
- queue, cmd, (unsigned long long)dma_addr);
-
- mailbox.w0 = cpu_to_le16(cmd |
- (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
- (0x1 << QM_MB_BUSY_SHIFT));
- mailbox.queue_num = cpu_to_le16(queue);
- mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
- mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
- mailbox.rsvd = 0;
-
- mutex_lock(&qm->mailbox_lock);
-
if (unlikely(qm_wait_mb_ready(qm))) {
- ret = -EBUSY;
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
- goto busy_unlock;
+ goto mb_busy;
}
- qm_mb_write(qm, &mailbox);
+ qm_mb_write(qm, mailbox);
if (unlikely(qm_wait_mb_ready(qm))) {
- ret = -EBUSY;
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
- goto busy_unlock;
+ goto mb_busy;
}
-busy_unlock:
+ return 0;
+
+mb_busy:
+ atomic64_inc(&qm->debug.dfx.mb_err_cnt);
+ return -EBUSY;
+}
+
+static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op)
+{
+ struct qm_mailbox mailbox;
+ int ret;
+
+ dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
+ queue, cmd, (unsigned long long)dma_addr);
+
+ qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
+
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
mutex_unlock(&qm->mailbox_lock);
- if (ret)
- atomic64_inc(&qm->debug.dfx.mb_err_cnt);
return ret;
}
@@ -626,6 +726,14 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
return QM_IRQ_NUM_VF_V2;
}
+static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ return QM_IRQ_NUM_PF_V2;
+
+ return QM_IRQ_NUM_VF_V3;
+}
+
static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
@@ -730,6 +838,21 @@ static irqreturn_t qm_irq(int irq, void *data)
return IRQ_NONE;
}
+static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+ u32 val;
+
+ val = readl(qm->io_base + QM_IFC_INT_STATUS);
+ val &= QM_IFC_INT_STATUS_MASK;
+ if (!val)
+ return IRQ_NONE;
+
+ schedule_work(&qm->cmd_process);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qm_aeq_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -770,14 +893,16 @@ static void qm_irq_unregister(struct hisi_qm *qm)
free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- if (qm->ver == QM_HW_V1)
- return;
+ if (qm->ver > QM_HW_V1) {
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+ if (qm->fun_type == QM_HW_PF)
+ free_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
+ }
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
+ if (qm->ver > QM_HW_V2)
+ free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
}
static void qm_init_qp_status(struct hisi_qp *qp)
@@ -790,8 +915,95 @@ static void qm_init_qp_status(struct hisi_qp *qp)
atomic_set(&qp_status->used, 0);
}
+static void qm_init_prefetch(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 page_type = 0x0;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ switch (PAGE_SIZE) {
+ case SZ_4K:
+ page_type = 0x0;
+ break;
+ case SZ_16K:
+ page_type = 0x1;
+ break;
+ case SZ_64K:
+ page_type = 0x2;
+ break;
+ default:
+ dev_err(dev, "system page size is not support: %lu, default set to 4KB",
+ PAGE_SIZE);
+ }
+
+ writel(page_type, qm->io_base + QM_PAGE_SIZE);
+}
+
+/*
+ * The formula:
+ * IR = X Mbps: ir = 1 means IR = 100 Mbps, ir = 10000 means IR = 10 Gbps
+ *
+ *                       IR_b * (2 ^ IR_u) * 8
+ * IR(Mbps) * 10 ^ -3 = -------------------------
+ *                        Tick * (2 ^ IR_s)
+ */
+static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
+{
+ return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
+ (QM_QOS_TICK * (1 << cir_s));
+}
+
+static u32 acc_shaper_calc_cbs_s(u32 ir)
+{
+ int i;
+
+ if (ir < typical_qos_val[0])
+ return QM_SHAPER_MIN_CBS_S;
+
+ for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) {
+ if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i])
+ return typical_qos_cbs_s[i - 1];
+ }
+
+ return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1];
+}
+
+static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
+{
+ u32 cir_b, cir_u, cir_s, ir_calc;
+ u32 error_rate;
+
+ factor->cbs_s = acc_shaper_calc_cbs_s(ir);
+
+ for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
+ for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
+ for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) {
+ /*
+ * The formula is changed to:
+ *                 IR_b * (2 ^ IR_u) * DIVISOR_CLK
+ *     IR(Mbps) = ---------------------------------
+ *                        768 * (2 ^ IR_s)
+ */
+ ir_calc = acc_shaper_para_calc(cir_b, cir_u,
+ cir_s);
+ error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
+ if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
+ factor->cir_b = cir_b;
+ factor->cir_u = cir_u;
+ factor->cir_s = cir_s;
+
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
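For the default case set up by qm_shaper_init_vft() below (func_qos = 1000, so ir = 1000 * 100 = 100000), the search above lands on cir_b = 150, cir_u = 6, cir_s = 0 with zero error, since 150 * 8000 * 2^6 / (768 * 2^0) = 100000, and acc_shaper_calc_cbs_s() returns cbs_s = 19. A standalone re-check of that arithmetic (plain userspace C, not kernel code, with Tick = 0x300 and DIVISOR_CLK = 0x1f40 as defined earlier in this file):

#include <stdio.h>

int main(void)
{
	unsigned long long cir_b = 150, cir_u = 6, cir_s = 0;
	/* DIVISOR_CLK = 0x1f40 = 8000, Tick = 0x300 = 768 */
	unsigned long long ir = (cir_b * 0x1f40ULL * (1ULL << cir_u)) /
				(0x300ULL * (1ULL << cir_s));

	printf("%llu\n", ir);	/* prints 100000 */
	return 0;
}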
+
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
- u32 number)
+ u32 number, struct qm_shaper_factor *factor)
{
u64 tmp = 0;
@@ -820,6 +1032,15 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
tmp = QM_CQC_VFT_VALID;
}
break;
+ case SHAPER_VFT:
+ if (qm->ver >= QM_HW_V3) {
+ tmp = factor->cir_b |
+ (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
+ (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
+ (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
+ (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
+ }
+ break;
}
}
@@ -830,6 +1051,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
u32 fun_num, u32 base, u32 number)
{
+ struct qm_shaper_factor *factor = &qm->factor[fun_num];
unsigned int val;
int ret;
@@ -841,9 +1063,12 @@ static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
writel(type, qm->io_base + QM_VFT_CFG_TYPE);
+ if (type == SHAPER_VFT)
+ fun_num |= base << QM_SHAPER_VFT_OFFSET;
+
writel(fun_num, qm->io_base + QM_VFT_CFG);
- qm_vft_data_cfg(qm, type, base, number);
+ qm_vft_data_cfg(qm, type, base, number, factor);
writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
@@ -853,6 +1078,27 @@ static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
POLL_TIMEOUT);
}
+static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
+{
+ int ret, i;
+
+ qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL;
+ ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
+ return ret;
+ }
+ writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
+ for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
+ /* The 'base' argument is reused to select the alg type for the shaper */
+ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
@@ -865,7 +1111,21 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return ret;
}
+ /* init default shaper qos val */
+ if (qm->ver >= QM_HW_V3) {
+ ret = qm_shaper_init_vft(qm, fun_num);
+ if (ret)
+ goto back_sqc_cqc;
+ }
+
return 0;
+back_sqc_cqc:
+ for (i = SQC_VFT; i <= CQC_VFT; i++) {
+ ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
+ if (ret)
+ return ret;
+ }
+ return ret;
}
static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
@@ -1570,16 +1830,9 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
if (count > QM_DBG_WRITE_LEN)
return -ENOSPC;
- cmd_buf = kzalloc(count + 1, GFP_KERNEL);
- if (!cmd_buf)
- return -ENOMEM;
-
- if (copy_from_user(cmd_buf, buffer, count)) {
- kfree(cmd_buf);
- return -EFAULT;
- }
-
- cmd_buf[count] = '\0';
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
@@ -1623,13 +1876,9 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
-
qm->error_mask = ce | nfe | fe;
-
/* clear QM hw residual error source */
writel(QM_ABNORMAL_INT_SOURCE_CLR,
qm->io_base + QM_ABNORMAL_INT_SOURCE);
@@ -1639,6 +1888,14 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+}
+
+static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+{
+ u32 irq_enable = ce | nfe | fe;
+ u32 irq_unmask = ~irq_enable;
+
+ qm_hw_error_cfg(qm, ce, nfe, fe);
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1649,6 +1906,28 @@ static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
+static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+{
+ u32 irq_enable = ce | nfe | fe;
+ u32 irq_unmask = ~irq_enable;
+
+ qm_hw_error_cfg(qm, ce, nfe, fe);
+
+ /* enable closing master ooo when a hardware error occurs */
+ writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
+}
+
+static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
+{
+ writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+
+ /* disable closing master ooo when a hardware error occurs */
+ writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+}
+
static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
const struct hisi_qm_hw_error *err;
@@ -1715,15 +1994,371 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static u32 qm_get_hw_error_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+}
+
+static u32 qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ return qm->err_ini->get_dev_hw_err_status(qm);
+}
+
+/* Check if the error causes the master OOO to be blocked */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ u32 val, dev_val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = qm_get_hw_error_status(qm);
+ dev_val = qm_get_dev_err_status(qm);
+
+ if (qm->ver < QM_HW_V3)
+ return (val & QM_ECC_MBIT) ||
+ (dev_val & qm->err_info.ecc_2bits_mask);
+
+ return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
+ (dev_val & (~qm->err_info.dev_ce_mask));
+}
+
+static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
+{
+ struct qm_mailbox mailbox;
+ int ret;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret)
+ goto err_unlock;
+
+ *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+ ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
+
+err_unlock:
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+}
+
+static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
+{
+ u32 val;
+
+ if (qm->fun_type == QM_HW_PF)
+ writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
+
+ val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
+ val |= QM_IFC_INT_SOURCE_MASK;
+ writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
+}
+
+static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 cmd;
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ if (ret) {
+ dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ return;
+ }
+
+ cmd = msg & QM_MB_CMD_DATA_MASK;
+ switch (cmd) {
+ case QM_VF_PREPARE_FAIL:
+ dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
+ break;
+ case QM_VF_START_FAIL:
+ dev_err(dev, "failed to start VF(%u)!\n", vf_id);
+ break;
+ case QM_VF_PREPARE_DONE:
+ case QM_VF_START_DONE:
+ break;
+ default:
+ dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ break;
+ }
+}
+
+static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 vfs_num = qm->vfs_num;
+ int cnt = 0;
+ int ret = 0;
+ u64 val;
+ u32 i;
+
+ if (!qm->vfs_num || qm->ver < QM_HW_V3)
+ return 0;
+
+ while (true) {
+ val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
+ /* break once all VFs have sent their command to the PF */
+ if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
+ break;
+
+ if (++cnt > QM_MAX_PF_WAIT_COUNT) {
+ ret = -EBUSY;
+ break;
+ }
+
+ msleep(QM_WAIT_DST_ACK);
+ }
+
+ /* PF checks each VF's message */
+ for (i = 1; i <= vfs_num; i++) {
+ if (val & BIT(i))
+ qm_handle_vf_msg(qm, i);
+ else
+ dev_err(dev, "VF(%u) not ping PF!\n", i);
+ }
+
+ /* PF clears the interrupt to ack the VFs */
+ qm_clear_cmd_interrupt(qm, val);
+
+ return ret;
+}
+
+static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
+{
+ u32 val;
+
+ val = readl(qm->io_base + QM_IFC_INT_CFG);
+ val &= ~QM_IFC_SEND_ALL_VFS;
+ val |= fun_num;
+ writel(val, qm->io_base + QM_IFC_INT_CFG);
+
+ val = readl(qm->io_base + QM_IFC_INT_SET_P);
+ val |= QM_IFC_INT_SET_MASK;
+ writel(val, qm->io_base + QM_IFC_INT_SET_P);
+}
+
+static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + QM_IFC_INT_SET_V);
+ val |= QM_IFC_INT_SET_MASK;
+ writel(val, qm->io_base + QM_IFC_INT_SET_V);
+}
+
+static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_mailbox mailbox;
+ int cnt = 0;
+ u64 val;
+ int ret;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret) {
+ dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
+ goto err_unlock;
+ }
+
+ qm_trigger_vf_interrupt(qm, fun_num);
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ val = readq(qm->io_base + QM_IFC_READY_STATUS);
+ /* if the VF has responded, the PF notified the VF successfully. */
+ if (!(val & BIT(fun_num)))
+ goto err_unlock;
+
+ if (++cnt > QM_MAX_PF_WAIT_COUNT) {
+ dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+}
+
+static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 vfs_num = qm->vfs_num;
+ struct qm_mailbox mailbox;
+ u64 val = 0;
+ int cnt = 0;
+ int ret;
+ u32 i;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
+ mutex_lock(&qm->mailbox_lock);
+ /* PF sends command to all VFs by mailbox */
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret) {
+ dev_err(dev, "failed to send command to VFs!\n");
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+ }
+
+ qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ val = readq(qm->io_base + QM_IFC_READY_STATUS);
+ /* If all VFs have acked, the PF notified them successfully. */
+ if (!(val & GENMASK(vfs_num, 1))) {
+ mutex_unlock(&qm->mailbox_lock);
+ return 0;
+ }
+
+ if (++cnt > QM_MAX_PF_WAIT_COUNT)
+ break;
+ }
+
+ mutex_unlock(&qm->mailbox_lock);
+
+ /* Check which VFs timed out. */
+ for (i = 1; i <= vfs_num; i++) {
+ if (val & BIT(i))
+ dev_err(dev, "failed to get response from VF(%u)!\n", i);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+{
+ struct qm_mailbox mailbox;
+ int cnt = 0;
+ u32 val;
+ int ret;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ goto unlock;
+ }
+
+ qm_trigger_pf_interrupt(qm);
+ /* Waiting for PF response */
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ val = readl(qm->io_base + QM_IFC_INT_SET_V);
+ if (!(val & QM_IFC_INT_STATUS_MASK))
+ break;
+
+ if (++cnt > QM_MAX_VF_WAIT_COUNT) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+unlock:
+ mutex_unlock(&qm->mailbox_lock);
+ return ret;
+}
+
static int qm_stop_qp(struct hisi_qp *qp)
{
return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}
+static int qm_set_msi(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ if (set) {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ 0);
+ } else {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ ACC_PEH_MSI_DISABLE);
+ if (qm->err_status.is_qm_ecc_mbit ||
+ qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ mdelay(1);
+ if (readl(qm->io_base + QM_PEH_DFX_INFO0))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void qm_wait_msi_finish(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 cmd = ~0;
+ int cnt = 0;
+ u32 val;
+ int ret;
+
+ while (true) {
+ pci_read_config_dword(pdev, pdev->msi_cap +
+ PCI_MSI_PENDING_64, &cmd);
+ if (!cmd)
+ break;
+
+ if (++cnt > MAX_WAIT_COUNTS) {
+ pci_warn(pdev, "failed to empty MSI PENDING!\n");
+ break;
+ }
+
+ udelay(1);
+ }
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
+ val, !(val & QM_PEH_DFX_MASK),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ pci_warn(pdev, "failed to empty PEH MSI!\n");
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
+ val, !(val & QM_PEH_MSI_FINISH_MASK),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+ pci_warn(pdev, "failed to finish MSI operation!\n");
+}
+
+static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret = -ETIMEDOUT;
+ u32 cmd, i;
+
+ pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
+ if (set)
+ cmd |= QM_MSI_CAP_ENABLE;
+ else
+ cmd &= ~QM_MSI_CAP_ENABLE;
+
+ pci_write_config_dword(pdev, pdev->msi_cap, cmd);
+ if (set) {
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
+ if (cmd & QM_MSI_CAP_ENABLE)
+ return 0;
+
+ udelay(1);
+ }
+ } else {
+ udelay(WAIT_PERIOD_US_MIN);
+ qm_wait_msi_finish(qm);
+ ret = 0;
+ }
+
+ return ret;
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.get_irq_num = qm_get_irq_num_v1,
.hw_error_init = qm_hw_error_init_v1,
+ .set_msi = qm_set_msi,
};
static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
@@ -1733,16 +2368,20 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.hw_error_init = qm_hw_error_init_v2,
.hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi,
};
static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v2,
- .hw_error_init = qm_hw_error_init_v2,
- .hw_error_uninit = qm_hw_error_uninit_v2,
+ .get_irq_num = qm_get_irq_num_v3,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.stop_qp = qm_stop_qp,
+ .set_msi = qm_set_msi_v3,
+ .ping_all_vfs = qm_ping_all_vfs,
+ .ping_pf = qm_ping_pf,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2017,11 +2656,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
int ret = 0, i = 0;
void *addr;
- /*
- * No need to judge if ECC multi-bit error occurs because the
- * master OOO will be blocked.
- */
- if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
+ /* No need to judge if master OOO is blocked. */
+ if (qm_check_dev_error(qm))
return 0;
/* Kunpeng930 supports drain qp by device */
@@ -2290,6 +2926,23 @@ static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
hisi_qm_stop_qp(q->priv);
}
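+/*
+ * A CQE is new as long as its phase bit still matches qp_status.cqc_phase;
+ * consume such entries by advancing cq_head so that uacce's poll can tell
+ * whether any completions arrived since the last check.
+ */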
+static int hisi_qm_is_q_updated(struct uacce_queue *q)
+{
+ struct hisi_qp *qp = q->priv;
+ struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+ int updated = 0;
+
+ while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
+ /* make sure to read data from memory */
+ dma_rmb();
+ qm_cq_head_update(qp);
+ cqe = qp->cqe + qp->qp_status.cq_head;
+ updated = 1;
+ }
+
+ return updated;
+}
+
static void qm_set_sqctype(struct uacce_queue *q, u16 type)
{
struct hisi_qm *qm = q->uacce->priv;
@@ -2335,6 +2988,7 @@ static const struct uacce_ops uacce_qm_ops = {
.stop_queue = hisi_qm_uacce_stop_queue,
.mmap = hisi_qm_uacce_mmap,
.ioctl = hisi_qm_uacce_ioctl,
+ .is_q_updated = hisi_qm_is_q_updated,
};
static int qm_alloc_uacce(struct hisi_qm *qm)
@@ -2530,62 +3184,6 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
return 0;
}
-static int hisi_qm_memory_init(struct hisi_qm *qm)
-{
- struct device *dev = &qm->pdev->dev;
- size_t qp_dma_size, off = 0;
- int i, ret = 0;
-
-#define QM_INIT_BUF(qm, type, num) do { \
- (qm)->type = ((qm)->qdma.va + (off)); \
- (qm)->type##_dma = (qm)->qdma.dma + (off); \
- off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
-} while (0)
-
- idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
- QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
- qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
- GFP_ATOMIC);
- dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
- if (!qm->qdma.va)
- return -ENOMEM;
-
- QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, sqc, qm->qp_num);
- QM_INIT_BUF(qm, cqc, qm->qp_num);
-
- qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
- if (!qm->qp_array) {
- ret = -ENOMEM;
- goto err_alloc_qp_array;
- }
-
- /* one more page for device or qp statuses */
- qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
- qp_dma_size = PAGE_ALIGN(qp_dma_size);
- for (i = 0; i < qm->qp_num; i++) {
- ret = hisi_qp_memory_init(qm, qp_dma_size, i);
- if (ret)
- goto err_init_qp_mem;
-
- dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
- }
-
- return ret;
-
-err_init_qp_mem:
- hisi_qp_memory_uninit(qm, i);
-err_alloc_qp_array:
- dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-
- return ret;
-}
-
static void hisi_qm_pre_init(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2604,6 +3202,34 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->misc_ctl = false;
}
+static void qm_cmd_uninit(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl(qm->io_base + QM_IFC_INT_MASK);
+ val |= QM_IFC_INT_DISABLE;
+ writel(val, qm->io_base + QM_IFC_INT_MASK);
+}
+
+static void qm_cmd_init(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Clear communication interrupt source */
+ qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
+
+ /* Enable the PF to VF communication register. */
+ val = readl(qm->io_base + QM_IFC_INT_MASK);
+ val &= ~QM_IFC_INT_DISABLE;
+ writel(val, qm->io_base + QM_IFC_INT_MASK);
+}
+
static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2635,6 +3261,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ qm_cmd_uninit(qm);
+ kfree(qm->factor);
down_write(&qm->qps_lock);
if (!qm_avail_state(qm, QM_CLOSE)) {
@@ -2826,6 +3454,8 @@ static int __hisi_qm_start(struct hisi_qm *qm)
if (ret)
return ret;
+ qm_init_prefetch(qm);
+
writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
@@ -3034,79 +3664,6 @@ static int qm_debugfs_atomic64_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
qm_debugfs_atomic64_set, "%llu\n");
-/**
- * hisi_qm_debug_init() - Initialize qm related debugfs files.
- * @qm: The qm for which we want to add debugfs files.
- *
- * Create qm related debugfs files.
- */
-void hisi_qm_debug_init(struct hisi_qm *qm)
-{
- struct qm_dfx *dfx = &qm->debug.dfx;
- struct dentry *qm_d;
- void *data;
- int i;
-
- qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
- qm->debug.qm_d = qm_d;
-
- /* only show this in PF */
- if (qm->fun_type == QM_HW_PF) {
- qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
- for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
- qm_create_debugfs_file(qm, qm_d, i);
- }
-
- debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
-
- debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
-
- debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
- &qm_status_fops);
- for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
- data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
- debugfs_create_file(qm_dfx_files[i].name,
- 0644,
- qm_d,
- data,
- &qm_atomic64_ops);
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
-
-/**
- * hisi_qm_debug_regs_clear() - clear qm debug related registers.
- * @qm: The qm for which we want to clear its debug registers.
- */
-void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
-{
- struct qm_dfx_registers *regs;
- int i;
-
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
- /* clear current_q */
- writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
- writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- /*
- * these registers are reading and clearing, so clear them after
- * reading them.
- */
- writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
-
- regs = qm_dfx_regs;
- for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
- readl(qm->io_base + regs->reg_offset);
- regs++;
- }
-
- writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
-
static void qm_hw_error_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -3362,6 +3919,360 @@ static int qm_clear_vft_config(struct hisi_qm *qm)
return 0;
}
+static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 ir = qos * QM_QOS_RATE;
+ int ret, total_vfs, i;
+
+ total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+ if (fun_index > total_vfs)
+ return -EINVAL;
+
+ qm->factor[fun_index].func_qos = qos;
+
+ ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
+ if (ret) {
+ dev_err(dev, "failed to calculate shaper parameter!\n");
+ return -EINVAL;
+ }
+
+ for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
+ /* The base number of queue reuse for different alg type */
+ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
+ if (ret) {
+ dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
+{
+ u64 cir_u = 0, cir_b = 0, cir_s = 0;
+ u64 shaper_vft, ir_calc, ir;
+ unsigned int val;
+ u32 error_rate;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), POLL_PERIOD,
+ POLL_TIMEOUT);
+ if (ret)
+ return 0;
+
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
+ writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
+ writel(fun_index, qm->io_base + QM_VFT_CFG);
+
+ writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
+ writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+ val & BIT(0), POLL_PERIOD,
+ POLL_TIMEOUT);
+ if (ret)
+ return 0;
+
+ shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
+ ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
+
+ cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
+ cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
+ cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
+
+ cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
+ cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
+
+ ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
+
+ ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
+
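+ /*
+ * Sanity check: ir_calc is the rate decoded back from the shaper VFT and
+ * ir is the rate implied by the stored qos. If their relative deviation
+ * (scaled by QM_QOS_EXPAND_RATE) exceeds QM_QOS_MIN_ERROR_RATE, the
+ * read-back value is treated as invalid and 0 is returned.
+ */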
+ error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
+ if (error_rate > QM_QOS_MIN_ERROR_RATE) {
+ pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
+ return 0;
+ }
+
+ return ir;
+}
+
+static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
+{
+ struct device *dev = &qm->pdev->dev;
+ u64 mb_cmd;
+ u32 qos;
+ int ret;
+
+ qos = qm_get_shaper_vft_qos(qm, fun_num);
+ if (!qos) {
+ dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
+ return;
+ }
+
+ mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
+ ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ if (ret)
+ dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+}
+
+static int qm_vf_read_qos(struct hisi_qm *qm)
+{
+ int cnt = 0;
+ int ret;
+
+ /* reset mailbox qos val */
+ qm->mb_qos = 0;
+
+ /* vf ping pf to get function qos */
+ if (qm->ops->ping_pf) {
+ ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
+ if (ret) {
+ pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+ return ret;
+ }
+ }
+
+ while (true) {
+ msleep(QM_WAIT_DST_ACK);
+ if (qm->mb_qos)
+ break;
+
+ if (++cnt > QM_MAX_VF_WAIT_COUNT) {
+ pci_err(qm->pdev, "PF ping VF timeout!\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char tbuf[QM_DBG_READ_LEN];
+ u32 qos_val, ir;
+ int ret;
+
+ /* Mailbox and reset cannot be operated at the same time */
+ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
+ return -EAGAIN;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ir = qm_get_shaper_vft_qos(qm, 0);
+ } else {
+ ret = qm_vf_read_qos(qm);
+ if (ret)
+ goto err_get_status;
+ ir = qm->mb_qos;
+ }
+
+ qos_val = ir / QM_QOS_RATE;
+ ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
+
+ ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_get_status:
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+ return ret;
+}
+
+static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
+{
+ int buflen = strlen(buf);
+ int ret, i;
+
+ for (i = 0; i < buflen; i++) {
+ if (!isdigit(buf[i]))
+ return -EINVAL;
+ }
+
+ ret = sscanf(buf, "%ld", val);
+ if (ret != QM_QOS_VAL_NUM)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char tbuf[QM_DBG_READ_LEN];
+ int tmp1, bus, device, function;
+ char tbuf_bdf[QM_DBG_READ_LEN] = {0};
+ char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
+ unsigned int fun_index;
+ unsigned long val = 0;
+ int len, ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return -EINVAL;
+
+ /* Mailbox and reset cannot be operated at the same time */
+ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
+ return -EAGAIN;
+ }
+
+ if (*pos != 0) {
+ ret = 0;
+ goto err_get_status;
+ }
+
+ if (count >= QM_DBG_READ_LEN) {
+ ret = -ENOSPC;
+ goto err_get_status;
+ }
+
+ len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
+ if (len < 0) {
+ ret = len;
+ goto err_get_status;
+ }
+
+ tbuf[len] = '\0';
+ ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf);
+ if (ret != QM_QOS_PARAM_NUM) {
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ ret = qm_qos_value_init(val_buf, &val);
+ if (val == 0 || val > QM_QOS_MAX_VAL || ret) {
+ pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function);
+ if (ret != QM_QOS_BDF_PARAM_NUM) {
+ pci_err(qm->pdev, "input pci bdf value is error!\n");
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ fun_index = device * 8 + function;
+
+ ret = qm_func_shaper_enable(qm, fun_index, val);
+ if (ret) {
+ pci_err(qm->pdev, "failed to enable function shaper!\n");
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
+ ret = count;
+
+err_get_status:
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+ return ret;
+}
+
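+/*
+ * Illustrative usage of the "alg_qos" debugfs node created below (the exact
+ * debugfs directory depends on the accelerator driver's debug_root):
+ *   read the current qos:  cat alg_qos
+ *   set a function's qos:  echo "<domain>:<bus>:<device>.<function> <qos>" > alg_qos
+ * The written qos must be 1 ~ QM_QOS_MAX_VAL and is multiplied by
+ * QM_QOS_RATE to program the function's shaper rate.
+ */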
+static const struct file_operations qm_algqos_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_algqos_read,
+ .write = qm_algqos_write,
+};
+
+/**
+ * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
+ * @qm: The qm for which we want to add debugfs files.
+ *
+ * Create function qos debugfs files.
+ */
+static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
+ qm, &qm_algqos_fops);
+ else
+ debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
+ qm, &qm_algqos_fops);
+}
+
+/**
+ * hisi_qm_debug_init() - Initialize qm related debugfs files.
+ * @qm: The qm for which we want to add debugfs files.
+ *
+ * Create qm related debugfs files.
+ */
+void hisi_qm_debug_init(struct hisi_qm *qm)
+{
+ struct qm_dfx *dfx = &qm->debug.dfx;
+ struct dentry *qm_d;
+ void *data;
+ int i;
+
+ qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
+ qm->debug.qm_d = qm_d;
+
+ /* only show this in PF */
+ if (qm->fun_type == QM_HW_PF) {
+ qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
+ for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
+ qm_create_debugfs_file(qm, qm->debug.qm_d, i);
+ }
+
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+
+ debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
+
+ debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+ &qm_status_fops);
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+ debugfs_create_file(qm_dfx_files[i].name,
+ 0644,
+ qm_d,
+ data,
+ &qm_atomic64_ops);
+ }
+
+ if (qm->ver >= QM_HW_V3)
+ hisi_qm_set_algqos_init(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
+
+/**
+ * hisi_qm_debug_regs_clear() - clear qm debug related registers.
+ * @qm: The qm for which we want to clear its debug registers.
+ */
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
+{
+ struct qm_dfx_registers *regs;
+ int i;
+
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* clear current_q */
+ writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+ writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ /*
+ * these registers are reading and clearing, so clear them after
+ * reading them.
+ */
+ writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
+
+ regs = qm_dfx_regs;
+ for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
+ readl(qm->io_base + regs->reg_offset);
+ regs++;
+ }
+
+ /* clear clear_enable */
+ writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+
/**
* hisi_qm_sriov_enable() - enable virtual functions
* @pdev: the PCIe device
@@ -3416,6 +4327,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
if (pci_vfs_assigned(pdev)) {
pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@@ -3429,6 +4341,9 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
+ /* clear vf function shaper configure array */
+ memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -3527,17 +4442,15 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
-static u32 qm_get_hw_error_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
-}
-
static int qm_check_req_recv(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
u32 val;
+ if (qm->ver >= QM_HW_V3)
+ return 0;
+
writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
(val == ACC_VENDOR_ID_VALUE),
@@ -3608,28 +4521,6 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-static int qm_set_msi(struct hisi_qm *qm, bool set)
-{
- struct pci_dev *pdev = qm->pdev;
-
- if (set) {
- pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
- 0);
- } else {
- pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
- ACC_PEH_MSI_DISABLE);
- if (qm->err_status.is_qm_ecc_mbit ||
- qm->err_status.is_dev_ecc_mbit)
- return 0;
-
- mdelay(1);
- if (readl(qm->io_base + QM_PEH_DFX_INFO0))
- return -EFAULT;
- }
-
- return 0;
-}
-
static int qm_vf_reset_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
@@ -3660,14 +4551,35 @@ stop_fail:
return ret;
}
-static int qm_reset_prepare_ready(struct hisi_qm *qm)
+static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+ enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int ret;
+
+ if (!qm->vfs_num)
+ return 0;
+
+ /* Kunpeng930 supports notifying VFs to stop before PF reset */
+ if (qm->ops->ping_all_vfs) {
+ ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (ret)
+ pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ } else {
+ ret = qm_vf_reset_prepare(qm, stop_reason);
+ if (ret)
+ pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
+ }
+
+ return ret;
+}
+
+static int qm_wait_reset_finish(struct hisi_qm *qm)
+{
int delay = 0;
/* All reset requests need to be queued for processing */
- while (test_and_set_bit(QM_RESETTING, &pf_qm->misc_ctl)) {
+ while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return -EBUSY;
@@ -3676,6 +4588,32 @@ static int qm_reset_prepare_ready(struct hisi_qm *qm)
return 0;
}
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ /*
+ * On Kunpeng920, the PF and VFs on the host do not support
+ * resetting at the same time.
+ */
+ if (qm->ver < QM_HW_V3)
+ return qm_wait_reset_finish(pf_qm);
+
+ return qm_wait_reset_finish(qm);
+}
+
+static void qm_reset_bit_clear(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (qm->ver < QM_HW_V3)
+ clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
+
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+}
+
static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -3687,22 +4625,25 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return ret;
}
- if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
- if (ret) {
- pci_err(pdev, "Fails to stop VFs!\n");
- clear_bit(QM_RESETTING, &qm->misc_ctl);
- return ret;
- }
- }
+ /* PF obtains the information of VF by querying the register. */
+ qm_cmd_uninit(qm);
+
+ /* Whether or not the VFs stop successfully, the soft reset continues. */
+ ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
+ if (ret)
+ pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop QM!\n");
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return ret;
}
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to stop by vfs in soft reset!\n");
+
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return 0;
@@ -3712,6 +4653,10 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
{
u32 nfe_enb = 0;
+ /* Kunpeng930 hardware automatically closes master OOO when an NFE occurs */
+ if (qm->ver >= QM_HW_V3)
+ return;
+
if (!qm->err_status.is_dev_ecc_mbit &&
qm->err_status.is_qm_ecc_mbit &&
qm->err_ini->close_axi_master_ooo) {
@@ -3748,7 +4693,7 @@ static int qm_soft_reset(struct hisi_qm *qm)
}
}
- ret = qm_set_msi(qm, false);
+ ret = qm->ops->set_msi(qm, false);
if (ret) {
pci_err(pdev, "Fails to disable PEH MSI bit.\n");
return ret;
@@ -3770,6 +4715,9 @@ static int qm_soft_reset(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->close_sva_prefetch)
+ qm->err_ini->close_sva_prefetch(qm);
+
ret = qm_set_pf_mse(qm, false);
if (ret) {
pci_err(pdev, "Fails to disable pf MSE bit.\n");
@@ -3830,9 +4778,32 @@ restart_fail:
return ret;
}
-static u32 qm_get_dev_err_status(struct hisi_qm *qm)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
{
- return qm->err_ini->get_dev_hw_err_status(qm);
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ if (!qm->vfs_num)
+ return 0;
+
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Kunpeng930 supports notifying VFs to start after PF reset. */
+ if (qm->ops->ping_all_vfs) {
+ ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (ret)
+ pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
+ } else {
+ ret = qm_vf_reset_done(qm);
+ if (ret)
+ pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
+ }
+
+ return ret;
}
static int qm_dev_hw_init(struct hisi_qm *qm)
@@ -3844,6 +4815,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
{
u32 value;
+ if (qm->err_ini->open_sva_prefetch)
+ qm->err_ini->open_sva_prefetch(qm);
+
+ if (qm->ver >= QM_HW_V3)
+ return;
+
if (!qm->err_status.is_qm_ecc_mbit &&
!qm->err_status.is_dev_ecc_mbit)
return;
@@ -3863,15 +4840,15 @@ static void qm_restart_prepare(struct hisi_qm *qm)
/* clear AM Reorder Buffer ecc mbit source */
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
-
- if (qm->err_ini->open_axi_master_ooo)
- qm->err_ini->open_axi_master_ooo(qm);
}
static void qm_restart_done(struct hisi_qm *qm)
{
u32 value;
+ if (qm->ver >= QM_HW_V3)
+ goto clear_flags;
+
if (!qm->err_status.is_qm_ecc_mbit &&
!qm->err_status.is_dev_ecc_mbit)
return;
@@ -3881,6 +4858,7 @@ static void qm_restart_done(struct hisi_qm *qm)
value |= qm->err_info.msi_wr_port;
writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+clear_flags:
qm->err_status.is_qm_ecc_mbit = false;
qm->err_status.is_dev_ecc_mbit = false;
}
@@ -3890,7 +4868,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
- ret = qm_set_msi(qm, true);
+ ret = qm->ops->set_msi(qm, true);
if (ret) {
pci_err(pdev, "Fails to enable PEH MSI bit!\n");
return ret;
@@ -3917,6 +4895,9 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
}
qm_restart_prepare(qm);
+ hisi_qm_dev_err_init(qm);
+ if (qm->err_ini->open_axi_master_ooo)
+ qm->err_ini->open_axi_master_ooo(qm);
ret = qm_restart(qm);
if (ret) {
@@ -3924,24 +4905,18 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
return ret;
}
- if (qm->vfs_num) {
- ret = qm_vf_q_assign(qm, qm->vfs_num);
- if (ret) {
- pci_err(pdev, "Failed to assign queue!\n");
- return ret;
- }
- }
+ ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
+ if (ret)
+ pci_err(pdev, "failed to start vfs by pf in soft reset.\n");
- ret = qm_vf_reset_done(qm);
- if (ret) {
- pci_err(pdev, "Failed to start VFs!\n");
- return -EPERM;
- }
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to start by vfs in soft reset!\n");
- hisi_qm_dev_err_init(qm);
+ qm_cmd_init(qm);
qm_restart_done(qm);
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return 0;
}
@@ -3962,13 +4937,13 @@ static int qm_controller_reset(struct hisi_qm *qm)
ret = qm_soft_reset(qm);
if (ret) {
pci_err(pdev, "Controller reset failed (%d)\n", ret);
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return ret;
}
ret = qm_controller_reset_done(qm);
if (ret) {
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
return ret;
}
@@ -4005,21 +4980,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
-/* check the interrupt is ecc-mbit error or not */
-static int qm_check_dev_error(struct hisi_qm *qm)
-{
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
- if (ret)
- return ret;
-
- return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask);
-}
-
void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
@@ -4045,14 +5005,13 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
return;
}
- if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm, QM_FLR);
- if (ret) {
- pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
- ret);
- return;
- }
- }
+ /* PF obtains the information of VF by querying the register. */
+ if (qm->fun_type == QM_HW_PF)
+ qm_cmd_uninit(qm);
+
+ ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
+ if (ret)
+ pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
@@ -4060,6 +5019,10 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
return;
}
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to stop by vfs in FLR!\n");
+
pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
@@ -4085,42 +5048,38 @@ void hisi_qm_reset_done(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
- hisi_qm_dev_err_init(pf_qm);
-
- ret = qm_restart(qm);
- if (ret) {
- pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
- goto flr_done;
- }
-
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_hw_init(qm);
if (ret) {
pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
goto flr_done;
}
+ }
- if (!qm->vfs_num)
- goto flr_done;
-
- ret = qm_vf_q_assign(qm, qm->vfs_num);
- if (ret) {
- pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
- goto flr_done;
- }
+ hisi_qm_dev_err_init(pf_qm);
- ret = qm_vf_reset_done(qm);
- if (ret) {
- pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
- goto flr_done;
- }
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
+ goto flr_done;
}
+ ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
+ if (ret)
+ pci_err(pdev, "failed to start vfs by pf in FLR.\n");
+
+ ret = qm_wait_vf_prepare_finish(qm);
+ if (ret)
+ pci_err(pdev, "failed to start by vfs in FLR!\n");
+
flr_done:
+ if (qm->fun_type == QM_HW_PF)
+ qm_cmd_init(qm);
+
if (qm_flr_reset_complete(pdev))
pci_info(pdev, "FLR reset complete\n");
- clear_bit(QM_RESETTING, &qm->misc_ctl);
+ qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
@@ -4149,7 +5108,7 @@ static int qm_irq_register(struct hisi_qm *qm)
if (ret)
return ret;
- if (qm->ver != QM_HW_V1) {
+ if (qm->ver > QM_HW_V1) {
ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
qm_aeq_irq, 0, qm->dev_name, qm);
if (ret)
@@ -4164,8 +5123,18 @@ static int qm_irq_register(struct hisi_qm *qm)
}
}
+ if (qm->ver > QM_HW_V2) {
+ ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
+ qm_mb_cmd_irq, 0, qm->dev_name, qm);
+ if (ret)
+ goto err_mb_cmd_irq;
+ }
+
return 0;
+err_mb_cmd_irq:
+ if (qm->fun_type == QM_HW_PF)
+ free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
err_abonormal_irq:
free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
err_aeq_irq:
@@ -4202,6 +5171,183 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
}
+static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
+ enum qm_stop_reason stop_reason)
+{
+ enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ dev_err(&pdev->dev, "reset prepare not ready!\n");
+ atomic_set(&qm->status.flags, QM_STOP);
+ cmd = QM_VF_PREPARE_FAIL;
+ goto err_prepare;
+ }
+
+ ret = hisi_qm_stop(qm, stop_reason);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
+ atomic_set(&qm->status.flags, QM_STOP);
+ cmd = QM_VF_PREPARE_FAIL;
+ goto err_prepare;
+ }
+
+err_prepare:
+ pci_save_state(pdev);
+ ret = qm->ops->ping_pf(qm, cmd);
+ if (ret)
+ dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
+}
+
+static void qm_pf_reset_vf_done(struct hisi_qm *qm)
+{
+ enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_restore_state(pdev);
+ ret = hisi_qm_start(qm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
+ cmd = QM_VF_START_FAIL;
+ }
+
+ ret = qm->ops->ping_pf(qm, cmd);
+ if (ret)
+ dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
+
+ qm_reset_bit_clear(qm);
+}
+
+static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ u32 val, cmd;
+ u64 msg;
+ int ret;
+
+ /* Wait for reset to finish */
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
+ val == BIT(0), QM_VF_RESET_WAIT_US,
+ QM_VF_RESET_WAIT_TIMEOUT_US);
+ /* hardware completion status should be available by this time */
+ if (ret) {
+ dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Whether or not the message is received successfully, the VF
+ * needs to ack the PF by clearing the interrupt.
+ */
+ ret = qm_get_mb_cmd(qm, &msg, 0);
+ qm_clear_cmd_interrupt(qm, 0);
+ if (ret) {
+ dev_err(dev, "failed to get msg from PF in reset done!\n");
+ return ret;
+ }
+
+ cmd = msg & QM_MB_CMD_DATA_MASK;
+ if (cmd != QM_PF_RESET_DONE) {
+ dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void qm_pf_reset_vf_process(struct hisi_qm *qm,
+ enum qm_stop_reason stop_reason)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ dev_info(dev, "device reset start...\n");
+
+ /* The message is obtained by querying the register during resetting */
+ qm_cmd_uninit(qm);
+ qm_pf_reset_vf_prepare(qm, stop_reason);
+
+ ret = qm_wait_pf_reset_finish(qm);
+ if (ret)
+ goto err_get_status;
+
+ qm_pf_reset_vf_done(qm);
+ qm_cmd_init(qm);
+
+ dev_info(dev, "device reset done.\n");
+
+ return;
+
+err_get_status:
+ qm_cmd_init(qm);
+ qm_reset_bit_clear(qm);
+}
+
+static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
+{
+ struct device *dev = &qm->pdev->dev;
+ u64 msg;
+ u32 cmd;
+ int ret;
+
+ /*
+ * Get the msg from the source by mailbox. Whether or not the message
+ * is received successfully, the destination needs to ack the source
+ * by clearing the interrupt.
+ */
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ qm_clear_cmd_interrupt(qm, BIT(fun_num));
+ if (ret) {
+ dev_err(dev, "failed to get msg from source!\n");
+ return;
+ }
+
+ cmd = msg & QM_MB_CMD_DATA_MASK;
+ switch (cmd) {
+ case QM_PF_FLR_PREPARE:
+ qm_pf_reset_vf_process(qm, QM_FLR);
+ break;
+ case QM_PF_SRST_PREPARE:
+ qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
+ break;
+ case QM_VF_GET_QOS:
+ qm_vf_get_qos(qm, fun_num);
+ break;
+ case QM_PF_SET_QOS:
+ qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ break;
+ default:
+ dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ break;
+ }
+}
+
+static void qm_cmd_process(struct work_struct *cmd_process)
+{
+ struct hisi_qm *qm = container_of(cmd_process,
+ struct hisi_qm, cmd_process);
+ u32 vfs_num = qm->vfs_num;
+ u64 val;
+ u32 i;
+
+ if (qm->fun_type == QM_HW_PF) {
+ val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
+ if (!val)
+ return;
+
+ for (i = 1; i <= vfs_num; i++) {
+ if (val & BIT(i))
+ qm_handle_cmd_msg(qm, i);
+ }
+
+ return;
+ }
+
+ qm_handle_cmd_msg(qm, 0);
+}
+
/**
* hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
* @qm: The qm needs add.
@@ -4212,11 +5358,9 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
*/
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
+ struct device *dev = &qm->pdev->dev;
int flag = 0;
int ret = 0;
- /* HW V2 not support both use uacce sva mode and hardware crypto algs */
- if (qm->ver <= QM_HW_V2 && qm->use_sva)
- return 0;
mutex_lock(&qm_list->lock);
if (list_empty(&qm_list->list))
@@ -4224,6 +5368,11 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
list_add_tail(&qm->list, &qm_list->list);
mutex_unlock(&qm_list->lock);
+ if (qm->ver <= QM_HW_V2 && qm->use_sva) {
+ dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n");
+ return 0;
+ }
+
if (flag) {
ret = qm_list->register_to_crypto(qm);
if (ret) {
@@ -4248,13 +5397,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
*/
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
- if (qm->ver <= QM_HW_V2 && qm->use_sva)
- return;
-
mutex_lock(&qm_list->lock);
list_del(&qm->list);
mutex_unlock(&qm_list->lock);
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return;
+
if (list_empty(&qm_list->list))
qm_list->unregister_from_crypto(qm);
}
@@ -4389,6 +5538,94 @@ err_disable_pcidev:
return ret;
}
+static void hisi_qm_init_work(struct hisi_qm *qm)
+{
+ INIT_WORK(&qm->work, qm_work_process);
+ if (qm->fun_type == QM_HW_PF)
+ INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
+
+ if (qm->ver > QM_HW_V2)
+ INIT_WORK(&qm->cmd_process, qm_cmd_process);
+}
+
+static int hisi_qp_alloc_memory(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t qp_dma_size;
+ int i, ret;
+
+ qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
+ if (!qm->qp_array)
+ return -ENOMEM;
+
+ /* one more page for device or qp statuses */
+ qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
+ sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
+ for (i = 0; i < qm->qp_num; i++) {
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ if (ret)
+ goto err_init_qp_mem;
+
+ dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
+ }
+
+ return 0;
+err_init_qp_mem:
+ hisi_qp_memory_uninit(qm, i);
+
+ return ret;
+}
+
+static int hisi_qm_memory_init(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret, total_vfs;
+ size_t off = 0;
+
+ total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+ qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ if (!qm->factor)
+ return -ENOMEM;
+
+#define QM_INIT_BUF(qm, type, num) do { \
+ (qm)->type = ((qm)->qdma.va + (off)); \
+ (qm)->type##_dma = (qm)->qdma.dma + (off); \
+ off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
+} while (0)
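+ /*
+ * The QM control structures share one coherent qdma allocation:
+ * QM_INIT_BUF() hands out the eqe, aeqe, sqc and cqc regions in turn,
+ * each QMC_ALIGN()ed, by advancing 'off' through qdma.va/qdma.dma.
+ */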
+
+ idr_init(&qm->qp_idr);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
+ QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
+ qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
+ GFP_ATOMIC);
+ dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
+ if (!qm->qdma.va) {
+ ret = -ENOMEM;
+ goto err_alloc_qdma;
+ }
+
+ QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
+ QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, sqc, qm->qp_num);
+ QM_INIT_BUF(qm, cqc, qm->qp_num);
+
+ ret = hisi_qp_alloc_memory(qm);
+ if (ret)
+ goto err_alloc_qp_array;
+
+ return 0;
+
+err_alloc_qp_array:
+ dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
+err_alloc_qdma:
+ kfree(qm->factor);
+
+ return ret;
+}
+
/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
@@ -4426,10 +5663,8 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
goto err_alloc_uacce;
- INIT_WORK(&qm->work, qm_work_process);
- if (qm->fun_type == QM_HW_PF)
- INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
-
+ hisi_qm_init_work(qm);
+ qm_cmd_init(qm);
atomic_set(&qm->status.flags, QM_INIT);
return 0;
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index acefdf8b3a50..035eaf8c442d 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -76,6 +76,9 @@
#define QM_Q_DEPTH 1024
#define QM_MIN_QNUM 2
#define HISI_ACC_SGL_SGE_NR_MAX 255
+#define QM_SHAPER_CFG 0x100164
+#define QM_SHAPER_ENABLE BIT(30)
+#define QM_SHAPER_TYPE1_OFFSET 10
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
@@ -148,6 +151,14 @@ struct qm_debug {
struct debugfs_file files[DEBUG_FILE_NUM];
};
+struct qm_shaper_factor {
+ u32 func_qos;
+ u64 cir_b;
+ u64 cir_u;
+ u64 cir_s;
+ u64 cbs_s;
+};
+
struct qm_dma {
void *va;
dma_addr_t dma;
@@ -188,6 +199,8 @@ struct hisi_qm_err_ini {
void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
void (*open_axi_master_ooo)(struct hisi_qm *qm);
void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ void (*open_sva_prefetch)(struct hisi_qm *qm);
+ void (*close_sva_prefetch)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
void (*err_info_init)(struct hisi_qm *qm);
};
@@ -248,6 +261,7 @@ struct hisi_qm {
struct workqueue_struct *wq;
struct work_struct work;
struct work_struct rst_work;
+ struct work_struct cmd_process;
const char *algs;
bool use_sva;
@@ -259,6 +273,9 @@ struct hisi_qm {
resource_size_t db_phys_base;
struct uacce_device *uacce;
int mode;
+ struct qm_shaper_factor *factor;
+ u32 mb_qos;
+ u32 type_rate;
};
struct hisi_qp_status {
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index dfdce2f21e65..018415b9840a 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -13,14 +13,14 @@ struct sec_alg_res {
dma_addr_t pbuf_dma;
u8 *c_ivin;
dma_addr_t c_ivin_dma;
+ u8 *a_ivin;
+ dma_addr_t a_ivin_dma;
u8 *out_mac;
dma_addr_t out_mac_dma;
};
/* Cipher request of SEC private */
struct sec_cipher_req {
- struct hisi_acc_hw_sgl *c_in;
- dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
u8 *c_ivin;
@@ -33,15 +33,25 @@ struct sec_cipher_req {
struct sec_aead_req {
u8 *out_mac;
dma_addr_t out_mac_dma;
+ u8 *a_ivin;
+ dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
};
/* SEC request of Crypto */
struct sec_req {
- struct sec_sqe sec_sqe;
+ union {
+ struct sec_sqe sec_sqe;
+ struct sec_sqe3 sec_sqe3;
+ };
struct sec_ctx *ctx;
struct sec_qp_ctx *qp_ctx;
+ /**
+ * Common parameters of the SEC request.
+ */
+ struct hisi_acc_hw_sgl *in;
+ dma_addr_t in_dma;
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
struct list_head backlog_head;
@@ -81,7 +91,9 @@ struct sec_auth_ctx {
u8 a_key_len;
u8 mac_len;
u8 a_alg;
+ bool fallback;
struct crypto_shash *hash_tfm;
+ struct crypto_aead *fallback_aead_tfm;
};
/* SEC cipher context which cipher's relatives */
@@ -94,6 +106,10 @@ struct sec_cipher_ctx {
u8 c_mode;
u8 c_alg;
u8 c_key_len;
+
+ /* add software support */
+ bool fallback;
+ struct crypto_sync_skcipher *fbtfm;
};
/* SEC queue context which defines queue's relatives */
@@ -137,6 +153,7 @@ struct sec_ctx {
bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
+ u8 type_supported;
struct device *dev;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 133aede8bf07..6a45bd23b363 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/aes.h>
+#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
@@ -21,6 +22,7 @@
#define SEC_PRIORITY 4001
#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
@@ -37,10 +39,23 @@
#define SEC_AEAD_ALG_OFFSET 11
#define SEC_AUTH_OFFSET 6
+#define SEC_DE_OFFSET_V3 9
+#define SEC_SCENE_OFFSET_V3 5
+#define SEC_CKEY_OFFSET_V3 13
+#define SEC_SRC_SGL_OFFSET_V3 11
+#define SEC_DST_SGL_OFFSET_V3 14
+#define SEC_CALG_OFFSET_V3 4
+#define SEC_AKEY_OFFSET_V3 9
+#define SEC_MAC_OFFSET_V3 4
+#define SEC_AUTH_ALG_OFFSET_V3 15
+#define SEC_CIPHER_AUTH_V3 0xbf
+#define SEC_AUTH_CIPHER_V3 0x40
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
+#define SEC_ICV_MASK 0x000E
+#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
@@ -66,6 +81,25 @@
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
+#define SEC_ICV_ERR 0x2
+#define MIN_MAC_LEN 4
+#define MAC_LEN_MASK 0x1U
+#define MAX_INPUT_DATA_LEN 0xFFFE00
+#define BITS_MASK 0xFF
+#define BYTE_BITS 0x8
+#define SEC_XTS_NAME_SZ 0x3
+#define IV_CM_CAL_NUM 2
+#define IV_CL_MASK 0x7
+#define IV_CL_MIN 2
+#define IV_CL_MID 4
+#define IV_CL_MAX 8
+#define IV_FLAGS_OFFSET 0x6
+#define IV_CM_OFFSET 0x3
+#define IV_LAST_BYTE1 1
+#define IV_LAST_BYTE2 2
+#define IV_LAST_BYTE_MASK 0xFF
+#define IV_CTR_INIT 0x1
+#define IV_BYTE_OFFSET 0x8
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
@@ -124,22 +158,59 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_aead_verify(struct sec_req *req)
+static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
- struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->aead_req.out_mac;
- u8 *mac = mac_out + SEC_MAX_MAC_LEN;
- struct scatterlist *sgl = aead_req->src;
- size_t sz;
+ struct sec_sqe *bd = resp;
+
+ status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
+ status->flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ status->tag = le16_to_cpu(bd->type2.tag);
+ status->err_type = bd->type2.error_type;
+
+ return bd->type_cipher_auth & SEC_TYPE_MASK;
+}
+
+static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
+{
+ struct sec_sqe3 *bd3 = resp;
+
+ status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
+ status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
+ status->flag = (le16_to_cpu(bd3->done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ status->tag = le64_to_cpu(bd3->tag);
+ status->err_type = bd3->error_type;
+
+ return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
+}
+
+static int sec_cb_status_check(struct sec_req *req,
+ struct bd_status *status)
+{
+ struct sec_ctx *ctx = req->ctx;
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
- aead_req->cryptlen + aead_req->assoclen -
- authsize);
- if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
- dev_err(req->ctx->dev, "aead verify failure!\n");
- return -EBADMSG;
+ if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
+ dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
+ req->err_type, status->done);
+ return -EIO;
+ }
+
+ if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
+ if (unlikely(status->flag != SEC_SQE_CFLAG)) {
+ dev_err_ratelimited(ctx->dev, "flag[%u]\n",
+ status->flag);
+ return -EIO;
+ }
+ } else if (unlikely(ctx->alg_type == SEC_AEAD)) {
+ if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
+ status->icv == SEC_ICV_ERR)) {
+ dev_err_ratelimited(ctx->dev,
+ "flag[%u], icv[%u]\n",
+ status->flag, status->icv);
+ return -EBADMSG;
+ }
}
return 0;
@@ -149,43 +220,38 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
- struct sec_sqe *bd = resp;
+ u8 type_supported = qp_ctx->ctx->type_supported;
+ struct bd_status status;
struct sec_ctx *ctx;
struct sec_req *req;
- u16 done, flag;
- int err = 0;
+ int err;
u8 type;
- type = bd->type_cipher_auth & SEC_TYPE_MASK;
- if (unlikely(type != SEC_BD_TYPE2)) {
+ if (type_supported == SEC_BD_TYPE2) {
+ type = pre_parse_finished_bd(&status, resp);
+ req = qp_ctx->req_list[status.tag];
+ } else {
+ type = pre_parse_finished_bd3(&status, resp);
+ req = (void *)(uintptr_t)status.tag;
+ }
+
+ if (unlikely(type != type_supported)) {
atomic64_inc(&dfx->err_bd_cnt);
pr_err("err bd type [%d]\n", type);
return;
}
- req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
if (unlikely(!req)) {
atomic64_inc(&dfx->invalid_req_cnt);
atomic_inc(&qp->qp_status.used);
return;
}
- req->err_type = bd->type2.error_type;
+
+ req->err_type = status.err_type;
ctx = req->ctx;
- done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
- flag = (le16_to_cpu(bd->type2.done_flag) &
- SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
- if (unlikely(req->err_type || done != SEC_SQE_DONE ||
- (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
- (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
- dev_err_ratelimited(ctx->dev,
- "err_type[%d],done[%d],flag[%d]\n",
- req->err_type, done, flag);
- err = -EIO;
+ err = sec_cb_status_check(req, &status);
+ if (err)
atomic64_inc(&dfx->done_flag_cnt);
- }
-
- if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
- err = sec_aead_verify(req);
atomic64_inc(&dfx->recv_cnt);
@@ -253,6 +319,30 @@ static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
res->c_ivin, res->c_ivin_dma);
}
+static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->a_ivin_dma, GFP_KERNEL);
+ if (!res->a_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
+ res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
+}
+
+static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->a_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->a_ivin, res->a_ivin_dma);
+}
+
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
int i;
@@ -335,9 +425,13 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
return ret;
if (ctx->alg_type == SEC_AEAD) {
+ ret = sec_alloc_aiv_resource(dev, res);
+ if (ret)
+ goto alloc_aiv_fail;
+
ret = sec_alloc_mac_resource(dev, res);
if (ret)
- goto alloc_fail;
+ goto alloc_mac_fail;
}
if (ctx->pbuf_supported) {
ret = sec_alloc_pbuf_resource(dev, res);
@@ -352,7 +446,10 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
alloc_pbuf_fail:
if (ctx->alg_type == SEC_AEAD)
sec_free_mac_resource(dev, qp_ctx->res);
-alloc_fail:
+alloc_mac_fail:
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_aiv_resource(dev, res);
+alloc_aiv_fail:
sec_free_civ_resource(dev, res);
return ret;
}
@@ -382,10 +479,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp = ctx->qps[qp_ctx_id];
qp->req_type = 0;
qp->qp_ctx = qp_ctx;
- qp->req_cb = sec_req_cb;
qp_ctx->qp = qp;
qp_ctx->ctx = ctx;
+ qp->req_cb = sec_req_cb;
+
mutex_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
INIT_LIST_HEAD(&qp_ctx->backlog);
@@ -536,6 +634,26 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
a_ctx->a_key, a_ctx->a_key_dma);
}
+static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
+{
+ const char *alg = crypto_tfm_alg_name(&tfm->base);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ c_ctx->fallback = false;
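+ /*
+ * Only XTS gets a software fallback tfm here; it is used when setkey
+ * selects a key size the engine cannot handle (see the
+ * SEC_XTS_MID_KEY_SIZE case, which sets c_ctx->fallback).
+ */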
+ if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
+ return 0;
+
+ c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(c_ctx->fbtfm)) {
+ pr_err("failed to alloc fallback tfm!\n");
+ return PTR_ERR(c_ctx->fbtfm);
+ }
+
+ return 0;
+}
+
static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -557,8 +675,14 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
if (ret)
goto err_cipher_init;
+ ret = sec_skcipher_fbtfm_init(tfm);
+ if (ret)
+ goto err_fbtfm_init;
+
return 0;
+err_fbtfm_init:
+ sec_cipher_uninit(ctx);
err_cipher_init:
sec_ctx_base_uninit(ctx);
return ret;
@@ -568,6 +692,9 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ if (ctx->c_ctx.fbtfm)
+ crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
+
sec_cipher_uninit(ctx);
sec_ctx_base_uninit(ctx);
}
@@ -607,6 +734,9 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
case SEC_XTS_MIN_KEY_SIZE:
c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
+ case SEC_XTS_MID_KEY_SIZE:
+ c_ctx->fallback = true;
+ break;
case SEC_XTS_MAX_KEY_SIZE:
c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
@@ -615,19 +745,25 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
return -EINVAL;
}
} else {
- switch (keylen) {
- case AES_KEYSIZE_128:
- c_ctx->c_key_len = SEC_CKEY_128BIT;
- break;
- case AES_KEYSIZE_192:
- c_ctx->c_key_len = SEC_CKEY_192BIT;
- break;
- case AES_KEYSIZE_256:
- c_ctx->c_key_len = SEC_CKEY_256BIT;
- break;
- default:
- pr_err("hisi_sec2: aes key error!\n");
+ if (c_ctx->c_alg == SEC_CALG_SM4 &&
+ keylen != AES_KEYSIZE_128) {
+ pr_err("hisi_sec2: sm4 key error!\n");
return -EINVAL;
+ } else {
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aes key error!\n");
+ return -EINVAL;
+ }
}
}
@@ -672,7 +808,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
-
+ if (c_ctx->fallback) {
+ ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "failed to set fallback skcipher key!\n");
+ return ret;
+ }
+ }
return 0;
}
@@ -686,22 +828,30 @@ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-
+GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
+GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
+GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
-
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
+GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
+GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src)
{
- struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct aead_request *aead_req = a_req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
+ struct crypto_aead *tfm;
+ size_t authsize;
+ u8 *mac_offset;
if (ctx->alg_type == SEC_AEAD)
copy_size = aead_req->cryptlen + aead_req->assoclen;
@@ -709,15 +859,20 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
copy_size = c_req->c_len;
pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf,
- copy_size);
+ qp_ctx->res[req_id].pbuf, copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
}
+ if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
+ tfm = crypto_aead_reqtfm(aead_req);
+ authsize = crypto_aead_authsize(tfm);
+ mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
+ memcpy(a_req->out_mac, mac_offset, authsize);
+ }
- c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
- c_req->c_out_dma = c_req->c_in_dma;
+ req->in_dma = qp_ctx->res[req_id].pbuf_dma;
+ c_req->c_out_dma = req->in_dma;
return 0;
}
@@ -728,7 +883,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -738,10 +892,29 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
copy_size = c_req->c_len;
pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
- qp_ctx->res[req_id].pbuf,
- copy_size);
+ qp_ctx->res[req_id].pbuf, copy_size);
if (unlikely(pbuf_length != copy_size))
- dev_err(dev, "copy pbuf data to dst error!\n");
+ dev_err(ctx->dev, "copy pbuf data to dst error!\n");
+}
+
+static int sec_aead_mac_init(struct sec_aead_req *req)
+{
+ struct aead_request *aead_req = req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac_out = req->out_mac;
+ struct scatterlist *sgl = aead_req->src;
+ size_t copy_size;
+ off_t skip_size;
+
+ /* Copy input mac */
+ skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
+ authsize, skip_size);
+ if (unlikely(copy_size != authsize))
+ return -EINVAL;
+
+ return 0;
}
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
@@ -755,37 +928,48 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
int ret;
if (req->use_pbuf) {
- ret = sec_cipher_pbuf_map(ctx, req, src);
c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
if (ctx->alg_type == SEC_AEAD) {
+ a_req->a_ivin = res->a_ivin;
+ a_req->a_ivin_dma = res->a_ivin_dma;
a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
a_req->out_mac_dma = res->pbuf_dma +
SEC_PBUF_MAC_OFFSET;
}
+ ret = sec_cipher_pbuf_map(ctx, req, src);
return ret;
}
c_req->c_ivin = res->c_ivin;
c_req->c_ivin_dma = res->c_ivin_dma;
if (ctx->alg_type == SEC_AEAD) {
+ a_req->a_ivin = res->a_ivin;
+ a_req->a_ivin_dma = res->a_ivin_dma;
a_req->out_mac = res->out_mac;
a_req->out_mac_dma = res->out_mac_dma;
}
- c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
- qp_ctx->c_in_pool,
- req->req_id,
- &c_req->c_in_dma);
-
- if (IS_ERR(c_req->c_in)) {
+ req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &req->in_dma);
+ if (IS_ERR(req->in)) {
dev_err(dev, "fail to dma map input sgl buffers!\n");
- return PTR_ERR(c_req->c_in);
+ return PTR_ERR(req->in);
+ }
+
+ if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
+ ret = sec_aead_mac_init(a_req);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to init mac data for ICV!\n");
+ return ret;
+ }
}
if (dst == src) {
- c_req->c_out = c_req->c_in;
- c_req->c_out_dma = c_req->c_in_dma;
+ c_req->c_out = req->in;
+ c_req->c_out_dma = req->in_dma;
} else {
c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
qp_ctx->c_out_pool,
@@ -794,7 +978,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
if (IS_ERR(c_req->c_out)) {
dev_err(dev, "fail to dma map output sgl buffers!\n");
- hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in);
return PTR_ERR(c_req->c_out);
}
}
@@ -812,7 +996,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
sec_cipher_pbuf_unmap(ctx, req, dst);
} else {
if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in);
hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
}
@@ -883,6 +1067,28 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
return 0;
}
+static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ if (unlikely(a_ctx->fallback_aead_tfm))
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
+
+ return 0;
+}
+
+static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
+ struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
+ crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
+ return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
+}
+
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
@@ -891,6 +1097,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
int ret;
@@ -900,6 +1107,23 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
+ if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
+ ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
+ if (ret) {
+ dev_err(dev, "set sec aes ccm cipher key err!\n");
+ return ret;
+ }
+ memcpy(c_ctx->c_key, key, keylen);
+
+ if (unlikely(a_ctx->fallback_aead_tfm)) {
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
if (crypto_authenc_extractkeys(&keys, key, keylen))
goto bad_key;
@@ -915,6 +1139,12 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
+ if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
+ (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ dev_err(dev, "MAC or AUTH key length error!\n");
+ goto bad_key;
+ }
+
return 0;
bad_key:
@@ -936,6 +1166,14 @@ GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
+ SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
+ SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
+ SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
+ SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -998,7 +1236,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
@@ -1014,29 +1252,86 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
sec_sqe->type_cipher_auth = bd_type | cipher;
- if (req->use_pbuf)
+ /* Set destination and source address type */
+ if (req->use_pbuf) {
sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
- else
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ } else {
sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ }
+
+ sec_sqe->sdm_addr_type |= da_type;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
- if (c_req->c_in_dma != c_req->c_out_dma)
+ if (req->in_dma != c_req->c_out_dma)
de = 0x1 << SEC_DE_OFFSET;
sec_sqe->sds_sa_type = (de | scene | sa_type);
- /* Just set DST address type */
- if (req->use_pbuf)
- da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
- else
- da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
- sec_sqe->sdm_addr_type |= da_type;
-
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
+static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ u32 bd_param = 0;
+ u16 cipher;
+
+ memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
+
+ sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
+ c_ctx->c_mode;
+ sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET_V3);
+
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC;
+ else
+ cipher = SEC_CIPHER_DEC;
+ sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
+
+ if (req->use_pbuf) {
+ bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
+ bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
+ } else {
+ bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
+ bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
+ }
+
+ bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
+ if (req->in_dma != c_req->c_out_dma)
+ bd_param |= 0x1 << SEC_DE_OFFSET_V3;
+
+ bd_param |= SEC_BD_TYPE3;
+ sec_sqe3->bd_param = cpu_to_le32(bd_param);
+
+ sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
+ sec_sqe3->tag = cpu_to_le64(req);
+
+ return 0;
+}
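Going by the bit layout documented for struct sec_sqe3 later in this patch (type in bits 0~3, scene in 5~8, de in 9~10, src/dst address type in 11~13/14~16), an SGL-to-SGL type-3 BD in the common scene with DE set would be assembled roughly as bd_param = 0x3 | (SEC_COMM_SCENE << 5) | (1 << 9) | (SEC_SGL << 11) | (SEC_SGL << 14). The concrete SEC_*_OFFSET_V3 and SEC_SGL values are not visible in this hunk, so treat the shifts as assumptions for illustration only.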
+
+/* increment the counter (big-endian 128-bit integer); 'bits' is the IV length in bytes */
+static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
+{
+ do {
+ --bits;
+ nums += counter[bits];
+ counter[bits] = nums & BITS_MASK;
+ nums >>= BYTE_BITS;
+ } while (bits && nums);
+}
+
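As a reference for the carry propagation in ctr_iv_inc() above, here is a minimal userspace sketch of the same increment; it assumes BITS_MASK is 0xff and BYTE_BITS is 8 (the byte-at-a-time loop implies those values, but the macros themselves are defined outside this hunk):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: add 'nums' to a big-endian counter of 'len' bytes. */
	static void ctr_inc_demo(uint8_t *counter, uint8_t len, uint32_t nums)
	{
		do {
			--len;
			nums += counter[len];       /* add the carry into this byte */
			counter[len] = nums & 0xff; /* keep the low byte */
			nums >>= 8;                 /* propagate the rest upwards */
		} while (len && nums);
	}

	int main(void)
	{
		uint8_t iv[16] = { [14] = 0xff, [15] = 0xff };

		ctr_inc_demo(iv, 16, 1);	/* ...00 ff ff + 1 -> ...01 00 00 */
		printf("%02x %02x %02x\n", iv[13], iv[14], iv[15]);
		return 0;
	}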
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
struct aead_request *aead_req = req->aead_req.aead_req;
@@ -1060,10 +1355,17 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
cryptlen = aead_req->cryptlen;
}
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
- cryptlen - iv_size);
- if (unlikely(sz != iv_size))
- dev_err(req->ctx->dev, "copy output iv error!\n");
+ if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
+ dev_err(req->ctx->dev, "copy output iv error!\n");
+ } else {
+ sz = cryptlen / iv_size;
+ if (cryptlen % iv_size)
+ sz += 1;
+ ctr_iv_inc(iv, iv_size, sz);
+ }
}
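The two-step computation of sz above is a ceiling division, i.e. sz = DIV_ROUND_UP(cryptlen, iv_size): the number of counter blocks consumed, by which the CTR IV is then advanced (for example, cryptlen = 33 with a 16-byte IV advances the counter by 3).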
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
@@ -1094,8 +1396,9 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
sec_free_req_id(req);
- /* IV output at encrypto of CBC mode */
- if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ /* IV output at encryption for CBC/CTR mode */
+ if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
+ ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
while (1) {
@@ -1112,12 +1415,125 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
sk_req->base.complete(&sk_req->base, err);
}
-static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_aead_req *a_req = &req->aead_req;
+ size_t authsize = ctx->a_ctx.mac_len;
+ u32 data_size = aead_req->cryptlen;
+ u8 flage = 0;
+ u8 cm, cl;
+
+ /* the specification has been checked in aead_iv_demension_check() */
+ cl = c_req->c_ivin[0] + 1;
+ c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
+ memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
+ c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
+
+ /* the last 3 bits are L' */
+ flage |= c_req->c_ivin[0] & IV_CL_MASK;
+
+ /* M' occupies bits 3~5, the Adata flag is bit 6 */
+ cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
+ flage |= cm << IV_CM_OFFSET;
+ if (aead_req->assoclen)
+ flage |= 0x01 << IV_FLAGS_OFFSET;
+
+ memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
+ a_req->a_ivin[0] = flage;
+
+ /*
+ * The last 32 bits hold the counter's initial value, but the
+ * nonce takes the first 16 of them, so the tail 16 bits are
+ * filled with the cipher text length.
+ */
+ if (!c_req->encrypt)
+ data_size = aead_req->cryptlen - authsize;
+
+ a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
+ data_size & IV_LAST_BYTE_MASK;
+ data_size >>= IV_BYTE_OFFSET;
+ a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
+ data_size & IV_LAST_BYTE_MASK;
+}
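The flags byte assembled above follows the CCM B0 layout from RFC 3610 (Adata in bit 6, M' in bits 3~5, L' in bits 0~2). A minimal sketch of the same computation, assuming IV_CM_CAL_NUM is 2, IV_CM_OFFSET is 3 and IV_FLAGS_OFFSET is 6 (those defines are not shown in this hunk):

	/* Illustrative only: CCM B0 flags byte per RFC 3610. */
	static unsigned char ccm_flags(unsigned char l_prime, unsigned int authsize,
				       int have_aad)
	{
		unsigned char m_prime = (authsize - 2) / 2;

		return (l_prime & 0x07) | (m_prime << 3) | (have_aad ? 0x40 : 0x00);
	}

	/* e.g. a 13-byte nonce (L' = 1), 16-byte ICV, AAD present -> 0x79 */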
+
+static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_aead_req *a_req = &req->aead_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
+ /*
+ * CCM 16-byte Cipher_IV: {1B_Flag, 13B_IV, 2B_counter};
+ * the counter must be set to 0x01
+ */
+ ctx->a_ctx.mac_len = authsize;
+ /* CCM 16-byte Auth_IV: {1B_AFlag, 13B_IV, 2B_Ptext_length} */
+ set_aead_auth_iv(ctx, req);
+ }
+
+ /* GCM 12-byte Cipher_IV == Auth_IV */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ ctx->a_ctx.mac_len = authsize;
+ memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
+ }
+}
+
+static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+
+ /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
+ sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
+ sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
+
+ if (dir)
+ sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+ else
+ sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+ sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
+ sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
+ sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
+}
+
+static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe3 *sqe3)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+ sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+
+ /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ sqe3->a_key_addr = sqe3->c_key_addr;
+ sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
+ sqe3->auth_mac_key |= SEC_NO_AUTH;
+
+ if (dir)
+ sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
+ else
+ sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
+
+ sqe3->a_len_key = cpu_to_le32(aq->assoclen);
+ sqe3->auth_src_offset = cpu_to_le16(0x0);
+ sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+ sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
@@ -1139,13 +1555,13 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
- sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
-
- if (dir)
+ if (dir) {
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
- else
+ } else {
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
-
+ }
sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
@@ -1165,7 +1581,68 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
return ret;
}
- sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
+ ctx->c_ctx.c_mode == SEC_CMODE_GCM)
+ sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+ else
+ sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+
+ return 0;
+}
+
+static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe3 *sqe3)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
+
+ sqe3->auth_mac_key |=
+ cpu_to_le32((u32)(ctx->mac_len /
+ SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+
+ sqe3->auth_mac_key |=
+ cpu_to_le32((u32)(ctx->a_key_len /
+ SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
+
+ sqe3->auth_mac_key |=
+ cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
+
+ if (dir) {
+ sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
+ sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
+ } else {
+ sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
+ sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
+ }
+ sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
+
+ sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
+}
+
+static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
+ int ret;
+
+ ret = sec_skcipher_bd_fill_v3(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
+ return ret;
+ }
+
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
+ ctx->c_ctx.c_mode == SEC_CMODE_GCM)
+ sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
+ req, sec_sqe3);
+ else
+ sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
+ req, sec_sqe3);
return 0;
}
@@ -1254,7 +1731,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
goto err_uninit_req;
/* Output IV as decrypto */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
+ ctx->c_ctx.c_mode == SEC_CMODE_CTR))
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
@@ -1296,20 +1774,51 @@ static const struct sec_req_op sec_skcipher_req_ops = {
static const struct sec_req_op sec_aead_req_ops = {
.buf_map = sec_aead_sgl_map,
.buf_unmap = sec_aead_sgl_unmap,
- .do_transfer = sec_aead_copy_iv,
+ .do_transfer = sec_aead_set_iv,
.bd_fill = sec_aead_bd_fill,
.bd_send = sec_bd_send,
.callback = sec_aead_callback,
.process = sec_process,
};
+static const struct sec_req_op sec_skcipher_req_ops_v3 = {
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill_v3,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static const struct sec_req_op sec_aead_req_ops_v3 = {
+ .buf_map = sec_aead_sgl_map,
+ .buf_unmap = sec_aead_sgl_unmap,
+ .do_transfer = sec_aead_set_iv,
+ .bd_fill = sec_aead_bd_fill_v3,
+ .bd_send = sec_bd_send,
+ .callback = sec_aead_callback,
+ .process = sec_process,
+};
+
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- ctx->req_op = &sec_skcipher_req_ops;
+ ret = sec_skcipher_init(tfm);
+ if (ret)
+ return ret;
- return sec_skcipher_init(tfm);
+ if (ctx->sec->qm.ver < QM_HW_V3) {
+ ctx->type_supported = SEC_BD_TYPE2;
+ ctx->req_op = &sec_skcipher_req_ops;
+ } else {
+ ctx->type_supported = SEC_BD_TYPE3;
+ ctx->req_op = &sec_skcipher_req_ops_v3;
+ }
+
+ return ret;
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
@@ -1325,15 +1834,22 @@ static int sec_aead_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
- if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(ctx->dev, "get error aead iv size!\n");
+ if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
+ ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ pr_err("get error aead iv size!\n");
return -EINVAL;
}
- ctx->req_op = &sec_aead_req_ops;
ret = sec_ctx_base_init(ctx);
if (ret)
return ret;
+ if (ctx->sec->qm.ver < QM_HW_V3) {
+ ctx->type_supported = SEC_BD_TYPE2;
+ ctx->req_op = &sec_aead_req_ops;
+ } else {
+ ctx->type_supported = SEC_BD_TYPE3;
+ ctx->req_op = &sec_aead_req_ops_v3;
+ }
ret = sec_auth_init(ctx);
if (ret)
@@ -1391,6 +1907,41 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
sec_aead_exit(tfm);
}
+static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
+ int ret;
+
+ ret = sec_aead_init(tfm);
+ if (ret) {
+ dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
+ return ret;
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
+ }
+ a_ctx->fallback = false;
+
+ return 0;
+}
+
+static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
+ sec_aead_exit(tfm);
+}
+
static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
return sec_aead_ctx_init(tfm, "sha1");
@@ -1429,6 +1980,14 @@ static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
ret = -EINVAL;
}
break;
+ case SEC_CMODE_CFB:
+ case SEC_CMODE_OFB:
+ case SEC_CMODE_CTR:
+ if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
+ dev_err(dev, "skcipher HW version error!\n");
+ ret = -EINVAL;
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -1442,7 +2001,8 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!sk_req->src || !sk_req->dst)) {
+ if (unlikely(!sk_req->src || !sk_req->dst ||
+ sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
@@ -1468,6 +2028,37 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
+static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
+ struct skcipher_request *sreq, bool encrypt)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
+ int ret;
+
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
+
+ if (!c_ctx->fbtfm) {
+ dev_err(dev, "failed to check fallback tfm\n");
+ return -EINVAL;
+ }
+
+ skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
+
+ /* the software fallback needs sync mode to do crypto */
+ skcipher_request_set_callback(subreq, sreq->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
+ sreq->cryptlen, sreq->iv);
+ if (encrypt)
+ ret = crypto_skcipher_encrypt(subreq);
+ else
+ ret = crypto_skcipher_decrypt(subreq);
+
+ skcipher_request_zero(subreq);
+
+ return ret;
+}
+
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
@@ -1475,8 +2066,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- if (!sk_req->cryptlen)
+ if (!sk_req->cryptlen) {
+ if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
+ return -EINVAL;
return 0;
+ }
req->flag = sk_req->base.flags;
req->c_req.sk_req = sk_req;
@@ -1487,6 +2081,9 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (unlikely(ret))
return -EINVAL;
+ if (unlikely(ctx->c_ctx.fallback))
+ return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
+
return ctx->req_op->process(ctx, req);
}
@@ -1507,7 +2104,9 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
+ .cra_flags = CRYPTO_ALG_ASYNC |\
+ CRYPTO_ALG_ALLOCATES_MEMORY |\
+ CRYPTO_ALG_NEED_FALLBACK,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
@@ -1541,11 +2140,11 @@ static struct skcipher_alg sec_skciphers[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
DES3_EDE_BLOCK_SIZE, 0)
SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
@@ -1557,6 +2156,90 @@ static struct skcipher_alg sec_skciphers[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
+static struct skcipher_alg sec_skciphers_v3[] = {
+ SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+};
+
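The new table registers the CFB/OFB/CTR variants with a cra_blocksize of SEC_MIN_BLOCK_SZ (1, per the header change below), the usual way to expose stream-like modes that accept arbitrary input lengths, while the chaining IV size stays at AES_BLOCK_SIZE.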
+static int aead_iv_demension_check(struct aead_request *aead_req)
+{
+ u8 cl;
+
+ cl = aead_req->iv[0] + 1;
+ if (cl < IV_CL_MIN || cl > IV_CL_MAX)
+ return -EINVAL;
+
+ if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
+ return -EOVERFLOW;
+
+ return 0;
+}
+
+static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct aead_request *req = sreq->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 c_mode = ctx->c_ctx.c_mode;
+ struct device *dev = ctx->dev;
+ int ret;
+
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
+ dev_err(dev, "aead input spec error!\n");
+ return -EINVAL;
+ }
+
+ if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
+ (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
+ authsize & MAC_LEN_MASK)))) {
+ dev_err(dev, "aead input mac length error!\n");
+ return -EINVAL;
+ }
+
+ if (c_mode == SEC_CMODE_CCM) {
+ ret = aead_iv_demension_check(req);
+ if (ret) {
+ dev_err(dev, "aead input iv param error!\n");
+ return ret;
+ }
+ }
+
+ if (sreq->c_req.encrypt)
+ sreq->c_req.c_len = req->cryptlen;
+ else
+ sreq->c_req.c_len = req->cryptlen - authsize;
+ if (c_mode == SEC_CMODE_CBC) {
+ if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "aead crypto length error!\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
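Reading the checks above with the presumable macro values (MIN_MAC_LEN = 4 and MAC_LEN_MASK = 0x1, neither visible in this hunk), CCM accepts the even ICV lengths 4, 6, 8, 10, 12, 14 and 16, while GCM is limited here to at least DES_BLOCK_SIZE (8) bytes; requests outside those ranges are rejected before a BD is built.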
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
@@ -1565,34 +2248,61 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!req->src || !req->dst || !req->cryptlen ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
+ if (unlikely(!req->src || !req->dst)) {
dev_err(dev, "aead input param error!\n");
return -EINVAL;
}
+ if (ctx->sec->qm.ver == QM_HW_V2) {
+ if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
+ req->cryptlen <= authsize))) {
+ dev_err(dev, "Kunpeng920 not support 0 length!\n");
+ ctx->a_ctx.fallback = true;
+ return -EINVAL;
+ }
+ }
+
+ /* Support AES or SM4 */
+ if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
+ dev_err(dev, "aead crypto alg error!\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(sec_aead_spec_check(ctx, sreq)))
+ return -EINVAL;
+
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
SEC_PBUF_SZ)
sreq->use_pbuf = true;
else
sreq->use_pbuf = false;
- /* Support AES only */
- if (unlikely(c_alg != SEC_CALG_AES)) {
- dev_err(dev, "aead crypto alg error!\n");
- return -EINVAL;
- }
- if (sreq->c_req.encrypt)
- sreq->c_req.c_len = req->cryptlen;
- else
- sreq->c_req.c_len = req->cryptlen - authsize;
+ return 0;
+}
- if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "aead crypto length error!\n");
+static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ struct aead_request *aead_req,
+ bool encrypt)
+{
+ struct aead_request *subreq = aead_request_ctx(aead_req);
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ struct device *dev = ctx->dev;
+
+ /* Kunpeng920 aead mode does not support zero-size input */
+ if (!a_ctx->fallback_aead_tfm) {
+ dev_err(dev, "aead fallback tfm is NULL!\n");
return -EINVAL;
}
- return 0;
+ aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
+ aead_request_set_callback(subreq, aead_req->base.flags,
+ aead_req->base.complete, aead_req->base.data);
+ aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
+ aead_req->cryptlen, aead_req->iv);
+ aead_request_set_ad(subreq, aead_req->assoclen);
+
+ return encrypt ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
}
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
@@ -1608,8 +2318,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->ctx = ctx;
ret = sec_aead_param_check(ctx, req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ if (ctx->a_ctx.fallback)
+ return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
+ }
return ctx->req_op->process(ctx, req);
}
@@ -1624,14 +2337,16 @@ static int sec_aead_decrypt(struct aead_request *a_req)
return sec_aead_crypto(a_req, false);
}
-#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
ctx_exit, blk_size, iv_size, max_authsize)\
{\
.base = {\
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
+ .cra_flags = CRYPTO_ALG_ASYNC |\
+ CRYPTO_ALG_ALLOCATES_MEMORY |\
+ CRYPTO_ALG_NEED_FALLBACK,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
@@ -1639,28 +2354,46 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.init = ctx_init,\
.exit = ctx_exit,\
.setkey = sec_set_key,\
+ .setauthsize = sec_aead_setauthsize,\
.decrypt = sec_aead_decrypt,\
.encrypt = sec_aead_encrypt,\
.ivsize = iv_size,\
.maxauthsize = max_authsize,\
}
-#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
- SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
- sec_aead_ctx_exit, blksize, ivsize, authsize)
-
static struct aead_alg sec_aeads[] = {
SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+ sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+ sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+
+ SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ SEC_AIV_SIZE, AES_BLOCK_SIZE)
+};
+
+static struct aead_alg sec_aeads_v3[] = {
+ SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+
+ SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+ SEC_AIV_SIZE, AES_BLOCK_SIZE)
};
int sec_register_to_crypto(struct hisi_qm *qm)
@@ -1673,16 +2406,45 @@ int sec_register_to_crypto(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->ver > QM_HW_V2) {
+ ret = crypto_register_skciphers(sec_skciphers_v3,
+ ARRAY_SIZE(sec_skciphers_v3));
+ if (ret)
+ goto reg_skcipher_fail;
+ }
+
ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
if (ret)
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ goto reg_aead_fail;
+ if (qm->ver > QM_HW_V2) {
+ ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
+ if (ret)
+ goto reg_aead_v3_fail;
+ }
+ return ret;
+
+reg_aead_v3_fail:
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+reg_aead_fail:
+ if (qm->ver > QM_HW_V2)
+ crypto_unregister_skciphers(sec_skciphers_v3,
+ ARRAY_SIZE(sec_skciphers_v3));
+reg_skcipher_fail:
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
+ if (qm->ver > QM_HW_V2)
+ crypto_unregister_aeads(sec_aeads_v3,
+ ARRAY_SIZE(sec_aeads_v3));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+
+ if (qm->ver > QM_HW_V2)
+ crypto_unregister_skciphers(sec_skciphers_v3,
+ ARRAY_SIZE(sec_skciphers_v3));
crypto_unregister_skciphers(sec_skciphers,
ARRAY_SIZE(sec_skciphers));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 9c78edac56a4..9f71c358a6d3 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -4,9 +4,11 @@
#ifndef __HISI_SEC_V2_CRYPTO_H
#define __HISI_SEC_V2_CRYPTO_H
+#define SEC_AIV_SIZE 12
#define SEC_IV_SIZE 24
#define SEC_MAX_KEY_SIZE 64
#define SEC_COMM_SCENE 0
+#define SEC_MIN_BLOCK_SZ 1
enum sec_calg {
SEC_CALG_3DES = 0x1,
@@ -21,6 +23,11 @@ enum sec_hash_alg {
};
enum sec_mac_len {
+ SEC_HMAC_CCM_MAC = 16,
+ SEC_HMAC_GCM_MAC = 16,
+ SEC_SM3_MAC = 32,
+ SEC_HMAC_SM3_MAC = 32,
+ SEC_HMAC_MD5_MAC = 16,
SEC_HMAC_SHA1_MAC = 20,
SEC_HMAC_SHA256_MAC = 32,
SEC_HMAC_SHA512_MAC = 64,
@@ -29,7 +36,11 @@ enum sec_mac_len {
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CFB = 0x2,
+ SEC_CMODE_OFB = 0x3,
SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_CCM = 0x5,
+ SEC_CMODE_GCM = 0x6,
SEC_CMODE_XTS = 0x7,
};
@@ -44,6 +55,7 @@ enum sec_ckey_type {
enum sec_bd_type {
SEC_BD_TYPE1 = 0x1,
SEC_BD_TYPE2 = 0x2,
+ SEC_BD_TYPE3 = 0x3,
};
enum sec_auth {
@@ -63,6 +75,24 @@ enum sec_addr_type {
SEC_PRP = 0x2,
};
+struct bd_status {
+ u64 tag;
+ u8 done;
+ u8 err_type;
+ u16 flag;
+ u16 icv;
+};
+
+enum {
+ AUTHPAD_PAD,
+ AUTHPAD_NOPAD,
+};
+
+enum {
+ AIGEN_GEN,
+ AIGEN_NOGEN,
+};
+
struct sec_sqe_type2 {
/*
* mac_len: 0~4 bits
@@ -209,6 +239,169 @@ struct sec_sqe {
struct sec_sqe_type2 type2;
};
+struct bd3_auth_ivin {
+ __le64 a_ivin_addr;
+ __le32 rsvd0;
+ __le32 rsvd1;
+} __packed __aligned(4);
+
+struct bd3_skip_data {
+ __le32 rsvd0;
+
+ /*
+ * gran_num: 0~15 bits
+ * reserved: 16~31 bits
+ */
+ __le32 gran_num;
+
+ /*
+ * src_skip_data_len: 0~24 bits
+ * reserved: 25~31 bits
+ */
+ __le32 src_skip_data_len;
+
+ /*
+ * dst_skip_data_len: 0~24 bits
+ * reserved: 25~31 bits
+ */
+ __le32 dst_skip_data_len;
+};
+
+struct bd3_stream_scene {
+ __le64 c_ivin_addr;
+ __le64 long_a_data_len;
+
+ /*
+ * auth_pad: 0~1 bits
+ * stream_protocol: 2~4 bits
+ * reserved: 5~7 bits
+ */
+ __u8 stream_auth_pad;
+ __u8 plaintext_type;
+ __le16 pad_len_1p3;
+} __packed __aligned(4);
+
+struct bd3_no_scene {
+ __le64 c_ivin_addr;
+ __le32 rsvd0;
+ __le32 rsvd1;
+ __le32 rsvd2;
+} __packed __aligned(4);
+
+struct bd3_check_sum {
+ __u8 rsvd0;
+ __u8 hac_sva_status;
+ __le16 check_sum_i;
+};
+
+struct bd3_tls_type_back {
+ __u8 tls_1p3_type_back;
+ __u8 hac_sva_status;
+ __le16 pad_len_1p3_back;
+};
+
+struct sec_sqe3 {
+ /*
+ * type: 0~3 bit
+ * bd_invalid: 4 bit
+ * scene: 5~8 bit
+ * de: 9~10 bit
+ * src_addr_type: 11~13 bit
+ * dst_addr_type: 14~16 bit
+ * mac_addr_type: 17~19 bit
+ * reserved: 20~31 bits
+ */
+ __le32 bd_param;
+
+ /*
+ * cipher: 0~1 bits
+ * ci_gen: 2~3 bit
+ * c_icv_len: 4~9 bit
+ * c_width: 10~12 bits
+ * c_key_len: 13~15 bits
+ */
+ __le16 c_icv_key;
+
+ /*
+ * c_mode : 0~3 bits
+ * c_alg : 4~7 bits
+ */
+ __u8 c_mode_alg;
+
+ /*
+ * nonce_len : 0~3 bits
+ * huk : 4 bits
+ * cal_iv_addr_en : 5 bits
+ * seq : 6 bits
+ * reserved : 7 bits
+ */
+ __u8 huk_iv_seq;
+
+ __le64 tag;
+ __le64 data_src_addr;
+ __le64 a_key_addr;
+ union {
+ struct bd3_auth_ivin auth_ivin;
+ struct bd3_skip_data skip_data;
+ };
+
+ __le64 c_key_addr;
+
+ /*
+ * auth: 0~1 bits
+ * ai_gen: 2~3 bits
+ * mac_len: 4~8 bits
+ * akey_len: 9~14 bits
+ * a_alg: 15~20 bits
+ * key_sel: 21~24 bits
+ * update_key: 25 bits
+ * reserved: 26~31 bits
+ */
+ __le32 auth_mac_key;
+ __le32 salt;
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+
+ /*
+ * auth_len: 0~23 bit
+ * auth_key_offset: 24~31 bits
+ */
+ __le32 a_len_key;
+
+ /*
+ * cipher_len: 0~23 bit
+ * auth_ivin_offset: 24~31 bits
+ */
+ __le32 c_len_ivin;
+ __le64 data_dst_addr;
+ __le64 mac_addr;
+ union {
+ struct bd3_stream_scene stream_scene;
+ struct bd3_no_scene no_scene;
+ };
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bit
+ * csc: 4~6 bit
+ * flag: 7~10 bit
+ * reserved: 11~15 bit
+ */
+ __le16 done_flag;
+ __u8 error_type;
+ __u8 warning_type;
+ union {
+ __le32 mac_i;
+ __le32 kek_key_addr_l;
+ };
+ union {
+ __le32 kek_key_addr_h;
+ struct bd3_check_sum check_sum;
+ struct bd3_tls_type_back tls_type_back;
+ };
+ __le32 counter;
+} __packed __aligned(4);
+
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 6f0062d4408c..d120ce3e34ed 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -52,6 +52,7 @@
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x7c177
+#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x301100
#define SEC_MEM_INIT_DONE_REG 0x301104
@@ -84,6 +85,12 @@
#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
+#define SEC_PREFETCH_CFG 0x301130
+#define SEC_SVA_TRANS 0x301EC4
+#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11)))
+#define SEC_PREFETCH_DISABLE BIT(1)
+#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11))
+
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_DBGFS_VAL_MAX_LEN 20
@@ -91,6 +98,7 @@
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
+#define SEC_SHAPER_TYPE_RATE 128
struct sec_hw_error {
u32 int_msk;
@@ -331,6 +339,42 @@ static u8 sec_get_endian(struct hisi_qm *qm)
return SEC_64BE;
}
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val &= SEC_PREFETCH_ENABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+ val, !(val & SEC_PREFETCH_DISABLE),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val |= SEC_PREFETCH_DISABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+ val, !(val & SEC_SVA_DISABLE_READY),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
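Note the polarity of the masks defined above: SEC_PREFETCH_ENABLE is a complemented mask, so ANDing it into SEC_PREFETCH_CFG clears bits 0, 1 and 11 to switch prefetch on, and the open path then polls the same register until the disable bit (bit 1) reads back as zero; the close path sets bit 1 and instead polls SEC_SVA_TRANS until the SEC_SVA_DISABLE_READY bits clear.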
static int sec_engine_init(struct hisi_qm *qm)
{
int ret;
@@ -430,53 +474,60 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void sec_hw_error_enable(struct hisi_qm *qm)
+static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
- u32 val;
+ u32 val1, val2;
+
+ val1 = readl(qm->io_base + SEC_CONTROL_REG);
+ if (enable) {
+ val1 |= SEC_AXI_SHUTDOWN_ENABLE;
+ val2 = SEC_RAS_NFE_ENB_MSK;
+ } else {
+ val1 &= SEC_AXI_SHUTDOWN_DISABLE;
+ val2 = 0x0;
+ }
+
+ if (qm->ver > QM_HW_V2)
+ writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+
+ writel(val1, qm->io_base + SEC_CONTROL_REG);
+}
+static void sec_hw_error_enable(struct hisi_qm *qm)
+{
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
- val = readl(qm->io_base + SEC_CONTROL_REG);
-
/* clear SEC hw error source if having */
writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
- /* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
-
/* enable RAS int */
writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
- /* enable SEC block master OOO when m-bit error occur */
- val = val | SEC_AXI_SHUTDOWN_ENABLE;
+ /* enable SEC block master OOO when nfe occurs on Kunpeng930 */
+ sec_master_ooo_ctrl(qm, true);
- writel(val, qm->io_base + SEC_CONTROL_REG);
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
{
- u32 val;
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
- val = readl(qm->io_base + SEC_CONTROL_REG);
+ /* disable SEC block master OOO when nfe occurs on Kunpeng930 */
+ sec_master_ooo_ctrl(qm, false);
/* disable RAS int */
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
-
- /* disable SEC hw error interrupts */
- writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
-
- /* disable SEC block master OOO when m-bit error occur */
- val = val & SEC_AXI_SHUTDOWN_DISABLE;
-
- writel(val, qm->io_base + SEC_CONTROL_REG);
}
static u32 sec_clear_enable_read(struct sec_debug_file *file)
@@ -743,6 +794,8 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
+ .open_sva_prefetch = sec_open_sva_prefetch,
+ .close_sva_prefetch = sec_close_sva_prefetch,
.err_info_init = sec_err_info_init,
};
@@ -758,6 +811,7 @@ static int sec_pf_probe_init(struct sec_dev *sec)
if (ret)
return ret;
+ sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
@@ -821,6 +875,7 @@ static void sec_qm_uninit(struct hisi_qm *qm)
static int sec_probe_init(struct sec_dev *sec)
{
+ u32 type_rate = SEC_SHAPER_TYPE_RATE;
struct hisi_qm *qm = &sec->qm;
int ret;
@@ -828,6 +883,11 @@ static int sec_probe_init(struct sec_dev *sec)
ret = sec_pf_probe_init(sec);
if (ret)
return ret;
+ /* enable shaper type 0 */
+ if (qm->ver >= QM_HW_V3) {
+ type_rate |= QM_SHAPER_ENABLE;
+ qm->type_rate = type_rate;
+ }
}
return 0;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 2178b40e9f82..f8482ceebf2a 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -68,6 +68,7 @@
#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
@@ -96,6 +97,16 @@
#define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \
HZIP_RO_CNT_CLR_CE_EN)
+#define HZIP_PREFETCH_CFG 0x3011B0
+#define HZIP_SVA_TRANS 0x3011C4
+#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
+#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
+#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
+#define HZIP_SHAPER_RATE_COMPRESS 252
+#define HZIP_SHAPER_RATE_DECOMPRESS 229
+#define HZIP_DELAY_1_US 1
+#define HZIP_POLL_TIMEOUT_US 1000
+
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
@@ -262,6 +273,45 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
+ val &= HZIP_PREFETCH_ENABLE;
+ writel(val, qm->io_base + HZIP_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+ val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+ HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
+ val |= HZIP_SVA_PREFETCH_DISABLE;
+ writel(val, qm->io_base + HZIP_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+ val, !(val & HZIP_SVA_DISABLE_READY),
+ HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
@@ -312,10 +362,27 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
return 0;
}
-static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
+static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
- u32 val;
+ u32 val1, val2;
+
+ val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ if (enable) {
+ val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
+ val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
+ } else {
+ val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
+ val2 = 0x0;
+ }
+
+ if (qm->ver > QM_HW_V2)
+ writel(val2, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+ writel(val1, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
+{
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -333,26 +400,20 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
+ hisi_zip_master_ooo_ctrl(qm, true);
+
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
-
- /* enable ZIP block master OOO when m-bit error occur */
- val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
- val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
- u32 val;
-
/* disable ZIP hw error interrupts */
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
- /* disable ZIP block master OOO when m-bit error occur */
- val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
- val = val & ~HZIP_AXI_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
+ hisi_zip_master_ooo_ctrl(qm, false);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -684,6 +745,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
+ .open_sva_prefetch = hisi_zip_open_sva_prefetch,
+ .close_sva_prefetch = hisi_zip_close_sva_prefetch,
.err_info_init = hisi_zip_err_info_init,
};
@@ -702,6 +765,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
qm->err_ini->err_info_init(qm);
hisi_zip_set_user_domain_and_cache(qm);
+ hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -761,6 +825,7 @@ static void hisi_zip_qm_uninit(struct hisi_qm *qm)
static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
+ u32 type_rate = HZIP_SHAPER_RATE_COMPRESS;
struct hisi_qm *qm = &hisi_zip->qm;
int ret;
@@ -768,6 +833,14 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
ret = hisi_zip_pf_probe_init(hisi_zip);
if (ret)
return ret;
+ /* enable shaper type 0 */
+ if (qm->ver >= QM_HW_V3) {
+ type_rate |= QM_SHAPER_ENABLE;
+
+ /* ZIP also needs to enable shaper type 1 */
+ type_rate |= HZIP_SHAPER_RATE_DECOMPRESS << QM_SHAPER_TYPE1_OFFSET;
+ qm->type_rate = type_rate;
+ }
}
return 0;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 0616e369522e..35fc5ee70491 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
@@ -71,15 +72,11 @@
#define MOD_AES256 (0x0a00 | KEYLEN_256)
#define MAX_IVLEN 16
-#define NPE_ID 2 /* NPE C */
#define NPE_QLEN 16
/* Space for registering when the first
* NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64
-#define SEND_QID 29
-#define RECV_QID 30
-
#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
@@ -136,7 +133,7 @@ struct crypt_ctl {
u32 crypto_ctx; /* NPE Crypto Param structure address */
/* Used by Host: 4*4 bytes*/
- unsigned ctl_flags;
+ unsigned int ctl_flags;
union {
struct skcipher_request *ablk_req;
struct aead_request *aead_req;
@@ -149,6 +146,9 @@ struct crypt_ctl {
struct ablk_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
+ u8 iv[MAX_IVLEN];
+ bool encrypt;
+ struct skcipher_request fallback_req; // keep at the end
};
struct aead_ctx {
@@ -181,9 +181,10 @@ struct ixp_ctx {
u8 enckey[MAX_KEYLEN];
u8 salt[MAX_IVLEN];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
- unsigned salted;
+ unsigned int salted;
atomic_t configuring;
struct completion completion;
+ struct crypto_skcipher *fallback_tfm;
};
struct ixp_alg {
@@ -209,6 +210,7 @@ static const struct ix_hash_algo hash_alg_md5 = {
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
+
static const struct ix_hash_algo hash_alg_sha1 = {
.cfgword = 0x00000005,
.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
@@ -216,16 +218,17 @@ static const struct ix_hash_algo hash_alg_sha1 = {
};
static struct npe *npe_c;
-static struct dma_pool *buffer_pool = NULL;
-static struct dma_pool *ctx_pool = NULL;
-static struct crypt_ctl *crypt_virt = NULL;
+static unsigned int send_qid;
+static unsigned int recv_qid;
+static struct dma_pool *buffer_pool;
+static struct dma_pool *ctx_pool;
+
+static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;
static int support_aes = 1;
-#define DRIVER_NAME "ixp4xx_crypto"
-
static struct platform_device *pdev;
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
@@ -240,12 +243,12 @@ static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_enc;
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
- return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_dec;
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
@@ -256,6 +259,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
+
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
@@ -269,7 +273,7 @@ static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
- static int idx = 0;
+ static int idx;
unsigned long flags;
spin_lock_irqsave(&desc_lock, flags);
@@ -286,7 +290,7 @@ static struct crypt_ctl *get_crypt_desc(void)
idx = 0;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&desc_lock, flags);
- return crypt_virt +i;
+ return crypt_virt + i;
} else {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
@@ -314,7 +318,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
idx = NPE_QLEN;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&emerg_lock, flags);
- return crypt_virt +i;
+ return crypt_virt + i;
} else {
spin_unlock_irqrestore(&emerg_lock, flags);
return NULL;
@@ -330,7 +334,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
buf1 = buf->next;
phys1 = buf->phys_next;
- dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
+ dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;
@@ -348,8 +352,8 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
int decryptlen = req->assoclen + req->cryptlen - authsize;
if (req_ctx->encrypt) {
- scatterwalk_map_and_copy(req_ctx->hmac_virt,
- req->dst, decryptlen, authsize, 1);
+ scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
+ decryptlen, authsize, 1);
}
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
@@ -372,19 +376,33 @@ static void one_packet(dma_addr_t phys)
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
- if (req_ctx->hmac_virt) {
+ if (req_ctx->hmac_virt)
finish_scattered_hmac(crypt);
- }
+
req->base.complete(&req->base, failed);
break;
}
case CTL_FLAG_PERFORM_ABLK: {
struct skcipher_request *req = crypt->data.ablk_req;
struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ unsigned int offset;
+
+ if (ivsize > 0) {
+ offset = req->cryptlen - ivsize;
+ if (req_ctx->encrypt) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ offset, ivsize, 0);
+ } else {
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ memzero_explicit(req_ctx->iv, ivsize);
+ }
+ }
- if (req_ctx->dst) {
+ if (req_ctx->dst)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
- }
+
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
req->base.complete(&req->base, failed);
break;
@@ -392,14 +410,14 @@ static void one_packet(dma_addr_t phys)
case CTL_FLAG_GEN_ICV:
ctx = crypto_tfm_ctx(crypt->data.tfm);
dma_pool_free(ctx_pool, crypt->regist_ptr,
- crypt->regist_buf->phys_addr);
+ crypt->regist_buf->phys_addr);
dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
case CTL_FLAG_GEN_REVAES:
ctx = crypto_tfm_ctx(crypt->data.tfm);
- *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+ *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
@@ -418,8 +436,8 @@ static void crypto_done_action(unsigned long arg)
{
int i;
- for(i=0; i<4; i++) {
- dma_addr_t phys = qmgr_get_entry(RECV_QID);
+ for (i = 0; i < 4; i++) {
+ dma_addr_t phys = qmgr_get_entry(recv_qid);
if (!phys)
return;
one_packet(phys);
@@ -429,15 +447,52 @@ static void crypto_done_action(unsigned long arg)
static int init_ixp_crypto(struct device *dev)
{
- int ret = -ENODEV;
+ struct device_node *np = dev->of_node;
u32 msg[2] = { 0, 0 };
+ int ret = -ENODEV;
+ u32 npe_id;
- if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
- IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
- printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
- return ret;
+ dev_info(dev, "probing...\n");
+
+ /* Locate the NPE and queue manager to use from device tree */
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ struct of_phandle_args queue_spec;
+ struct of_phandle_args npe_spec;
+
+ ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
+ 1, 0, &npe_spec);
+ if (ret) {
+ dev_err(dev, "no NPE engine specified\n");
+ return -ENODEV;
+ }
+ npe_id = npe_spec.args[0];
+
+ ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no rx queue phandle\n");
+ return -ENODEV;
+ }
+ recv_qid = queue_spec.args[0];
+
+ ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no txready queue phandle\n");
+ return -ENODEV;
+ }
+ send_qid = queue_spec.args[0];
+ } else {
+ /*
+ * Hardcoded engine when using platform data, this goes away
+ * when we switch to using DT only.
+ */
+ npe_id = 2;
+ send_qid = 29;
+ recv_qid = 30;
}
- npe_c = npe_request(NPE_ID);
+
+ npe_c = npe_request(npe_id);
if (!npe_c)
return ret;
@@ -455,10 +510,9 @@ static int init_ixp_crypto(struct device *dev)
goto npe_error;
}
- switch ((msg[1]>>16) & 0xff) {
+ switch ((msg[1] >> 16) & 0xff) {
case 3:
- printk(KERN_WARNING "Firmware of %s lacks AES support\n",
- npe_name(npe_c));
+ dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
support_aes = 0;
break;
case 4:
@@ -466,8 +520,7 @@ static int init_ixp_crypto(struct device *dev)
support_aes = 1;
break;
default:
- printk(KERN_ERR "Firmware of %s lacks crypto support\n",
- npe_name(npe_c));
+ dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
ret = -ENODEV;
goto npe_release;
}
@@ -475,35 +528,34 @@ static int init_ixp_crypto(struct device *dev)
* so assure it is large enough
*/
BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
- buffer_pool = dma_pool_create("buffer", dev,
- sizeof(struct buffer_desc), 32, 0);
+ buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
+ 32, 0);
ret = -ENOMEM;
- if (!buffer_pool) {
+ if (!buffer_pool)
goto err;
- }
- ctx_pool = dma_pool_create("context", dev,
- NPE_CTX_LEN, 16, 0);
- if (!ctx_pool) {
+
+ ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
+ if (!ctx_pool)
goto err;
- }
- ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+
+ ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
"ixp_crypto:out", NULL);
if (ret)
goto err;
- ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+ ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
"ixp_crypto:in", NULL);
if (ret) {
- qmgr_release_queue(SEND_QID);
+ qmgr_release_queue(send_qid);
goto err;
}
- qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
+ qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
- qmgr_enable_irq(RECV_QID);
+ qmgr_enable_irq(recv_qid);
return 0;
npe_error:
- printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
+ dev_err(dev, "%s not responding\n", npe_name(npe_c));
ret = -EIO;
err:
dma_pool_destroy(ctx_pool);
@@ -515,22 +567,20 @@ npe_release:
static void release_ixp_crypto(struct device *dev)
{
- qmgr_disable_irq(RECV_QID);
+ qmgr_disable_irq(recv_qid);
tasklet_kill(&crypto_done_tasklet);
- qmgr_release_queue(SEND_QID);
- qmgr_release_queue(RECV_QID);
+ qmgr_release_queue(send_qid);
+ qmgr_release_queue(recv_qid);
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
- if (crypt_virt) {
- dma_free_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- crypt_virt, crypt_phys);
- }
+ if (crypt_virt)
+ dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
+ crypt_virt, crypt_phys);
}
static void reset_sa_dir(struct ix_sa_dir *dir)
@@ -543,9 +593,9 @@ static void reset_sa_dir(struct ix_sa_dir *dir)
static int init_sa_dir(struct ix_sa_dir *dir)
{
dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
- if (!dir->npe_ctx) {
+ if (!dir->npe_ctx)
return -ENOMEM;
- }
+
reset_sa_dir(dir);
return 0;
}
@@ -566,15 +616,31 @@ static int init_tfm(struct crypto_tfm *tfm)
if (ret)
return ret;
ret = init_sa_dir(&ctx->decrypt);
- if (ret) {
+ if (ret)
free_sa_dir(&ctx->encrypt);
- }
+
return ret;
}
static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
+ struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+ struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+ const char *name = crypto_tfm_alg_name(ctfm);
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ pr_info("Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&tfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
+ );
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
return init_tfm(crypto_skcipher_tfm(tfm));
}
@@ -587,12 +653,17 @@ static int init_tfm_aead(struct crypto_aead *tfm)
static void exit_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+
free_sa_dir(&ctx->encrypt);
free_sa_dir(&ctx->decrypt);
}
static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
+ struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+ struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+
+ crypto_free_skcipher(ctx->fallback_tfm);
exit_tfm(crypto_skcipher_tfm(tfm));
}
@@ -602,7 +673,8 @@ static void exit_tfm_aead(struct crypto_aead *tfm)
}
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
- int init_len, u32 ctx_addr, const u8 *key, int key_len)
+ int init_len, u32 ctx_addr, const u8 *key,
+ int key_len)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypt_ctl *crypt;
@@ -629,9 +701,8 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
memcpy(pad, key, key_len);
memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
- for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
+ for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
pad[i] ^= xpad;
- }
crypt->data.tfm = tfm;
crypt->regist_ptr = pad;
@@ -652,13 +723,13 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
buf->phys_addr = pad_phys;
atomic_inc(&ctx->configuring);
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return 0;
}
-static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
- const u8 *key, int key_len, unsigned digest_len)
+static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
+ const u8 *key, int key_len, unsigned int digest_len)
{
u32 itarget, otarget, npe_ctx_addr;
unsigned char *cinfo;
@@ -673,11 +744,11 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
algo = ix_hash(tfm);
/* write cfg word to cryptinfo */
- cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
+ cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
- *(u32*)cinfo = cpu_to_be32(cfgword);
+ *(u32 *)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
/* write ICV to cryptinfo */
@@ -697,11 +768,11 @@ static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
dir->npe_mode |= NPE_OP_HASH_VERIFY;
ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
- init_len, npe_ctx_addr, key, key_len);
+ init_len, npe_ctx_addr, key, key_len);
if (ret)
return ret;
return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
- init_len, npe_ctx_addr, key, key_len);
+ init_len, npe_ctx_addr, key, key_len);
}
static int gen_rev_aes_key(struct crypto_tfm *tfm)
@@ -711,10 +782,10 @@ static int gen_rev_aes_key(struct crypto_tfm *tfm)
struct ix_sa_dir *dir = &ctx->decrypt;
crypt = get_crypt_desc_emerg();
- if (!crypt) {
+ if (!crypt)
return -EAGAIN;
- }
- *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+
+ *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
crypt->data.tfm = tfm;
crypt->crypt_offs = 0;
@@ -727,13 +798,13 @@ static int gen_rev_aes_key(struct crypto_tfm *tfm)
crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
atomic_inc(&ctx->configuring);
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return 0;
}
-static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
- const u8 *key, int key_len)
+static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
+ int key_len)
{
u8 *cinfo;
u32 cipher_cfg;
@@ -753,9 +824,15 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
- case 16: keylen_cfg = MOD_AES128; break;
- case 24: keylen_cfg = MOD_AES192; break;
- case 32: keylen_cfg = MOD_AES256; break;
+ case 16:
+ keylen_cfg = MOD_AES128;
+ break;
+ case 24:
+ keylen_cfg = MOD_AES192;
+ break;
+ case 32:
+ keylen_cfg = MOD_AES256;
+ break;
default:
return -EINVAL;
}
@@ -766,31 +843,31 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return err;
}
/* write cfg word to cryptinfo */
- *(u32*)cinfo = cpu_to_be32(cipher_cfg);
+ *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
cinfo += sizeof(cipher_cfg);
/* write cipher key to cryptinfo */
memcpy(cinfo, key, key_len);
/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
- memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
+ memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
key_len = DES3_EDE_KEY_SIZE;
}
dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
- if ((cipher_cfg & MOD_AES) && !encrypt) {
+ if ((cipher_cfg & MOD_AES) && !encrypt)
return gen_rev_aes_key(tfm);
- }
+
return 0;
}
static struct buffer_desc *chainup_buffers(struct device *dev,
- struct scatterlist *sg, unsigned nbytes,
+ struct scatterlist *sg, unsigned int nbytes,
struct buffer_desc *buf, gfp_t flags,
enum dma_data_direction dir)
{
for (; nbytes > 0; sg = sg_next(sg)) {
- unsigned len = min(nbytes, sg->length);
+ unsigned int len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
dma_addr_t next_buf_phys;
void *ptr;
@@ -817,7 +894,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
}
static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int key_len)
+ unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
@@ -838,7 +915,12 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
- return ret;
+ if (ret)
+ return ret;
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
}
static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -849,7 +931,7 @@ static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int key_len)
+ unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -858,17 +940,36 @@ static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
- CTR_RFC3686_NONCE_SIZE);
+ CTR_RFC3686_NONCE_SIZE);
key_len -= CTR_RFC3686_NONCE_SIZE;
return ablk_setkey(tfm, key, key_len);
}
+static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
+ struct ablk_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (encrypt)
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+ return err;
+}
+
static int ablk_perform(struct skcipher_request *req, int encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned ivsize = crypto_skcipher_ivsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int nbytes = req->cryptlen;
@@ -876,15 +977,20 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
struct buffer_desc src_hook;
struct device *dev = &pdev->dev;
+ unsigned int offset;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- if (qmgr_stat_full(SEND_QID))
+ if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
+ return ixp4xx_cipher_fallback(req, encrypt);
+
+ if (qmgr_stat_full(send_qid))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+ req_ctx->encrypt = encrypt;
crypt = get_crypt_desc();
if (!crypt)
@@ -900,14 +1006,19 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
+ if (ivsize > 0 && !encrypt) {
+ offset = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
+ }
if (req->src != req->dst) {
struct buffer_desc dst_hook;
+
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
- flags, DMA_FROM_DEVICE))
+ flags, DMA_FROM_DEVICE))
goto free_buf_dest;
src_direction = DMA_TO_DEVICE;
req_ctx->dst = dst_hook.next;
@@ -916,23 +1027,23 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
req_ctx->dst = NULL;
}
req_ctx->src = NULL;
- if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
- flags, src_direction))
+ if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
+ src_direction))
goto free_buf_src;
req_ctx->src = src_hook.next;
crypt->src_buf = src_hook.phys_next;
crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return -EINPROGRESS;
free_buf_src:
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
- if (req->src != req->dst) {
+ if (req->src != req->dst)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
- }
+
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
@@ -956,7 +1067,7 @@ static int ablk_rfc3686_crypt(struct skcipher_request *req)
int ret;
/* set up counter block */
- memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
@@ -970,12 +1081,12 @@ static int ablk_rfc3686_crypt(struct skcipher_request *req)
}
static int aead_perform(struct aead_request *req, int encrypt,
- int cryptoffset, int eff_cryptlen, u8 *iv)
+ int cryptoffset, int eff_cryptlen, u8 *iv)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- unsigned ivsize = crypto_aead_ivsize(tfm);
- unsigned authsize = crypto_aead_authsize(tfm);
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
+ unsigned int authsize = crypto_aead_authsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int cryptlen;
@@ -987,7 +1098,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
unsigned int lastlen;
- if (qmgr_stat_full(SEND_QID))
+ if (qmgr_stat_full(send_qid))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
@@ -998,7 +1109,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
} else {
dir = &ctx->decrypt;
/* req->cryptlen includes the authsize when decrypting */
- cryptlen = req->cryptlen -authsize;
+ cryptlen = req->cryptlen - authsize;
eff_cryptlen -= authsize;
}
crypt = get_crypt_desc();
@@ -1058,12 +1169,12 @@ static int aead_perform(struct aead_request *req, int encrypt,
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
- &crypt->icv_rev_aes);
+ &crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
goto free_buf_dst;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
- req->src, cryptlen, authsize, 0);
+ req->src, cryptlen, authsize, 0);
}
req_ctx->encrypt = encrypt;
} else {
@@ -1071,8 +1182,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
- qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
- BUG_ON(qmgr_stat_overflow(SEND_QID));
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
return -EINPROGRESS;
free_buf_dst:
@@ -1086,7 +1197,7 @@ free_buf_src:
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- unsigned digest_len = crypto_aead_maxauthsize(tfm);
+ unsigned int digest_len = crypto_aead_maxauthsize(tfm);
int ret;
if (!ctx->enckey_len && !ctx->authkey_len)
@@ -1104,11 +1215,11 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
if (ret)
goto out;
ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
- ctx->authkey_len, digest_len);
+ ctx->authkey_len, digest_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
- ctx->authkey_len, digest_len);
+ ctx->authkey_len, digest_len);
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1119,13 +1230,13 @@ static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
int max = crypto_aead_maxauthsize(tfm) >> 2;
- if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
+ if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
return -EINVAL;
return aead_setup(tfm, authsize);
}
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
@@ -1364,43 +1475,33 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
#define IXP_POSTFIX "-ixp4xx"
-static const struct platform_device_info ixp_dev_info __initdata = {
- .name = DRIVER_NAME,
- .id = 0,
- .dma_mask = DMA_BIT_MASK(32),
-};
-
-static int __init ixp_module_init(void)
+static int ixp_crypto_probe(struct platform_device *_pdev)
{
+ struct device *dev = &_pdev->dev;
int num = ARRAY_SIZE(ixp4xx_algos);
int i, err;
- pdev = platform_device_register_full(&ixp_dev_info);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ pdev = _pdev;
- err = init_ixp_crypto(&pdev->dev);
- if (err) {
- platform_device_unregister(pdev);
+ err = init_ixp_crypto(dev);
+ if (err)
return err;
- }
- for (i=0; i< num; i++) {
+
+ for (i = 0; i < num; i++) {
struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s"IXP_POSTFIX, cra->base.cra_name) >=
- CRYPTO_MAX_ALG_NAME)
- {
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
continue;
- }
- if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
+ if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
continue;
- }
/* block ciphers */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK;
if (!cra->setkey)
cra->setkey = ablk_setkey;
if (!cra->encrypt)
@@ -1415,7 +1516,7 @@ static int __init ixp_module_init(void)
cra->base.cra_alignmask = 3;
cra->base.cra_priority = 300;
if (crypto_register_skcipher(cra))
- printk(KERN_ERR "Failed to register '%s'\n",
+ dev_err(&pdev->dev, "Failed to register '%s'\n",
cra->base.cra_name);
else
ixp4xx_algos[i].registered = 1;
@@ -1448,7 +1549,7 @@ static int __init ixp_module_init(void)
cra->base.cra_priority = 300;
if (crypto_register_aead(cra))
- printk(KERN_ERR "Failed to register '%s'\n",
+ dev_err(&pdev->dev, "Failed to register '%s'\n",
cra->base.cra_driver_name);
else
ixp4xx_aeads[i].registered = 1;
@@ -1456,7 +1557,7 @@ static int __init ixp_module_init(void)
return 0;
}
-static void __exit ixp_module_exit(void)
+static int ixp_crypto_remove(struct platform_device *pdev)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
@@ -1466,16 +1567,30 @@ static void __exit ixp_module_exit(void)
crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
}
- for (i=0; i< num; i++) {
+ for (i = 0; i < num; i++) {
if (ixp4xx_algos[i].registered)
crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto(&pdev->dev);
- platform_device_unregister(pdev);
+
+ return 0;
}
+static const struct of_device_id ixp4xx_crypto_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-crypto",
+ },
+ {},
+};
-module_init(ixp_module_init);
-module_exit(ixp_module_exit);
+static struct platform_driver ixp_crypto_driver = {
+ .probe = ixp_crypto_probe,
+ .remove = ixp_crypto_remove,
+ .driver = {
+ .name = "ixp4xx_crypto",
+ .of_match_table = ixp4xx_crypto_of_match,
+ },
+};
+module_platform_driver(ixp_crypto_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
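The ixp4xx changes above add a software fallback path: init_tfm_ablk() allocates a second skcipher with CRYPTO_ALG_NEED_FALLBACK and enlarges the request context to hold its sub-request, ablk_setkey() mirrors the key into it, and ablk_perform() hands over any request whose source or destination scatterlist has more than one entry. Below is a condensed sketch of that pattern; it reuses only the crypto API calls visible in the hunks above, but the my_* names are illustrative and this is not the driver's actual code.

#include <linux/scatterlist.h>
#include <crypto/internal/skcipher.h>

struct my_ctx {
	struct crypto_skcipher *fallback;
};

struct my_reqctx {
	struct skcipher_request fallback_req;	/* must be last member */
};

static int my_init(struct crypto_skcipher *tfm)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));

	/* Ask the crypto API for any other implementation of the same algorithm */
	ctx->fallback = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* Reserve room for the fallback's request context behind our own */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static int my_encrypt(struct skcipher_request *req)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct my_reqctx *rctx = skcipher_request_ctx(req);

	if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1) {
		/* Forward the whole request, preserving flags and completion callback */
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
					      req->base.complete, req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
					   req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&rctx->fallback_req);
	}

	/* single-segment requests would go to the NPE here, as in ablk_perform() */
	return -EINPROGRESS;
}

The matching exit path must release the fallback with crypto_free_skcipher(), as exit_tfm_ablk() does above.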
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index c1007f2ba79c..d215a6bed6bc 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -66,7 +66,7 @@
#define CESA_SA_ST_ACT_1 BIT(1)
/*
- * CESA_SA_FPGA_INT_STATUS looks like a FPGA leftover and is documented only
+ * CESA_SA_FPGA_INT_STATUS looks like an FPGA leftover and is documented only
* in Errata 4.12. It looks like that it was part of an IRQ-controller in FPGA
* and someone forgot to remove it while switching to the core and moving to
* CESA_SA_INT_STATUS.
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
index b9c6201019e0..c242d22008c3 100644
--- a/drivers/crypto/marvell/octeontx2/Makefile
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2-cpt.o octeontx2-cptvf.o
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
-octeontx2-cpt-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
- otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o
-octeontx2-cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
- otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
- otx2_cptvf_algs.o
+rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+ otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
+ cn10k_cpt.o
+rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+ otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+ otx2_cptvf_algs.o cn10k_cpt.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
new file mode 100644
index 000000000000..1499ef75b5c2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Marvell. */
+
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include "otx2_cptpf.h"
+#include "otx2_cptvf.h"
+#include "otx2_cptlf.h"
+#include "cn10k_cpt.h"
+
+static struct cpt_hw_ops otx2_hw_ops = {
+ .send_cmd = otx2_cpt_send_cmd,
+ .cpt_get_compcode = otx2_cpt_get_compcode,
+ .cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
+};
+
+static struct cpt_hw_ops cn10k_hw_ops = {
+ .send_cmd = cn10k_cpt_send_cmd,
+ .cpt_get_compcode = cn10k_cpt_get_compcode,
+ .cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
+};
+
+void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf)
+{
+ void __iomem *lmtline = lf->lmtline;
+ u64 val = (lf->slot & 0x7FF);
+ u64 tar_addr = 0;
+
+ /* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
+ tar_addr |= (__force u64)lf->ioreg |
+ (((OTX2_CPT_INST_SIZE/16) - 1) & 0x7) << 4;
+ /*
+ * Make sure memory areas pointed in CPT_INST_S
+ * are flushed before the instruction is sent to CPT
+ */
+ dma_wmb();
+
+ /* Copy CPT command to LMTLINE */
+ memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ resource_size_t size;
+ u64 lmt_base;
+
+ if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
+ cptpf->lfs.ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ cptpf->lfs.ops = &cn10k_hw_ops;
+ lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
+ if (!lmt_base) {
+ dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
+ return -ENOMEM;
+ }
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
+ cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
+ if (!cptpf->lfs.lmt_base) {
+ dev_err(&pdev->dev,
+ "Mapping of PF LMTLINE address failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ resource_size_t offset, size;
+
+ if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
+ cptvf->lfs.ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ cptvf->lfs.ops = &cn10k_hw_ops;
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map VF LMTLINE region */
+ cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
+ if (!cptvf->lfs.lmt_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
new file mode 100644
index 000000000000..c091392b47e0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2021 Marvell.
+ */
+#ifndef __CN10K_CPT_H
+#define __CN10K_CPT_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "otx2_cptvf.h"
+
+static inline u8 cn10k_cpt_get_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn10k_cpt_res_s *)result)->compcode;
+}
+
+static inline u8 cn10k_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn10k_cpt_res_s *)result)->uc_compcode;
+}
+
+static inline u8 otx2_cpt_get_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn9k_cpt_res_s *)result)->compcode;
+}
+
+static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn9k_cpt_res_s *)result)->uc_compcode;
+}
+
+void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
+int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+
+#endif /* __CN10K_CPT_H */
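cn10k_cpt.h keeps the two incompatible completion-word layouts (8-bit compcode on OcteonTX2 versus 7-bit compcode plus a doneint bit on CN10K) behind tiny accessors, and the rest of the driver reaches them only through the cpt_hw_ops table installed at probe time. The following is a self-contained userspace illustration of that dispatch idea; the types, values and gen1/gen2 names are invented for the example and bitfield layout is compiler-dependent, so it is a sketch of the pattern, not of the real register format.

#include <stdint.h>
#include <stdio.h>

/* Two different interpretations of the same result word */
union res_word {
	uint64_t u[2];
	struct { uint64_t compcode:8; uint64_t uc_compcode:8; uint64_t rest:48; uint64_t pad; } gen1;
	struct { uint64_t compcode:7; uint64_t doneint:1; uint64_t uc_compcode:8; uint64_t rest:48; uint64_t pad; } gen2;
};

struct hw_ops {
	uint8_t (*get_compcode)(const union res_word *r);
	uint8_t (*get_uc_compcode)(const union res_word *r);
};

static uint8_t gen1_compcode(const union res_word *r) { return r->gen1.compcode; }
static uint8_t gen1_uc(const union res_word *r)       { return r->gen1.uc_compcode; }
static uint8_t gen2_compcode(const union res_word *r) { return r->gen2.compcode; }
static uint8_t gen2_uc(const union res_word *r)       { return r->gen2.uc_compcode; }

static const struct hw_ops gen1_ops = { gen1_compcode, gen1_uc };
static const struct hw_ops gen2_ops = { gen2_compcode, gen2_uc };

int main(void)
{
	union res_word r = { .u = { 0x0206, 0 } };
	int on_newer_silicon = 1;	/* decided once at probe, like otx2_cpt_set_hw_caps() */
	const struct hw_ops *ops = on_newer_silicon ? &gen2_ops : &gen1_ops;

	/* Callers such as the pending-queue processing never touch the bitfields */
	printf("compcode=%u uc_compcode=%u\n",
	       ops->get_compcode(&r), ops->get_uc_compcode(&r));
	return 0;
}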
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index ecedd91a8d85..c5445b05f53c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -25,6 +25,10 @@
#define OTX2_CPT_NAME_LENGTH 64
#define OTX2_CPT_DMA_MINALIGN 128
+/* HW capability flags */
+#define CN10K_MBOX 0
+#define CN10K_LMTST 1
+
#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES
enum otx2_cpt_eng_type {
@@ -116,6 +120,25 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
+ pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
+ return true;
+
+ return false;
+}
+
+static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
+ unsigned long *cap_flag)
+{
+ if (!is_dev_otx2(pdev)) {
+ __set_bit(CN10K_MBOX, cap_flag);
+ __set_bit(CN10K_LMTST, cap_flag);
+ }
+}
+
+
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
index ecafc42f37a2..6f947978e4e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -10,6 +10,8 @@
/* Device IDs */
#define OTX2_CPT_PCI_PF_DEVICE_ID 0xA0FD
#define OTX2_CPT_PCI_VF_DEVICE_ID 0xA0FE
+#define CN10K_CPT_PCI_PF_DEVICE_ID 0xA0F2
+#define CN10K_CPT_PCI_VF_DEVICE_ID 0xA0F3
/* Mailbox interrupts offset */
#define OTX2_CPT_PF_MBOX_INT 6
@@ -25,6 +27,7 @@
*/
#define OTX2_CPT_VF_MSIX_VECTORS 1
#define OTX2_CPT_VF_INTR_MBOX_MASK BIT(0)
+#define CN10K_CPT_VF_MBOX_REGION (0xC0000)
/* CPT LF MSIX vectors */
#define OTX2_CPT_LF_MSIX_VECTORS 2
@@ -135,7 +138,7 @@ enum otx2_cpt_comp_e {
OTX2_CPT_COMP_E_FAULT = 0x02,
OTX2_CPT_COMP_E_HWERR = 0x04,
OTX2_CPT_COMP_E_INSTERR = 0x05,
- OTX2_CPT_COMP_E_LAST_ENTRY = 0x06
+ OTX2_CPT_COMP_E_WARN = 0x06
};
/*
@@ -266,13 +269,22 @@ union otx2_cpt_inst_s {
union otx2_cpt_res_s {
u64 u[2];
- struct {
+ struct cn9k_cpt_res_s {
u64 compcode:8;
u64 uc_compcode:8;
u64 doneint:1;
u64 reserved_17_63:47;
u64 reserved_64_127;
} s;
+
+ struct cn10k_cpt_res_s {
+ u64 compcode:7;
+ u64 doneint:1;
+ u64 uc_compcode:8;
+ u64 rlen:16;
+ u64 spi:32;
+ u64 esn;
+ } cn10k;
};
/*
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 34aba1532761..c8350fcd60fa 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -379,9 +379,14 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
for (slot = 0; slot < lfs->lfs_num; slot++) {
lfs->lf[slot].lfs = lfs;
lfs->lf[slot].slot = slot;
- lfs->lf[slot].lmtline = lfs->reg_base +
- OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
+ if (lfs->lmt_base)
+ lfs->lf[slot].lmtline = lfs->lmt_base +
+ (slot * LMTLINE_SIZE);
+ else
+ lfs->lf[slot].lmtline = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
OTX2_CPT_LMT_LF_LMTLINEX(0));
+
lfs->lf[slot].ioreg = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
OTX2_CPT_LF_NQX(0));
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index ab1678fc564d..b691b6c1d5c4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -84,12 +84,22 @@ struct otx2_cptlf_info {
struct otx2_cptlf_wqe *wqe; /* Tasklet work info */
};
+struct cpt_hw_ops {
+ void (*send_cmd)(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+ u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
+ u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
+};
+
struct otx2_cptlfs_info {
/* Registers start address of VF/PF LFs are attached to */
void __iomem *reg_base;
+#define LMTLINE_SIZE 128
+ void __iomem *lmt_base;
struct pci_dev *pdev; /* Device LFs are attached to */
struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
struct otx2_mbox *mbox;
+ struct cpt_hw_ops *ops;
u8 are_lfs_attached; /* Whether CPT LFs are attached */
u8 lfs_num; /* Number of CPT LFs */
u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
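With the new lmt_base mapping in otx2_cptlfs_info, otx2_cptlf_init() above computes each LF's LMTLINE as a fixed 128-byte stride into that linear region, and only falls back to the per-slot register window when no LMT region was mapped (the OcteonTX2 case). A minimal userspace sketch of that selection follows; reg_window_of_slot() and its stride stand in for the OTX2_CPT_RVU_FUNC_ADDR_S() computation and are not taken from the driver.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define LMTLINE_SIZE 128	/* one LMT line per LF */

static void *reg_window_of_slot(uint8_t *reg_base, int slot)
{
	return reg_base + (size_t)slot * 0x1000;	/* made-up stride for the legacy path */
}

static void *lmtline_for_slot(uint8_t *lmt_base, uint8_t *reg_base, int slot)
{
	if (lmt_base)					/* CN10K: linear LMT region */
		return lmt_base + (size_t)slot * LMTLINE_SIZE;
	return reg_window_of_slot(reg_base, slot);	/* OcteonTX2: register space */
}

int main(void)
{
	static uint8_t lmt_region[4 * LMTLINE_SIZE], regs[4 * 0x1000];

	printf("slot 2 (cn10k): %p\n", lmtline_for_slot(lmt_region, regs, 2));
	printf("slot 2 (otx2):  %p\n", lmtline_for_slot(NULL, regs, 2));
	return 0;
}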
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index e19af1356f12..5ebba86c65d9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -47,6 +47,7 @@ struct otx2_cptpf_dev {
struct workqueue_struct *flr_wq;
struct cptpf_flr_work *flr_work;
+ unsigned long cap_flag;
u8 pf_id; /* RVU PF number */
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 58f47e3ab62e..146a55ac4b9b 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -6,10 +6,11 @@
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
+#include "cn10k_cpt.h"
#include "rvu_reg.h"
-#define OTX2_CPT_DRV_NAME "octeontx2-cpt"
-#define OTX2_CPT_DRV_STRING "Marvell OcteonTX2 CPT Physical Function Driver"
+#define OTX2_CPT_DRV_NAME "rvu_cptpf"
+#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
@@ -62,45 +63,66 @@ static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
}
}
-static void cptpf_enable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf)
+static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
{
- /* Clear interrupt if any */
+ /* Clear FLR interrupt if any */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
- ~0x0ULL);
- otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
- ~0x0ULL);
+ INTR_MASK(num_vfs));
/* Enable VF FLR interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1SX(0), ~0x0ULL);
+ RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
+ /* Clear ME interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
+ INTR_MASK(num_vfs));
+ /* Enable VF ME interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
+
+ if (num_vfs <= 64)
+ return;
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ INTR_MASK(num_vfs - 64));
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
+ INTR_MASK(num_vfs - 64));
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1SX(1), ~0x0ULL);
+ RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}
-static void cptpf_disable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf,
+static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
int vector;
/* Disable VF FLR interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1CX(0), ~0x0ULL);
+ RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+
+ /* Disable VF ME interrupts */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
- RVU_PF_VFFLR_INT_ENA_W1CX(1), ~0x0ULL);
+ RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(vector, cptpf);
- /* Clear interrupt if any */
- otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
- ~0x0ULL);
- otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
- ~0x0ULL);
+ if (num_vfs <= 64)
+ return;
- vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
free_irq(vector, cptpf);
- if (num_vfs > 64) {
- vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
- free_irq(vector, cptpf);
- }
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(vector, cptpf);
}
static void cptpf_flr_wq_handler(struct work_struct *work)
@@ -172,11 +194,38 @@ static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ int reg, vf, num_reg = 1;
+ u64 intr;
+
+ if (cptpf->max_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ /* Clear interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
- cptpf_disable_vf_flr_intrs(cptpf, num_vfs);
+ cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
@@ -202,6 +251,15 @@ static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
"IRQ registration failed for VFFLR0 irq\n");
goto free_mbox0_irq;
}
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
+ /* Register VF ME interrupt handler */
+ ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFME0 irq\n");
+ goto free_flr0_irq;
+ }
+
if (num_vfs > 64) {
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
@@ -209,7 +267,7 @@ static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
if (ret) {
dev_err(dev,
"IRQ registration failed for PFVF mbox1 irq\n");
- goto free_flr0_irq;
+ goto free_me0_irq;
}
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
/* Register VF FLR interrupt handler */
@@ -220,15 +278,30 @@ static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
"IRQ registration failed for VFFLR1 irq\n");
goto free_mbox1_irq;
}
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
+ /* Register VF ME interrupt handler */
+ ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFME1 irq\n");
+ goto free_flr1_irq;
+ }
}
cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
- cptpf_enable_vf_flr_intrs(cptpf);
+ cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);
return 0;
+free_flr1_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(vector, cptpf);
free_mbox1_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
free_irq(vector, cptpf);
+free_me0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(vector, cptpf);
free_flr0_irq:
vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
free_irq(vector, cptpf);
@@ -284,7 +357,11 @@ static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
return -ENOMEM;
/* Map VF-PF mailbox memory */
- vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+ if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
+ else
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+
if (!vfpf_mbox_base) {
dev_err(dev, "VF-PF mailbox address not configured\n");
err = -ENOMEM;
@@ -365,6 +442,8 @@ static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
+ struct pci_dev *pdev = cptpf->pdev;
+ resource_size_t offset;
int err;
cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
@@ -373,8 +452,17 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
if (!cptpf->afpf_mbox_wq)
return -ENOMEM;
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ /* Map AF-PF mailbox memory */
+ cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
+ if (!cptpf->afpf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
- cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
+ pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
if (err)
goto error;
@@ -570,7 +658,7 @@ static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
if (ret)
goto disable_intr;
- ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
+ ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
if (ret)
goto disable_intr;
@@ -607,7 +695,6 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
- resource_size_t offset, size;
struct otx2_cptpf_dev *cptpf;
int err;
@@ -644,15 +731,6 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
if (err)
goto clear_drvdata;
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map AF-PF mailbox memory */
- cptpf->afpf_mbox_base = devm_ioremap_wc(dev, offset, size);
- if (!cptpf->afpf_mbox_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- err = -ENODEV;
- goto clear_drvdata;
- }
err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
if (err < 0) {
@@ -660,6 +738,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
RVU_PF_INT_VEC_CNT);
goto clear_drvdata;
}
+ otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
/* Initialize AF-PF mailbox */
err = cptpf_afpf_mbox_init(cptpf);
if (err)
@@ -671,6 +750,10 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+ err = cn10k_cptpf_lmtst_init(cptpf);
+ if (err)
+ goto unregister_intr;
+
/* Initialize CPT PF device */
err = cptpf_device_init(cptpf);
if (err)
@@ -719,6 +802,7 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
{ 0, } /* end of table */
};
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index a531f4c8b441..dff34b3ec09e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -16,6 +16,8 @@
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08
+#define CTX_FLUSH_TIMER_CNT 0xFFFFFF
+
struct fw_info_t {
struct list_head ucodes;
};
@@ -666,7 +668,8 @@ static int reserve_engines(struct device *dev,
static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
if (ucode->va) {
- dma_free_coherent(dev, ucode->size, ucode->va, ucode->dma);
+ dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
+ ucode->dma);
ucode->va = NULL;
ucode->dma = 0;
ucode->size = 0;
@@ -685,7 +688,7 @@ static int copy_ucode_to_dma_mem(struct device *dev,
u32 i;
/* Allocate DMAable space */
- ucode->va = dma_alloc_coherent(dev, ucode->size, &ucode->dma,
+ ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
GFP_KERNEL);
if (!ucode->va)
return -ENOMEM;
@@ -1100,11 +1103,12 @@ int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
return eng_grp_num;
}
-int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_eng_grps *eng_grps)
{
struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
int ret;
@@ -1180,6 +1184,23 @@ int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
eng_grps->is_grps_created = true;
cpt_ucode_release_fw(&fw_info);
+
+ if (is_dev_otx2(pdev))
+ return 0;
+ /*
+ * Configure engine group mask to allow context prefetching
+ * for the groups.
+ */
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
+ OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
+ BLKADDR_CPT0);
+ /*
+ * Set interval to periodically flush dirty data for the next
+ * CTX cache entry. Set the interval count to maximum supported
+ * value.
+ */
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
+ CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
return 0;
delete_eng_grp:
@@ -1460,9 +1481,10 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
etype);
otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
- otx2_cpt_send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+ lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
- while (result->s.compcode == OTX2_CPT_COMPLETION_CODE_INIT)
+ while (lfs->ops->cpt_get_compcode(result) ==
+ OTX2_CPT_COMPLETION_CODE_INIT)
cpu_relax();
cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index 6b0d432de0af..fe019ab730b2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -23,11 +23,13 @@
/* Microcode version string length */
#define OTX2_CPT_UCODE_VER_STR_SZ 44
-/* Maximum number of supported engines/cores on OcteonTX2 platform */
-#define OTX2_CPT_MAX_ENGINES 128
+/* Maximum number of supported engines/cores on OcteonTX2/CN10K platform */
+#define OTX2_CPT_MAX_ENGINES 144
#define OTX2_CPT_ENGS_BITMASK_LEN BITS_TO_LONGS(OTX2_CPT_MAX_ENGINES)
+#define OTX2_CPT_UCODE_SZ (64 * 1024)
+
/* Microcode types */
enum otx2_cpt_ucode_type {
OTX2_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
@@ -153,7 +155,7 @@ int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps);
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps);
-int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_eng_grps *eng_grps);
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
index 4f0a169fddbd..4207e2236903 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -19,11 +19,14 @@ struct otx2_cptvf_dev {
struct otx2_mbox pfvf_mbox;
struct work_struct pfvf_mbox_work;
struct workqueue_struct *pfvf_mbox_wq;
+ void *bbuf_base;
+ unsigned long cap_flag;
};
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
+int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev);
#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 47f378731024..3411e664cf50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -5,9 +5,10 @@
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
+#include "cn10k_cpt.h"
#include <rvu_reg.h>
-#define OTX2_CPTVF_DRV_NAME "octeontx2-cptvf"
+#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
@@ -70,6 +71,8 @@ static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
+ struct pci_dev *pdev = cptvf->pdev;
+ resource_size_t offset, size;
int ret;
cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
@@ -78,14 +81,39 @@ static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
if (!cptvf->pfvf_mbox_wq)
return -ENOMEM;
+ if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
+ /* For cn10k platform, VF mailbox region is in its BAR2
+ * register space
+ */
+ cptvf->pfvf_mbox_base = cptvf->reg_base +
+ CN10K_CPT_VF_MBOX_REGION;
+ } else {
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map PF-VF mailbox memory */
+ cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
+ size);
+ if (!cptvf->pfvf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ ret = -ENOMEM;
+ goto free_wqe;
+ }
+ }
+
ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
- cptvf->pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
+ pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
if (ret)
goto free_wqe;
+ ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
+ if (ret)
+ goto destroy_mbox;
+
INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
return 0;
+destroy_mbox:
+ otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
destroy_workqueue(cptvf->pfvf_mbox_wq);
return ret;
@@ -305,7 +333,6 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
- resource_size_t offset, size;
struct otx2_cptvf_dev *cptvf;
int ret;
@@ -337,15 +364,12 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map PF-VF mailbox memory */
- cptvf->pfvf_mbox_base = devm_ioremap_wc(dev, offset, size);
- if (!cptvf->pfvf_mbox_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- ret = -ENODEV;
+ otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
+
+ ret = cn10k_cptvf_lmtst_init(cptvf);
+ if (ret)
goto clear_drvdata;
- }
+
/* Initialize PF<=>VF mailbox */
ret = cptvf_pfvf_mbox_init(cptvf);
if (ret)
@@ -392,6 +416,7 @@ static void otx2_cptvf_remove(struct pci_dev *pdev)
/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
+ {PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
{ 0, } /* end of table */
};
@@ -405,6 +430,6 @@ static struct pci_driver otx2_cptvf_pci_driver = {
module_pci_driver(otx2_cptvf_pci_driver);
MODULE_AUTHOR("Marvell");
-MODULE_DESCRIPTION("Marvell OcteonTX2 CPT Virtual Function Driver");
+MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 5d73b711cba6..02cb9e44afd8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -5,6 +5,48 @@
#include "otx2_cptvf.h"
#include <rvu_reg.h>
+int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *otx2_mbox;
+
+ cptvf->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
+ if (!cptvf->bbuf_base)
+ return -ENOMEM;
+ /*
+ * Overwrite mbox mbase to point to bounce buffer, so that PF/VF
+ * prepare all mbox messages in bounce buffer instead of directly
+ * in hw mbox memory.
+ */
+ otx2_mbox = &cptvf->pfvf_mbox;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = cptvf->bbuf_base;
+
+ return 0;
+}
+
+static void otx2_cpt_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
+{
+ u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *hdr;
+ u64 msg_size;
+
+ if (mdev->mbase == hw_mbase)
+ return;
+
+ hdr = hw_mbase + mbox->rx_start;
+ msg_size = hdr->msg_size;
+
+ if (msg_size > mbox->rx_size - msgs_offset)
+ msg_size = mbox->rx_size - msgs_offset;
+
+ /* Copy mbox messages from mbox memory to bounce buffer */
+ memcpy(mdev->mbase + mbox->rx_start,
+ hw_mbase + mbox->rx_start, msg_size + msgs_offset);
+}
+
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int __always_unused irq, void *arg)
{
struct otx2_cptvf_dev *cptvf = arg;
@@ -106,6 +148,7 @@ void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
pfvf_mbox = &cptvf->pfvf_mbox;
+ otx2_cpt_sync_mbox_bbuf(pfvf_mbox, 0);
mdev = &pfvf_mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + pfvf_mbox->rx_start);
if (rsp_hdr->num_msgs == 0)
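otx2_cpt_mbox_bbuf_init() and otx2_cpt_sync_mbox_bbuf() above make the VF assemble and parse mailbox messages in an ordinary kernel bounce buffer; when the PF-VF work runs, only a bounded slice of the hardware mailbox is copied in, with msg_size clamped to the rx window so a corrupt header cannot cause an overrun. A simplified, self-contained userspace illustration of that copy-and-clamp step follows; the sizes, offsets and struct layout are invented for the example and do not match the real struct mbox_hdr.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MBOX_SIZE	0x400
#define RX_START	0x200
#define MSGS_OFFSET	16	/* stands in for ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN) */

struct hdr {
	uint16_t num_msgs;
	uint32_t msg_size;
};

/* Copy the rx area from the "hardware" mailbox into a shadow buffer, clamping
 * msg_size so a bogus value cannot make the copy overrun the rx window. */
static void sync_bounce(uint8_t *shadow, const uint8_t *hw, size_t rx_size)
{
	struct hdr h;
	uint64_t msg_size;

	memcpy(&h, hw + RX_START, sizeof(h));
	msg_size = h.msg_size;
	if (msg_size > rx_size - MSGS_OFFSET)
		msg_size = rx_size - MSGS_OFFSET;

	memcpy(shadow + RX_START, hw + RX_START, msg_size + MSGS_OFFSET);
}

int main(void)
{
	static uint8_t hw[MBOX_SIZE], shadow[MBOX_SIZE];
	struct hdr h = { .num_msgs = 1, .msg_size = 0x10000 };	/* deliberately oversized */

	memcpy(hw + RX_START, &h, sizeof(h));
	sync_bounce(shadow, hw, MBOX_SIZE - RX_START);

	memcpy(&h, shadow + RX_START, sizeof(h));
	printf("shadow header: num_msgs=%u\n", h.num_msgs);
	return 0;
}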
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index d5c1c1b7c7e4..811ded72ce5f 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -320,7 +320,7 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
cpt_req->dlen, false);
/* Send CPT command */
- otx2_cpt_send_cmd(&cptinst, 1, lf);
+ lf->lfs->ops->send_cmd(&cptinst, 1, lf);
/*
* We allocate and prepare pending queue entry in critical section
@@ -349,13 +349,14 @@ int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
&lfs->lf[cpu_num]);
}
-static int cpt_process_ccode(struct pci_dev *pdev,
+static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
union otx2_cpt_res_s *cpt_status,
struct otx2_cpt_inst_info *info,
u32 *res_code)
{
- u8 uc_ccode = cpt_status->s.uc_compcode;
- u8 ccode = cpt_status->s.compcode;
+ u8 uc_ccode = lfs->ops->cpt_get_uc_compcode(cpt_status);
+ u8 ccode = lfs->ops->cpt_get_compcode(cpt_status);
+ struct pci_dev *pdev = lfs->pdev;
switch (ccode) {
case OTX2_CPT_COMP_E_FAULT:
@@ -389,6 +390,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
return 1;
case OTX2_CPT_COMP_E_GOOD:
+ case OTX2_CPT_COMP_E_WARN:
/*
* Check microcode completion code, it is only valid
* when completion code is CPT_COMP_E::GOOD
@@ -426,7 +428,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
return 0;
}
-static inline void process_pending_queue(struct pci_dev *pdev,
+static inline void process_pending_queue(struct otx2_cptlfs_info *lfs,
struct otx2_cpt_pending_queue *pqueue)
{
struct otx2_cpt_pending_entry *resume_pentry = NULL;
@@ -436,6 +438,7 @@ static inline void process_pending_queue(struct pci_dev *pdev,
struct otx2_cpt_inst_info *info = NULL;
struct otx2_cpt_req_info *req = NULL;
struct crypto_async_request *areq;
+ struct pci_dev *pdev = lfs->pdev;
u32 res_code, resume_index;
while (1) {
@@ -476,7 +479,7 @@ static inline void process_pending_queue(struct pci_dev *pdev,
goto process_pentry;
}
- if (cpt_process_ccode(pdev, cpt_status, info, &res_code)) {
+ if (cpt_process_ccode(lfs, cpt_status, info, &res_code)) {
spin_unlock_bh(&pqueue->lock);
return;
}
@@ -529,7 +532,7 @@ process_pentry:
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
{
- process_pending_queue(wqe->lfs->pdev,
+ process_pending_queue(wqe->lfs,
&wqe->lfs->lf[wqe->lf_num].pqueue);
}
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index cc8dd3072b8b..1491cbfbc071 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -264,8 +264,8 @@ static int nx842_validate_result(struct device *dev,
* @inlen: Length of input buffer
* @out: Pointer to output buffer
* @outlen: Length of output buffer
- * @wrkmem: ptr to buffer for working memory, size determined by
- * nx842_pseries_driver.workmem_size
+ * @wmem: ptr to buffer for working memory, size determined by
+ * nx842_pseries_driver.workmem_size
*
* Returns:
* 0 Success, output of length @outlen stored in the buffer at @out
@@ -393,8 +393,8 @@ unlock:
* @inlen: Length of input buffer
* @out: Pointer to output buffer
* @outlen: Length of output buffer
- * @wrkmem: ptr to buffer for working memory, size determined by
- * nx842_pseries_driver.workmem_size
+ * @wmem: ptr to buffer for working memory, size determined by
+ * nx842_pseries_driver.workmem_size
*
* Returns:
* 0 Success, output of length @outlen stored in the buffer at @out
@@ -513,7 +513,7 @@ unlock:
/**
* nx842_OF_set_defaults -- Set default (disabled) values for devdata
*
- * @devdata - struct nx842_devdata to update
+ * @devdata: struct nx842_devdata to update
*
* Returns:
* 0 on success
@@ -538,13 +538,15 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
* The status field indicates if the device is enabled when the status
* is 'okay'. Otherwise the device driver will be disabled.
*
- * @prop - struct property point containing the maxsyncop for the update
+ * @devdata: struct nx842_devdata to use for dev_info
+ * @prop: struct property point containing the maxsyncop for the update
*
* Returns:
* 0 - Device is available
* -ENODEV - Device is not available
*/
-static int nx842_OF_upd_status(struct property *prop)
+static int nx842_OF_upd_status(struct nx842_devdata *devdata,
+ struct property *prop)
{
const char *status = (const char *)prop->value;
@@ -571,8 +573,8 @@ static int nx842_OF_upd_status(struct property *prop)
* In this example, the maximum byte length of a scatter list is
* 0x0ff0 (4,080).
*
- * @devdata - struct nx842_devdata to update
- * @prop - struct property point containing the maxsyncop for the update
+ * @devdata: struct nx842_devdata to update
+ * @prop: struct property point containing the maxsyncop for the update
*
* Returns:
* 0 on success
@@ -619,8 +621,8 @@ static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
* 0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list
* elements.
*
- * @devdata - struct nx842_devdata to update
- * @prop - struct property point containing the maxsyncop for the update
+ * @devdata: struct nx842_devdata to update
+ * @prop: struct property point containing the maxsyncop for the update
*
* Returns:
* 0 on success
@@ -689,7 +691,6 @@ out:
}
/**
- *
* nx842_OF_upd -- Handle OF properties updates for the device.
*
* Set all properties from the OF tree. Optionally, a new property
@@ -758,7 +759,7 @@ static int nx842_OF_upd(struct property *new_prop)
goto out;
/* Perform property updates */
- ret = nx842_OF_upd_status(status);
+ ret = nx842_OF_upd_status(new_devdata, status);
if (ret)
goto error_out;
@@ -812,8 +813,7 @@ error_out:
*
* @np: notifier block
* @action: notifier action
- * @update: struct pSeries_reconfig_prop_update pointer if action is
- * PSERIES_UPDATE_PROPERTY
+ * @data: struct of_reconfig_data pointer
*
* Returns:
* NOTIFY_OK on success
@@ -1069,6 +1069,7 @@ static const struct vio_device_id nx842_vio_driver_ids[] = {
{"ibm,compression-v1", "ibm,compression"},
{"", ""},
};
+MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);
static struct vio_driver nx842_vio_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index d6314ea9ae89..0e440f704a8f 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -88,7 +88,7 @@ static int cbc_aes_nx_crypt(struct skcipher_request *req,
memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index e7384d107573..3793885f928d 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -391,7 +391,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* update stats */
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
@@ -460,7 +460,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* update stats */
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 13f518802343..dfa3ad1a12f2 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -102,7 +102,7 @@ static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv)
memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
@@ -118,7 +118,7 @@ static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
u8 iv[16];
- memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
+ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 7a729dc2bc17..502a565074e9 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -86,7 +86,7 @@ static int ecb_aes_nx_crypt(struct skcipher_request *req,
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index fc9baca13920..4a796318b430 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -382,7 +382,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
+ atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
&(nx_ctx->stats->aes_bytes));
processed += to_process;
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 446f611726df..655361ba9107 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -660,8 +660,8 @@ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
* @inlen: input buffer size
* @out: output buffer pointer
* @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
+ * @wmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
*
* Returns: see @nx842_powernv_exec()
*/
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index b0ad665e4bda..c3bebf0feabe 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -16,6 +16,11 @@
#include "nx_csbcpb.h"
#include "nx.h"
+struct sha256_state_be {
+ __be32 state[SHA256_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buf[SHA256_BLOCK_SIZE];
+};
static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
@@ -36,7 +41,7 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
}
static int nx_sha256_init(struct shash_desc *desc) {
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
memset(sctx, 0, sizeof *sctx);
@@ -56,7 +61,7 @@ static int nx_sha256_init(struct shash_desc *desc) {
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *out_sg;
@@ -175,7 +180,7 @@ out:
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
@@ -245,7 +250,7 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
@@ -254,7 +259,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct sha256_state_be *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
@@ -268,8 +273,8 @@ struct shash_alg nx_shash_sha256_alg = {
.final = nx_sha256_final,
.export = nx_sha256_export,
.import = nx_sha256_import,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
+ .descsize = sizeof(struct sha256_state_be),
+ .statesize = sizeof(struct sha256_state_be),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
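The sha256_state_be definition above exists because the NX coprocessor hands back its intermediate digest words in big-endian order, and descsize/statesize must describe exactly the structure that export/import copy. A minimal userspace sketch of that contract (struct and function names here are illustrative, not driver API):
    /*
     * Minimal userspace sketch, not the kernel driver: the exported state
     * keeps its digest words big-endian, export/import are raw copies of
     * this structure, and statesize/descsize must equal its size.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>        /* htonl */

    struct sha256_state_be_demo {
            uint32_t state[8];    /* stored big-endian, as the NX unit returns it */
            uint64_t count;
            uint8_t  buf[64];
    };

    static void demo_export(const struct sha256_state_be_demo *s, void *out)
    {
            memcpy(out, s, sizeof(*s));        /* statesize == sizeof(*s) */
    }

    static void demo_import(struct sha256_state_be_demo *s, const void *in)
    {
            memcpy(s, in, sizeof(*s));
    }

    int main(void)
    {
            struct sha256_state_be_demo a = { .count = 3 }, b;
            unsigned char blob[sizeof(a)];

            a.state[0] = htonl(0x6a09e667);    /* SHA-256 H0, kept big-endian */
            demo_export(&a, blob);
            demo_import(&b, blob);
            printf("roundtrip ok: %d\n",
                   b.state[0] == htonl(0x6a09e667) && b.count == 3);
            return 0;
    }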
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index c29103a1a0b6..1ffb40d2c324 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -15,6 +15,11 @@
#include "nx_csbcpb.h"
#include "nx.h"
+struct sha512_state_be {
+ __be64 state[SHA512_DIGEST_SIZE / 8];
+ u64 count[2];
+ u8 buf[SHA512_BLOCK_SIZE];
+};
static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
@@ -36,7 +41,7 @@ static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
static int nx_sha512_init(struct shash_desc *desc)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
memset(sctx, 0, sizeof *sctx);
@@ -56,7 +61,7 @@ static int nx_sha512_init(struct shash_desc *desc)
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *out_sg;
@@ -178,7 +183,7 @@ out:
static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
@@ -251,7 +256,7 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
@@ -260,7 +265,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct sha512_state_be *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
@@ -274,8 +279,8 @@ struct shash_alg nx_shash_sha512_alg = {
.final = nx_sha512_final,
.export = nx_sha512_export,
.import = nx_sha512_import,
- .descsize = sizeof(struct sha512_state),
- .statesize = sizeof(struct sha512_state),
+ .descsize = sizeof(struct sha512_state_be),
+ .statesize = sizeof(struct sha512_state_be),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h
index 493f8490ff94..e64f7e36fb92 100644
--- a/drivers/crypto/nx/nx_csbcpb.h
+++ b/drivers/crypto/nx/nx_csbcpb.h
@@ -140,8 +140,8 @@ struct cop_status_block {
u8 crb_seq_number;
u8 completion_code;
u8 completion_extension;
- u32 processed_byte_count;
- u64 address;
+ __be32 processed_byte_count;
+ __be64 address;
} __packed;
/* Nest accelerator workbook section 4.4 */
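Marking processed_byte_count as __be32 is what makes the be32_to_cpu() conversions in the nx-aes-* hunks above necessary: the coprocessor writes the counter big-endian, so adding the raw field to the stats would be wrong on little-endian hosts. A standalone sketch of the conversion (csb_demo is a made-up stand-in, not the kernel struct):
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>        /* htonl, ntohl */

    /* Illustrative stand-in for the hardware status block field. */
    struct csb_demo {
            uint32_t processed_byte_count;    /* written big-endian by the coprocessor */
    };

    int main(void)
    {
            struct csb_demo csb = { .processed_byte_count = htonl(4096) };
            uint64_t aes_bytes = 0;

            /* Adding the raw field would count 0x00100000 on little-endian hosts. */
            aes_bytes += ntohl(csb.processed_byte_count);    /* what be32_to_cpu() does */
            printf("aes_bytes = %llu\n", (unsigned long long)aes_bytes);
            return 0;
    }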
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index c9d38bcfd1c7..bc8631363d72 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -229,9 +229,8 @@ static int omap_des_hw_init(struct omap_des_dev *dd)
* There may be long delays between requests.
* Device might go to off mode to save power.
*/
- err = pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
- pm_runtime_put_noidle(dd->dev);
dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
return err;
}
@@ -994,9 +993,8 @@ static int omap_des_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
goto err_get;
}
@@ -1124,9 +1122,8 @@ static int omap_des_resume(struct device *dev)
{
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
- pm_runtime_put_noidle(dev);
dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
return err;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ae0d320d3c60..dd53ad9987b0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -372,7 +372,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
int err;
- err = pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
@@ -2244,7 +2244,7 @@ static int omap_sham_suspend(struct device *dev)
static int omap_sham_resume(struct device *dev)
{
- int err = pm_runtime_get_sync(dev);
+ int err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get sync: %d\n", err);
return err;
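Both omap-des and omap-sham switch to pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails; that is why the explicit pm_runtime_put_noidle() calls disappear from the error paths. A hedged sketch of the resulting pattern (demo_hw_init is illustrative, not a real driver function):
    /*
     * Hedged sketch of the converted pattern, not a complete driver:
     * pm_runtime_resume_and_get() drops the usage count itself on failure,
     * so the error path needs no pm_runtime_put_noidle().
     */
    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int demo_hw_init(struct device *dev)    /* demo_hw_init is illustrative */
    {
            int err = pm_runtime_resume_and_get(dev);

            if (err < 0) {
                    dev_err(dev, "failed to resume: %d\n", err);
                    return err;                    /* no put_noidle() needed here */
            }

            /* ... program the hardware ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return 0;
    }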
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
index b8f3463be6ef..7eb5daef4f88 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -24,7 +24,7 @@ struct icp_qat_fw_loader_hal_handle {
};
struct icp_qat_fw_loader_chip_info {
- bool sram_visible;
+ int mmp_sram_size;
bool nn;
bool lm2lm3;
u32 lm_size;
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index bd3028126cbe..12ca6b8764aa 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -696,7 +696,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
case ADF_4XXX_PCI_DEVICE_ID:
- handle->chip_info->sram_visible = false;
+ handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
@@ -730,7 +730,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
break;
case PCI_DEVICE_ID_INTEL_QAT_C62X:
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
- handle->chip_info->sram_visible = false;
+ handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = true;
handle->chip_info->lm2lm3 = false;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
@@ -763,7 +763,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
+ LOCAL_TO_XFER_REG_OFFSET);
break;
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
- handle->chip_info->sram_visible = true;
+ handle->chip_info->mmp_sram_size = 0x40000;
handle->chip_info->nn = true;
handle->chip_info->lm2lm3 = false;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
@@ -800,7 +800,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
goto out_err;
}
- if (handle->chip_info->sram_visible) {
+ if (handle->chip_info->mmp_sram_size > 0) {
sram_bar =
&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle->hal_sram_addr_v = sram_bar->virt_addr;
@@ -1417,7 +1417,11 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
return -EINVAL;
}
- qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+ status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+ if (status) {
+ pr_err("QAT: failed to read register");
+ return status;
+ }
gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
data16low = 0xffff & data;
data16hi = 0xffff & (data >> 0x10);
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 1fb5fc852f6b..2026cc6be8f0 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -342,7 +342,6 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
return 0;
}
-#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
@@ -1546,15 +1545,14 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
int status = 0;
if (handle->chip_info->fw_auth) {
- if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
+ status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
+ if (!status)
status = qat_uclo_auth_fw(handle, desc);
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
- if (!handle->chip_info->sram_visible) {
- dev_dbg(&handle->pci_dev->dev,
- "QAT MMP fw not loaded for device 0x%x",
- handle->pci_dev->device);
- return status;
+ if (handle->chip_info->mmp_sram_size < mem_size) {
+ pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+ return -EFBIG;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
}
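Replacing the sram_visible boolean with an explicit mmp_sram_size lets qat_uclo_wr_mimage() reject an MMP image that would not fit in the SRAM window instead of silently skipping or overrunning it. A small standalone sketch of that bounds check (sizes and names here are illustrative):
    #include <stdio.h>

    /* Illustrative values; the real sizes come from chip_info in qat_hal.c. */
    #define DEMO_MMP_SRAM_SIZE 0x40000u    /* DH895xCC-style SRAM window */

    static int demo_wr_mimage(unsigned int mmp_sram_size, unsigned int mem_size)
    {
            if (mmp_sram_size < mem_size) {
                    fprintf(stderr, "MMP image too large: 0x%x\n", mem_size);
                    return -1;        /* the driver returns -EFBIG */
            }
            /* ... write mem_size bytes into the SRAM BAR ... */
            return 0;
    }

    int main(void)
    {
            printf("fits:   %d\n", demo_wr_mimage(DEMO_MMP_SRAM_SIZE, 0x10000));
            printf("reject: %d\n", demo_wr_mimage(0, 0x10000));    /* size 0 == no SRAM */
            return 0;
    }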
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 14ade8a7d664..2cf8984e1b85 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -6,3 +6,4 @@ qcrypto-objs := core.o \
qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_AEAD) += aead.o
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
new file mode 100644
index 000000000000..290e2446a2f3
--- /dev/null
+++ b/drivers/crypto/qce/aead.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <crypto/gcm.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/scatterwalk.h>
+#include "aead.h"
+
+#define CCM_NONCE_ADATA_SHIFT 6
+#define CCM_NONCE_AUTHSIZE_SHIFT 3
+#define MAX_CCM_ADATA_HEADER_LEN 6
+
+static LIST_HEAD(aead_algs);
+
+static void qce_aead_done(void *data)
+{
+ struct crypto_async_request *async_req = data;
+ struct aead_request *req = aead_request_cast(async_req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
+ enum dma_data_direction dir_src, dir_dst;
+ bool diff_dst;
+ int error;
+ u32 status;
+ unsigned int totallen;
+ unsigned char tag[SHA256_DIGEST_SIZE] = {0};
+ int ret = 0;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ error = qce_dma_terminate_all(&qce->dma);
+ if (error)
+ dev_dbg(qce->dev, "aead dma termination error (%d)\n",
+ error);
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+ if (IS_CCM(rctx->flags)) {
+ if (req->assoclen) {
+ sg_free_table(&rctx->src_tbl);
+ if (diff_dst)
+ sg_free_table(&rctx->dst_tbl);
+ } else {
+ if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
+ sg_free_table(&rctx->dst_tbl);
+ }
+ } else {
+ sg_free_table(&rctx->dst_tbl);
+ }
+
+ error = qce_check_status(qce, &status);
+ if (error < 0 && (error != -EBADMSG))
+ dev_err(qce->dev, "aead operation error (%x)\n", status);
+
+ if (IS_ENCRYPT(rctx->flags)) {
+ totallen = req->cryptlen + req->assoclen;
+ if (IS_CCM(rctx->flags))
+ scatterwalk_map_and_copy(rctx->ccmresult_buf, req->dst,
+ totallen, ctx->authsize, 1);
+ else
+ scatterwalk_map_and_copy(result_buf->auth_iv, req->dst,
+ totallen, ctx->authsize, 1);
+
+ } else if (!IS_CCM(rctx->flags)) {
+ totallen = req->cryptlen + req->assoclen - ctx->authsize;
+ scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
+ ret = memcmp(result_buf->auth_iv, tag, ctx->authsize);
+ if (ret) {
+ pr_err("Bad message error\n");
+ error = -EBADMSG;
+ }
+ }
+
+ qce->async_req_done(qce, error);
+}
+
+static struct scatterlist *
+qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+
+ sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+ return qce_sgtable_add(tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ);
+}
+
+static struct scatterlist *
+qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+
+ sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
+ return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
+}
+
+static struct scatterlist *
+qce_aead_prepare_dst_buf(struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ struct scatterlist *sg, *msg_sg, __sg[2];
+ gfp_t gfp;
+ unsigned int assoclen = req->assoclen;
+ unsigned int totallen;
+ int ret;
+
+ totallen = rctx->cryptlen + assoclen;
+ rctx->dst_nents = sg_nents_for_len(req->dst, totallen);
+ if (rctx->dst_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (IS_CCM(rctx->flags))
+ rctx->dst_nents += 2;
+ else
+ rctx->dst_nents += 1;
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (IS_CCM(rctx->flags) && assoclen) {
+ /* Get the dst buffer */
+ msg_sg = scatterwalk_ffwd(__sg, req->dst, assoclen);
+
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
+ rctx->assoclen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto dst_tbl_free;
+ }
+ /* dst buffer */
+ sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto dst_tbl_free;
+ }
+ totallen = rctx->cryptlen + rctx->assoclen;
+ } else {
+ if (totallen) {
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, totallen);
+ if (IS_ERR(sg))
+ goto dst_tbl_free;
+ }
+ }
+ if (IS_CCM(rctx->flags))
+ sg = qce_aead_prepare_ccm_result_buf(&rctx->dst_tbl, req);
+ else
+ sg = qce_aead_prepare_result_buf(&rctx->dst_tbl, req);
+
+ if (IS_ERR(sg))
+ goto dst_tbl_free;
+
+ sg_mark_end(sg);
+ rctx->dst_sg = rctx->dst_tbl.sgl;
+ rctx->dst_nents = sg_nents_for_len(rctx->dst_sg, totallen) + 1;
+
+ return sg;
+
+dst_tbl_free:
+ sg_free_table(&rctx->dst_tbl);
+ return sg;
+}
+
+static int
+qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
+{
+ struct scatterlist *sg, *msg_sg, __sg[2];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int assoclen = rctx->assoclen;
+ unsigned int adata_header_len, cryptlen, totallen;
+ gfp_t gfp;
+ bool diff_dst;
+ int ret;
+
+ if (IS_DECRYPT(rctx->flags))
+ cryptlen = rctx->cryptlen + ctx->authsize;
+ else
+ cryptlen = rctx->cryptlen;
+ totallen = cryptlen + req->assoclen;
+
+ /* Get the msg */
+ msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);
+
+ rctx->adata = kzalloc((ALIGN(assoclen, 16) + MAX_CCM_ADATA_HEADER_LEN) *
+ sizeof(unsigned char), GFP_ATOMIC);
+ if (!rctx->adata)
+ return -ENOMEM;
+
+ /*
+ * Format associated data (RFC3610 and NIST 800-38C)
+ * Even though the specification allows the AAD to be up to 2^64 - 1 bytes,
+ * the assoclen field in aead_request is an unsigned int and thus limits
+ * the AAD to at most 2^32 - 1 bytes. So we handle only two scenarios
+ * while forming the header for AAD.
+ */
+ if (assoclen < 0xff00) {
+ adata_header_len = 2;
+ *(__be16 *)rctx->adata = cpu_to_be16(assoclen);
+ } else {
+ adata_header_len = 6;
+ *(__be16 *)rctx->adata = cpu_to_be16(0xfffe);
+ *(__be32 *)(rctx->adata + 2) = cpu_to_be32(assoclen);
+ }
+
+ /* Copy the associated data */
+ if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, assoclen),
+ rctx->adata + adata_header_len,
+ assoclen) != assoclen)
+ return -EINVAL;
+
+ /* Pad associated data to block size */
+ rctx->assoclen = ALIGN(assoclen + adata_header_len, 16);
+
+ diff_dst = (req->src != req->dst) ? true : false;
+
+ if (diff_dst)
+ rctx->src_nents = sg_nents_for_len(req->src, totallen) + 1;
+ else
+ rctx->src_nents = sg_nents_for_len(req->src, totallen) + 2;
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
+ ret = sg_alloc_table(&rctx->src_tbl, rctx->src_nents, gfp);
+ if (ret)
+ return ret;
+
+ /* Associated Data */
+ sg_init_one(&rctx->adata_sg, rctx->adata, rctx->assoclen);
+ sg = qce_sgtable_add(&rctx->src_tbl, &rctx->adata_sg,
+ rctx->assoclen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ /* src msg */
+ sg = qce_sgtable_add(&rctx->src_tbl, msg_sg, cryptlen);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ if (!diff_dst) {
+ /*
+ * For decrypt, when src and dst buffers are same, there is already space
+ * in the buffer for padded 0's which is output in lieu of
+ * the MAC that is input. So skip the below.
+ */
+ if (!IS_DECRYPT(rctx->flags)) {
+ sg = qce_aead_prepare_ccm_result_buf(&rctx->src_tbl, req);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ }
+ }
+ sg_mark_end(sg);
+ rctx->src_sg = rctx->src_tbl.sgl;
+ totallen = cryptlen + rctx->assoclen;
+ rctx->src_nents = sg_nents_for_len(rctx->src_sg, totallen);
+
+ if (diff_dst) {
+ sg = qce_aead_prepare_dst_buf(req);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto err_free;
+ }
+ } else {
+ if (IS_ENCRYPT(rctx->flags))
+ rctx->dst_nents = rctx->src_nents + 1;
+ else
+ rctx->dst_nents = rctx->src_nents;
+ rctx->dst_sg = rctx->src_sg;
+ }
+
+ return 0;
+err_free:
+ sg_free_table(&rctx->src_tbl);
+ return ret;
+}
+
+static int qce_aead_prepare_buf(struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ struct scatterlist *sg;
+ bool diff_dst = (req->src != req->dst) ? true : false;
+ unsigned int totallen;
+
+ totallen = rctx->cryptlen + rctx->assoclen;
+
+ sg = qce_aead_prepare_dst_buf(req);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+ if (diff_dst) {
+ rctx->src_nents = sg_nents_for_len(req->src, totallen);
+ if (rctx->src_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of src SG.\n");
+ return -EINVAL;
+ }
+ rctx->src_sg = req->src;
+ } else {
+ rctx->src_nents = rctx->dst_nents - 1;
+ rctx->src_sg = rctx->dst_sg;
+ }
+ return 0;
+}
+
+static int qce_aead_ccm_prepare_buf(struct aead_request *req)
+{
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct scatterlist *sg;
+ bool diff_dst = (req->src != req->dst) ? true : false;
+ unsigned int cryptlen;
+
+ if (rctx->assoclen)
+ return qce_aead_ccm_prepare_buf_assoclen(req);
+
+ if (IS_ENCRYPT(rctx->flags))
+ return qce_aead_prepare_buf(req);
+
+ cryptlen = rctx->cryptlen + ctx->authsize;
+ if (diff_dst) {
+ rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
+ rctx->src_sg = req->src;
+ sg = qce_aead_prepare_dst_buf(req);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+ } else {
+ rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
+ rctx->src_sg = req->src;
+ rctx->dst_nents = rctx->src_nents;
+ rctx->dst_sg = rctx->src_sg;
+ }
+
+ return 0;
+}
+
+static int qce_aead_create_ccm_nonce(struct qce_aead_reqctx *rctx, struct qce_aead_ctx *ctx)
+{
+ unsigned int msglen_size, ivsize;
+ u8 msg_len[4];
+ int i;
+
+ if (!rctx || !rctx->iv)
+ return -EINVAL;
+
+ msglen_size = rctx->iv[0] + 1;
+
+ /* Verify that msg len size is valid */
+ if (msglen_size < 2 || msglen_size > 8)
+ return -EINVAL;
+
+ ivsize = rctx->ivsize;
+
+ /*
+ * Clear the msglen bytes in the IV.
+ * Otherwise the h/w engine and nonce will pick up any stray value pending there.
+ */
+ if (!IS_CCM_RFC4309(rctx->flags)) {
+ for (i = 0; i < msglen_size; i++)
+ rctx->iv[ivsize - i - 1] = 0;
+ }
+
+ /*
+ * The crypto framework encodes cryptlen as unsigned int. Thus, even though
+ * the spec allows up to 8 bytes to encode msg_len, only 4 bytes are needed.
+ */
+ if (msglen_size > 4)
+ msglen_size = 4;
+
+ memcpy(&msg_len[0], &rctx->cryptlen, 4);
+
+ memcpy(&rctx->ccm_nonce[0], rctx->iv, rctx->ivsize);
+ if (rctx->assoclen)
+ rctx->ccm_nonce[0] |= 1 << CCM_NONCE_ADATA_SHIFT;
+ rctx->ccm_nonce[0] |= ((ctx->authsize - 2) / 2) <<
+ CCM_NONCE_AUTHSIZE_SHIFT;
+ for (i = 0; i < msglen_size; i++)
+ rctx->ccm_nonce[QCE_MAX_NONCE - i - 1] = msg_len[i];
+
+ return 0;
+}
+
+static int
+qce_aead_async_req_handle(struct crypto_async_request *async_req)
+{
+ struct aead_request *req = aead_request_cast(async_req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ bool diff_dst;
+ int dst_nents, src_nents, ret;
+
+ if (IS_CCM_RFC4309(rctx->flags)) {
+ memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
+ rctx->ccm_rfc4309_iv[0] = 3;
+ memcpy(&rctx->ccm_rfc4309_iv[1], ctx->ccm4309_salt, QCE_CCM4309_SALT_SIZE);
+ memcpy(&rctx->ccm_rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->ccm_rfc4309_iv;
+ rctx->ivsize = AES_BLOCK_SIZE;
+ } else {
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_aead_ivsize(tfm);
+ }
+ if (IS_CCM_RFC4309(rctx->flags))
+ rctx->assoclen = req->assoclen - 8;
+ else
+ rctx->assoclen = req->assoclen;
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+ if (IS_CCM(rctx->flags)) {
+ ret = qce_aead_create_ccm_nonce(rctx, ctx);
+ if (ret)
+ return ret;
+ }
+ if (IS_CCM(rctx->flags))
+ ret = qce_aead_ccm_prepare_buf(req);
+ else
+ ret = qce_aead_prepare_buf(req);
+
+ if (ret)
+ return ret;
+ dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ if (dst_nents < 0) {
+ ret = dst_nents;
+ goto error_free;
+ }
+
+ if (diff_dst) {
+ src_nents = dma_map_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+ if (src_nents < 0) {
+ ret = src_nents;
+ goto error_unmap_dst;
+ }
+ } else {
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ src_nents = dst_nents;
+ else
+ src_nents = dst_nents - 1;
+ }
+
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents,
+ qce_aead_done, async_req);
+ if (ret)
+ goto error_unmap_src;
+
+ qce_dma_issue_pending(&qce->dma);
+
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
+ if (ret)
+ goto error_terminate;
+
+ return 0;
+
+error_terminate:
+ qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+ if (diff_dst)
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+error_unmap_dst:
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+error_free:
+ if (IS_CCM(rctx->flags) && rctx->assoclen) {
+ sg_free_table(&rctx->src_tbl);
+ if (diff_dst)
+ sg_free_table(&rctx->dst_tbl);
+ } else {
+ sg_free_table(&rctx->dst_tbl);
+ }
+ return ret;
+}
+
+static int qce_aead_crypt(struct aead_request *req, int encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
+ unsigned int blocksize = crypto_aead_blocksize(tfm);
+
+ rctx->flags = tmpl->alg_flags;
+ rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+ if (encrypt)
+ rctx->cryptlen = req->cryptlen;
+ else
+ rctx->cryptlen = req->cryptlen - ctx->authsize;
+
+ /* CE does not handle 0 length messages */
+ if (!rctx->cryptlen) {
+ if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
+ ctx->need_fallback = true;
+ }
+
+ /* If fallback is needed, schedule and exit */
+ if (ctx->need_fallback) {
+ /* Reset need_fallback in case the same ctx is used for another transaction */
+ ctx->need_fallback = false;
+
+ aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ aead_request_set_callback(&rctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(&rctx->fallback_req, req->assoclen);
+
+ return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
+ crypto_aead_decrypt(&rctx->fallback_req);
+ }
+
+ /*
+ * CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_CBC(rctx->flags) && !IS_ALIGNED(rctx->cryptlen, blocksize))
+ return -EINVAL;
+
+ /* RFC4309 supports AAD sizes of 16 or 20 bytes */
+ if (IS_CCM_RFC4309(rctx->flags))
+ if (crypto_ipsec_check_assoclen(req->assoclen))
+ return -EINVAL;
+
+ return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_aead_encrypt(struct aead_request *req)
+{
+ return qce_aead_crypt(req, 1);
+}
+
+static int qce_aead_decrypt(struct aead_request *req)
+{
+ return qce_aead_crypt(req, 0);
+}
+
+static int qce_aead_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
+
+ if (IS_CCM_RFC4309(flags)) {
+ if (keylen < QCE_CCM4309_SALT_SIZE)
+ return -EINVAL;
+ keylen -= QCE_CCM4309_SALT_SIZE;
+ memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
+ }
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192)
+ return -EINVAL;
+
+ ctx->enc_keylen = keylen;
+ ctx->auth_keylen = keylen;
+
+ memcpy(ctx->enc_key, key, keylen);
+ memcpy(ctx->auth_key, key, keylen);
+
+ if (keylen == AES_KEYSIZE_192)
+ ctx->need_fallback = true;
+
+ return IS_CCM_RFC4309(flags) ?
+ crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
+ crypto_aead_setkey(ctx->fallback, key, keylen);
+}
+
+static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_authenc_keys authenc_keys;
+ unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
+ u32 _key[6];
+ int err;
+
+ err = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
+ if (err)
+ return err;
+
+ if (authenc_keys.enckeylen > QCE_MAX_KEY_SIZE ||
+ authenc_keys.authkeylen > QCE_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ if (IS_DES(flags)) {
+ err = verify_aead_des_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
+ if (err)
+ return err;
+ } else if (IS_3DES(flags)) {
+ err = verify_aead_des3_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
+ if (err)
+ return err;
+ /*
+ * The crypto engine does not support any two keys
+ * being the same for triple des algorithms. The
+ * verify_aead_des3_key helper does not check for all the
+ * below conditions. Schedule fallback in this case.
+ */
+ memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
+ if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+ !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+ !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+ ctx->need_fallback = true;
+ } else if (IS_AES(flags)) {
+ /* No random key sizes */
+ if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
+ authenc_keys.enckeylen != AES_KEYSIZE_192 &&
+ authenc_keys.enckeylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ if (authenc_keys.enckeylen == AES_KEYSIZE_192)
+ ctx->need_fallback = true;
+ }
+
+ ctx->enc_keylen = authenc_keys.enckeylen;
+ ctx->auth_keylen = authenc_keys.authkeylen;
+
+ memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);
+
+ memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
+ memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
+
+ return crypto_aead_setkey(ctx->fallback, key, keylen);
+}
+
+static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
+
+ if (IS_CCM(flags)) {
+ if (authsize < 4 || authsize > 16 || authsize % 2)
+ return -EINVAL;
+ if (IS_CCM_RFC4309(flags) && (authsize < 8 || authsize % 4))
+ return -EINVAL;
+ }
+ ctx->authsize = authsize;
+
+ return crypto_aead_setauthsize(ctx->fallback, authsize);
+}
+
+static int qce_aead_init(struct crypto_aead *tfm)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->need_fallback = false;
+ ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qce_aead_reqctx) +
+ crypto_aead_reqsize(ctx->fallback));
+ return 0;
+}
+
+static void qce_aead_exit(struct crypto_aead *tfm)
+{
+ struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->fallback);
+}
+
+struct qce_aead_def {
+ unsigned long flags;
+ const char *name;
+ const char *drv_name;
+ unsigned int blocksize;
+ unsigned int chunksize;
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+};
+
+static const struct qce_aead_def aead_def[] = {
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
+ .name = "authenc(hmac(sha1),cbc(des))",
+ .drv_name = "authenc-hmac-sha1-cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
+ .name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .drv_name = "authenc-hmac-sha1-cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
+ .name = "authenc(hmac(sha256),cbc(des))",
+ .drv_name = "authenc-hmac-sha256-cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
+ .name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .drv_name = "authenc-hmac-sha256-cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .drv_name = "authenc-hmac-sha256-cbc-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CCM,
+ .name = "ccm(aes)",
+ .drv_name = "ccm-aes-qce",
+ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ {
+ .flags = QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309,
+ .name = "rfc4309(ccm(aes))",
+ .drv_name = "rfc4309-ccm-aes-qce",
+ .blocksize = 1,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+};
+
+static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl;
+ struct aead_alg *alg;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl)
+ return -ENOMEM;
+
+ alg = &tmpl->alg.aead;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+
+ alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
+ alg->ivsize = def->ivsize;
+ alg->maxauthsize = def->maxauthsize;
+ if (IS_CCM(def->flags))
+ alg->setkey = qce_aead_ccm_setkey;
+ else
+ alg->setkey = qce_aead_setkey;
+ alg->setauthsize = qce_aead_setauthsize;
+ alg->encrypt = qce_aead_encrypt;
+ alg->decrypt = qce_aead_decrypt;
+ alg->init = qce_aead_init;
+ alg->exit = qce_aead_exit;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->base.cra_ctxsize = sizeof(struct qce_aead_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ INIT_LIST_HEAD(&tmpl->entry);
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AEAD;
+ tmpl->alg_flags = def->flags;
+ tmpl->qce = qce;
+
+ ret = crypto_register_aead(alg);
+ if (ret) {
+ kfree(tmpl);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ return ret;
+ }
+
+ list_add_tail(&tmpl->entry, &aead_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
+ return 0;
+}
+
+static void qce_aead_unregister(struct qce_device *qce)
+{
+ struct qce_alg_template *tmpl, *n;
+
+ list_for_each_entry_safe(tmpl, n, &aead_algs, entry) {
+ crypto_unregister_aead(&tmpl->alg.aead);
+ list_del(&tmpl->entry);
+ kfree(tmpl);
+ }
+}
+
+static int qce_aead_register(struct qce_device *qce)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(aead_def); i++) {
+ ret = qce_aead_register_one(&aead_def[i], qce);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ qce_aead_unregister(qce);
+ return ret;
+}
+
+const struct qce_algo_ops aead_ops = {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .register_algs = qce_aead_register,
+ .unregister_algs = qce_aead_unregister,
+ .async_req_handle = qce_aead_async_req_handle,
+};
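The associated-data handling in qce_aead_ccm_prepare_buf_assoclen() above follows the RFC 3610 / NIST SP 800-38C encoding: lengths below 0xff00 get a 2-byte big-endian header, larger ones get the 0xfffe marker plus a 4-byte length, and header plus AAD are then padded to a 16-byte boundary. A standalone sketch of that encoding (encode_adata_hdr is illustrative, not a qce helper):
    #include <stdint.h>
    #include <stdio.h>

    /* encode_adata_hdr() is illustrative, not a qce helper. */
    static unsigned int encode_adata_hdr(uint8_t *hdr, uint32_t assoclen)
    {
            if (assoclen < 0xff00) {                  /* 2-byte big-endian length */
                    hdr[0] = (assoclen >> 8) & 0xff;
                    hdr[1] = assoclen & 0xff;
                    return 2;
            }
            /* 0xfffe marker followed by a 4-byte big-endian length */
            hdr[0] = 0xff;
            hdr[1] = 0xfe;
            hdr[2] = (assoclen >> 24) & 0xff;
            hdr[3] = (assoclen >> 16) & 0xff;
            hdr[4] = (assoclen >> 8) & 0xff;
            hdr[5] = assoclen & 0xff;
            return 6;
    }

    int main(void)
    {
            uint8_t hdr[6];
            unsigned int assoclen = 24;
            unsigned int n = encode_adata_hdr(hdr, assoclen);

            /* The driver then pads header + AAD up to a 16-byte boundary. */
            printf("header len %u, padded adata len %u\n",
                   n, (assoclen + n + 15) & ~15u);
            return 0;
    }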
diff --git a/drivers/crypto/qce/aead.h b/drivers/crypto/qce/aead.h
new file mode 100644
index 000000000000..efb8477cc088
--- /dev/null
+++ b/drivers/crypto/qce/aead.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Linaro Limited. All rights reserved.
+ */
+
+#ifndef _AEAD_H_
+#define _AEAD_H_
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_MAX_KEY_SIZE 64
+#define QCE_CCM4309_SALT_SIZE 3
+
+struct qce_aead_ctx {
+ u8 enc_key[QCE_MAX_KEY_SIZE];
+ u8 auth_key[QCE_MAX_KEY_SIZE];
+ u8 ccm4309_salt[QCE_CCM4309_SALT_SIZE];
+ unsigned int enc_keylen;
+ unsigned int auth_keylen;
+ unsigned int authsize;
+ bool need_fallback;
+ struct crypto_aead *fallback;
+};
+
+struct qce_aead_reqctx {
+ unsigned long flags;
+ u8 *iv;
+ unsigned int ivsize;
+ int src_nents;
+ int dst_nents;
+ struct scatterlist result_sg;
+ struct scatterlist adata_sg;
+ struct sg_table dst_tbl;
+ struct sg_table src_tbl;
+ struct scatterlist *dst_sg;
+ struct scatterlist *src_sg;
+ unsigned int cryptlen;
+ unsigned int assoclen;
+ unsigned char *adata;
+ u8 ccm_nonce[QCE_MAX_NONCE];
+ u8 ccmresult_buf[QCE_BAM_BURST_SIZE];
+ u8 ccm_rfc4309_iv[QCE_MAX_IV_SIZE];
+ struct aead_request fallback_req;
+};
+
+static inline struct qce_alg_template *to_aead_tmpl(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+
+ return container_of(alg, struct qce_alg_template, alg.aead);
+}
+
+extern const struct qce_algo_ops aead_ops;
+
+#endif /* _AEAD_H_ */
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index dceb9579d87a..7c612ba5068f 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,6 +15,7 @@
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
+#include "aead.h"
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
@@ -88,17 +89,20 @@ static void qce_setup_config(struct qce_device *qce)
qce_write(qce, REG_CONFIG, config);
}
-static inline void qce_crypto_go(struct qce_device *qce)
+static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+ if (result_dump)
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+ else
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
}
-#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
-static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
{
u32 cfg = 0;
- if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+ if (IS_CCM(flags) || IS_CMAC(flags))
cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
else
cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
@@ -116,15 +120,16 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
else if (IS_CMAC(flags))
cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+ else if (IS_CCM(flags))
+ cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;
if (IS_SHA1(flags) || IS_SHA256(flags))
cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
- else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
- IS_CBC(flags) || IS_CTR(flags))
+ else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
- else if (IS_AES(flags) && IS_CCM(flags))
+ else if (IS_CCM(flags))
cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
- else if (IS_AES(flags) && IS_CMAC(flags))
+ else if (IS_CMAC(flags))
cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
if (IS_SHA(flags) || IS_SHA_HMAC(flags))
@@ -133,13 +138,11 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
if (IS_CCM(flags))
cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
- if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
- IS_CMAC(flags))
- cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
-
return cfg;
}
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
@@ -168,7 +171,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
qce_clear_array(qce, REG_AUTH_KEY0, 16);
qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
- auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+ auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, digestsize);
}
if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
@@ -196,7 +199,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
qce_write_array(qce, REG_AUTH_BYTECNT0,
(u32 *)rctx->byte_count, 2);
- auth_cfg = qce_auth_cfg(rctx->flags, 0);
+ auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);
if (rctx->last_blk)
auth_cfg |= BIT(AUTH_LAST_SHIFT);
@@ -219,13 +222,13 @@ go_proc:
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
- qce_crypto_go(qce);
+ qce_crypto_go(qce, true);
return 0;
}
#endif
-#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
u32 cfg = 0;
@@ -271,7 +274,9 @@ static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
return cfg;
}
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
u8 swap[QCE_AES_IV_LENGTH];
@@ -380,7 +385,156 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);
- qce_crypto_go(qce);
+ qce_crypto_go(qce, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
+static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
+{
+ u32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
+
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = be32_to_cpup((const __be32 *)s);
+ s += sizeof(u32);
+ d++;
+ }
+ return DIV_ROUND_UP(len, sizeof(u32));
+}
+
+static int qce_setup_regs_aead(struct crypto_async_request *async_req)
+{
+ struct aead_request *req = aead_request_cast(async_req);
+ struct qce_aead_reqctx *rctx = aead_request_ctx(req);
+ struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
+ u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
+ u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
+ u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
+ unsigned int enc_keylen = ctx->enc_keylen;
+ unsigned int auth_keylen = ctx->auth_keylen;
+ unsigned int enc_ivsize = rctx->ivsize;
+ unsigned int auth_ivsize = 0;
+ unsigned int enckey_words, enciv_words;
+ unsigned int authkey_words, authiv_words, authnonce_words;
+ unsigned long flags = rctx->flags;
+ u32 encr_cfg, auth_cfg, config, totallen;
+ u32 iv_last_word;
+
+ qce_setup_config(qce);
+
+ /* Write encryption key */
+ enckey_words = qce_be32_to_cpu_array(enckey, ctx->enc_key, enc_keylen);
+ qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words);
+
+ /* Write encryption iv */
+ enciv_words = qce_be32_to_cpu_array(enciv, rctx->iv, enc_ivsize);
+ qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words);
+
+ if (IS_CCM(rctx->flags)) {
+ iv_last_word = enciv[enciv_words - 1];
+ qce_write(qce, REG_CNTR3_IV3, iv_last_word + 1);
+ qce_write_array(qce, REG_ENCR_CCM_INT_CNTR0, (u32 *)enciv, enciv_words);
+ qce_write(qce, REG_CNTR_MASK, ~0);
+ qce_write(qce, REG_CNTR_MASK0, ~0);
+ qce_write(qce, REG_CNTR_MASK1, ~0);
+ qce_write(qce, REG_CNTR_MASK2, ~0);
+ }
+
+ /* Clear authentication IV and KEY registers of previous values */
+ qce_clear_array(qce, REG_AUTH_IV0, 16);
+ qce_clear_array(qce, REG_AUTH_KEY0, 16);
+
+ /* Clear byte count */
+ qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+ /* Write authentication key */
+ authkey_words = qce_be32_to_cpu_array(authkey, ctx->auth_key, auth_keylen);
+ qce_write_array(qce, REG_AUTH_KEY0, (u32 *)authkey, authkey_words);
+
+ /* Write initial authentication IV only for HMAC algorithms */
+ if (IS_SHA_HMAC(rctx->flags)) {
+ /* Write default authentication iv */
+ if (IS_SHA1_HMAC(rctx->flags)) {
+ auth_ivsize = SHA1_DIGEST_SIZE;
+ memcpy(authiv, std_iv_sha1, auth_ivsize);
+ } else if (IS_SHA256_HMAC(rctx->flags)) {
+ auth_ivsize = SHA256_DIGEST_SIZE;
+ memcpy(authiv, std_iv_sha256, auth_ivsize);
+ }
+ authiv_words = auth_ivsize / sizeof(u32);
+ qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);
+ } else if (IS_CCM(rctx->flags)) {
+ /* Write nonce for CCM algorithms */
+ authnonce_words = qce_be32_to_cpu_array(authnonce, rctx->ccm_nonce, QCE_MAX_NONCE);
+ qce_write_array(qce, REG_AUTH_INFO_NONCE0, authnonce, authnonce_words);
+ }
+
+ /* Set up ENCR_SEG_CFG */
+ encr_cfg = qce_encr_cfg(flags, enc_keylen);
+ if (IS_ENCRYPT(flags))
+ encr_cfg |= BIT(ENCODE_SHIFT);
+ qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+
+ /* Set up AUTH_SEG_CFG */
+ auth_cfg = qce_auth_cfg(rctx->flags, auth_keylen, ctx->authsize);
+ auth_cfg |= BIT(AUTH_LAST_SHIFT);
+ auth_cfg |= BIT(AUTH_FIRST_SHIFT);
+ if (IS_ENCRYPT(flags)) {
+ if (IS_CCM(rctx->flags))
+ auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+ else
+ auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
+ } else {
+ if (IS_CCM(rctx->flags))
+ auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
+ else
+ auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+ }
+ qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+ totallen = rctx->cryptlen + rctx->assoclen;
+
+ /* Set the encryption size and start offset */
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen + ctx->authsize);
+ else
+ qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+ qce_write(qce, REG_ENCR_SEG_START, rctx->assoclen & 0xffff);
+
+ /* Set the authentication size and start offset */
+ qce_write(qce, REG_AUTH_SEG_SIZE, totallen);
+ qce_write(qce, REG_AUTH_SEG_START, 0);
+
+ /* Write total length */
+ if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
+ qce_write(qce, REG_SEG_SIZE, totallen + ctx->authsize);
+ else
+ qce_write(qce, REG_SEG_SIZE, totallen);
+
+ /* get little endianness */
+ config = qce_config_reg(qce, 1);
+ qce_write(qce, REG_CONFIG, config);
+
+ /* Start the process */
+ qce_crypto_go(qce, !IS_CCM(flags));
return 0;
}
@@ -397,6 +551,10 @@ int qce_start(struct crypto_async_request *async_req, u32 type)
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req);
#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+ case CRYPTO_ALG_TYPE_AEAD:
+ return qce_setup_regs_aead(async_req);
+#endif
default:
return -EINVAL;
}
@@ -419,6 +577,8 @@ int qce_check_status(struct qce_device *qce, u32 *status)
*/
if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
ret = -ENXIO;
+ else if (*status & BIT(MAC_FAILED_SHIFT))
+ ret = -EBADMSG;
return ret;
}
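qce_setup_regs_aead() above relies on qce_be32_to_cpu_array() to turn the big-endian key/IV/nonce byte strings into the 32-bit host words the engine registers expect. A userspace equivalent of that helper (be32_array_to_cpu is illustrative; like the driver helper, it only converts whole words):
    #include <stdint.h>
    #include <stdio.h>

    /* be32_array_to_cpu() is illustrative; it mirrors the driver helper. */
    static unsigned int be32_array_to_cpu(uint32_t *dst, const uint8_t *src,
                                          unsigned int len)
    {
            unsigned int i, words = len / 4;

            for (i = 0; i < words; i++, src += 4)
                    dst[i] = ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
                             ((uint32_t)src[2] << 8)  |  (uint32_t)src[3];
            return (len + 3) / 4;        /* DIV_ROUND_UP(len, sizeof(u32)) */
    }

    int main(void)
    {
            const uint8_t key[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
            uint32_t words[2];
            unsigned int n = be32_array_to_cpu(words, key, sizeof(key));

            printf("%u words: 0x%08x 0x%08x\n", n, words[0], words[1]);
            return 0;
    }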
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 3bc244bcca2d..02e63ad9f245 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -11,6 +11,7 @@
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/aead.h>
/* xts du size */
#define QCE_SECTOR_SIZE 512
@@ -51,9 +52,11 @@
#define QCE_MODE_CCM BIT(12)
#define QCE_MODE_MASK GENMASK(12, 8)
+#define QCE_MODE_CCM_RFC4309 BIT(13)
+
/* cipher encryption/decryption operations */
-#define QCE_ENCRYPT BIT(13)
-#define QCE_DECRYPT BIT(14)
+#define QCE_ENCRYPT BIT(30)
+#define QCE_DECRYPT BIT(31)
#define IS_DES(flags) (flags & QCE_ALG_DES)
#define IS_3DES(flags) (flags & QCE_ALG_3DES)
@@ -73,6 +76,7 @@
#define IS_CTR(mode) (mode & QCE_MODE_CTR)
#define IS_XTS(mode) (mode & QCE_MODE_XTS)
#define IS_CCM(mode) (mode & QCE_MODE_CCM)
+#define IS_CCM_RFC4309(mode) ((mode) & QCE_MODE_CCM_RFC4309)
#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT)
#define IS_DECRYPT(dir) (dir & QCE_DECRYPT)
@@ -85,6 +89,7 @@ struct qce_alg_template {
union {
struct skcipher_alg skcipher;
struct ahash_alg ahash;
+ struct aead_alg aead;
} alg;
struct qce_device *qce;
const u8 *hash_zero;
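Moving QCE_ENCRYPT/QCE_DECRYPT up to bits 30 and 31 keeps the direction bits clear of the growing mode mask, which is what lets QCE_MODE_CCM_RFC4309 take BIT(13) without colliding. A toy version of the flag layout and the IS_* style tests (values and macro names here are illustrative):
    #include <stdio.h>

    /* Toy flag layout mirroring the scheme above; values are illustrative. */
    #define DEMO_MODE_CCM          (1u << 12)
    #define DEMO_MODE_CCM_RFC4309  (1u << 13)    /* new mode bit fits below ... */
    #define DEMO_ENCRYPT           (1u << 30)    /* ... the relocated direction bits */
    #define DEMO_DECRYPT           (1u << 31)

    #define DEMO_IS_CCM(f)          ((f) & DEMO_MODE_CCM)
    #define DEMO_IS_CCM_RFC4309(f)  ((f) & DEMO_MODE_CCM_RFC4309)
    #define DEMO_IS_ENCRYPT(f)      ((f) & DEMO_ENCRYPT)

    int main(void)
    {
            unsigned long flags = DEMO_MODE_CCM | DEMO_MODE_CCM_RFC4309 | DEMO_ENCRYPT;

            printf("ccm=%d rfc4309=%d encrypt=%d\n",
                   !!DEMO_IS_CCM(flags), !!DEMO_IS_CCM_RFC4309(flags),
                   !!DEMO_IS_ENCRYPT(flags));
            return 0;
    }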
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 80b75085c265..d3780be44a76 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -17,6 +17,7 @@
#include "core.h"
#include "cipher.h"
#include "sha.h"
+#include "aead.h"
#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
@@ -28,6 +29,9 @@ static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
+ &aead_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index c0a0d8c4fce1..8ff10928f581 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -72,7 +72,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
struct scatterlist *sg;
bool diff_dst;
gfp_t gfp;
- int ret;
+ int dst_nents, src_nents, ret;
rctx->iv = req->iv;
rctx->ivsize = crypto_skcipher_ivsize(skcipher);
@@ -123,21 +123,26 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_mark_end(sg);
rctx->dst_sg = rctx->dst_tbl.sgl;
- ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (ret < 0)
+ dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ if (dst_nents < 0) {
+ ret = dst_nents;
goto error_free;
+ }
if (diff_dst) {
- ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (ret < 0)
+ src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+ if (src_nents < 0) {
+ ret = src_nents;
goto error_unmap_dst;
+ }
rctx->src_sg = req->src;
} else {
rctx->src_sg = rctx->dst_sg;
+ src_nents = dst_nents - 1;
}
- ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
- rctx->dst_sg, rctx->dst_nents,
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
+ rctx->dst_sg, dst_nents,
qce_skcipher_done, async_req);
if (ret)
goto error_unmap_src;
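The skcipher fix above makes the same point as the new aead code: dma_map_sg() may coalesce scatterlist entries, so the count it returns, not the nents that went in, is what must reach the DMA prep call, while dma_unmap_sg() still takes the original nents. A hedged kernel-style sketch of the pattern (demo_map_and_prep is illustrative, not driver code):
    /*
     * Hedged kernel-style sketch, not driver code: the count returned by
     * dma_map_sg() (entries may have been coalesced) is what the DMA prep
     * call must see; dma_unmap_sg() still takes the original nents.
     */
    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    static int demo_map_and_prep(struct device *dev, struct scatterlist *sg,
                                 int nents)
    {
            int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

            if (mapped <= 0)
                    return -EIO;        /* mapping failed */

            /* hand 'mapped', not 'nents', to the dmaengine prep call here */

            dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);    /* unmap with original nents */
            return 0;
    }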
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 1c6929fb3a13..544d7040cfc5 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1698,7 +1698,6 @@ static void sa_aead_dma_in_callback(void *data)
size_t pl, ml;
int i;
int err = 0;
- u16 auth_len;
u32 *mdptr;
sa_sync_from_device(rxd);
@@ -1711,13 +1710,10 @@ static void sa_aead_dma_in_callback(void *data)
for (i = 0; i < (authsize / 4); i++)
mdptr[i + 4] = swab32(mdptr[i + 4]);
- auth_len = req->assoclen + req->cryptlen;
-
if (rxd->enc) {
scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1);
} else {
- auth_len -= authsize;
start -= authsize;
scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
0);
@@ -2300,9 +2296,9 @@ static int sa_dma_init(struct sa_crypto_data *dd)
dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
if (IS_ERR(dd->dma_rx2)) {
- dma_release_channel(dd->dma_rx1);
- return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
- "Unable to request rx2 DMA channel\n");
+ ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
+ "Unable to request rx2 DMA channel\n");
+ goto err_dma_rx2;
}
dd->dma_tx = dma_request_chan(dd->dev, "tx");
@@ -2323,28 +2319,31 @@ static int sa_dma_init(struct sa_crypto_data *dd)
if (ret) {
dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
ret);
- return ret;
+ goto err_dma_config;
}
ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
if (ret) {
dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
ret);
- return ret;
+ goto err_dma_config;
}
ret = dmaengine_slave_config(dd->dma_tx, &cfg);
if (ret) {
dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
ret);
- return ret;
+ goto err_dma_config;
}
return 0;
+err_dma_config:
+ dma_release_channel(dd->dma_tx);
err_dma_tx:
- dma_release_channel(dd->dma_rx1);
dma_release_channel(dd->dma_rx2);
+err_dma_rx2:
+ dma_release_channel(dd->dma_rx1);
return ret;
}
@@ -2385,10 +2384,8 @@ MODULE_DEVICE_TABLE(of, of_match);
static int sa_ul_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
static void __iomem *saul_base;
struct sa_crypto_data *dev_data;
int ret;
@@ -2397,9 +2394,18 @@ static int sa_ul_probe(struct platform_device *pdev)
if (!dev_data)
return -ENOMEM;
+ dev_data->match_data = of_device_get_match_data(dev);
+ if (!dev_data->match_data)
+ return -ENODEV;
+
+ saul_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(saul_base))
+ return PTR_ERR(saul_base);
+
sa_k3_dev = dev;
dev_data->dev = dev;
dev_data->pdev = pdev;
+ dev_data->base = saul_base;
platform_set_drvdata(pdev, dev_data);
dev_set_drvdata(sa_k3_dev, dev_data);
@@ -2408,26 +2414,16 @@ static int sa_ul_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
ret);
+ pm_runtime_disable(dev);
return ret;
}
sa_init_mem(dev_data);
ret = sa_dma_init(dev_data);
if (ret)
- goto disable_pm_runtime;
-
- match = of_match_node(of_match, dev->of_node);
- if (!match) {
- dev_err(dev, "No compatible match found\n");
- return -ENODEV;
- }
- dev_data->match_data = match->data;
+ goto destroy_dma_pool;
spin_lock_init(&dev_data->scid_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- saul_base = devm_ioremap_resource(dev, res);
-
- dev_data->base = saul_base;
if (!dev_data->match_data->skip_engine_control) {
u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
@@ -2454,9 +2450,9 @@ release_dma:
dma_release_channel(dev_data->dma_rx1);
dma_release_channel(dev_data->dma_tx);
+destroy_dma_pool:
dma_pool_destroy(dev_data->sc_pool);
-disable_pm_runtime:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -2467,6 +2463,8 @@ static int sa_ul_remove(struct platform_device *pdev)
{
struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
+ of_platform_depopulate(&pdev->dev);
+
sa_unregister_algos(&pdev->dev);
dma_release_channel(dev_data->dma_rx2);
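The reworked sa_dma_init() error path is the classic goto ladder: each label releases exactly the channels acquired before the failure, in reverse order. A plain-C illustration of that unwind order (all names here are made up for the demo):
    #include <stdio.h>

    /* All names here are made up for the demo. */
    static int acquire(const char *name, int ok)
    {
            printf("%s %s\n", ok ? "acquired" : "failed", name);
            return ok ? 0 : -1;
    }

    static void release(const char *name)
    {
            printf("released %s\n", name);
    }

    static int demo_dma_init(int fail_at)
    {
            int ret;

            ret = acquire("rx1", fail_at != 1);
            if (ret)
                    return ret;
            ret = acquire("rx2", fail_at != 2);
            if (ret)
                    goto err_rx2;
            ret = acquire("tx", fail_at != 3);
            if (ret)
                    goto err_tx;
            return 0;

    err_tx:
            release("rx2");
    err_rx2:
            release("rx1");
            return ret;
    }

    int main(void)
    {
            return demo_dma_init(3) ? 1 : 0;    /* fails at tx, unwinds rx2 then rx1 */
    }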
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index ecb7412e84e3..51a6e1a42434 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1011,6 +1011,7 @@ static int hash_hw_final(struct ahash_request *req)
goto out;
}
} else if (req->nbytes == 0 && ctx->keylen > 0) {
+ ret = -EPERM;
dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
__func__);
goto out;
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index ec90b44fa0cd..3c158251a58b 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/npe.h>
@@ -679,6 +680,7 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
{
int i, found = 0;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct resource *res;
for (i = 0; i < NPE_COUNT; i++) {
@@ -711,6 +713,11 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
if (!found)
return -ENODEV;
+
+ /* Spawn crypto subdevice if using device tree */
+ if (IS_ENABLED(CONFIG_OF) && np)
+ devm_of_platform_populate(dev);
+
return 0;
}
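The NPE probe now calls devm_of_platform_populate(), which creates platform devices for the node's DT children (the crypto engine, in this case) and removes them automatically when the parent device is unbound. A hedged sketch of where such a call sits in a probe routine (demo_probe is illustrative):
    /*
     * Hedged sketch, not the real ixp4xx probe: devm_of_platform_populate()
     * registers platform devices for the DT children of 'dev' and removes
     * them automatically when the parent is unbound.
     */
    #include <linux/of.h>
    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)    /* demo_probe is illustrative */
    {
            struct device *dev = &pdev->dev;

            /* ... bring up the parent hardware first ... */

            if (IS_ENABLED(CONFIG_OF) && dev->of_node)
                    return devm_of_platform_populate(dev);

            return 0;
    }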