Diffstat (limited to 'drivers')
39 files changed, 1671 insertions(+), 560 deletions(-)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 239eca4d6805..814b3d0ca7b7 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -63,7 +63,7 @@ config HW_RANDOM_AMD config HW_RANDOM_ATMEL tristate "Atmel Random Number Generator support" - depends on ARCH_AT91 && HAVE_CLK && OF + depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -87,7 +87,7 @@ config HW_RANDOM_BA431 config HW_RANDOM_BCM2835 tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ - ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC + ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -100,7 +100,7 @@ config HW_RANDOM_BCM2835 config HW_RANDOM_IPROC_RNG200 tristate "Broadcom iProc/STB RNG200 support" - depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB + depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the RNG200 @@ -165,7 +165,7 @@ config HW_RANDOM_IXP4XX config HW_RANDOM_OMAP tristate "OMAP Random Number Generator support" - depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3 + depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3 || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -179,7 +179,7 @@ config HW_RANDOM_OMAP config HW_RANDOM_OMAP3_ROM tristate "OMAP3 ROM Random Number Generator support" - depends on ARCH_OMAP3 + depends on ARCH_OMAP3 || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -298,7 +298,7 @@ config HW_RANDOM_INGENIC_TRNG config HW_RANDOM_NOMADIK tristate "ST-Ericsson Nomadik Random Number Generator support" - depends on ARCH_NOMADIK + depends on ARCH_NOMADIK || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c index 188854dd16a9..7df5e9f7519d 100644 --- a/drivers/char/hw_random/ixp4xx-rng.c +++ b/drivers/char/hw_random/ixp4xx-rng.c @@ -42,13 +42,11 @@ static int ixp4xx_rng_probe(struct platform_device *pdev) { void __iomem * rng_base; struct device *dev = &pdev->dev; - struct resource *res; if (!cpu_is_ixp46x()) /* includes IXP455 */ return -ENOSYS; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rng_base = devm_ioremap_resource(dev, res); + rng_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rng_base)) return PTR_ERR(rng_base); diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c index e446236e81f2..8bb30282ca46 100644 --- a/drivers/char/hw_random/meson-rng.c +++ b/drivers/char/hw_random/meson-rng.c @@ -54,9 +54,10 @@ static int meson_rng_probe(struct platform_device *pdev) if (IS_ERR(data->base)) return PTR_ERR(data->base); - data->core_clk = devm_clk_get(dev, "core"); + data->core_clk = devm_clk_get_optional(dev, "core"); if (IS_ERR(data->core_clk)) - data->core_clk = NULL; + return dev_err_probe(dev, PTR_ERR(data->core_clk), + "Failed to get core clock\n"); if (data->core_clk) { ret = clk_prepare_enable(data->core_clk); diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index 8ad7b515a51b..6c00ea008555 100644 --- 
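The ixp4xx-rng and meson-rng hunks above are two instances of the same probe cleanup: open-coded resource and clock handling replaced by managed helpers. A minimal sketch of the combined pattern (hypothetical foo_probe(), not code from either driver):

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *base;
	struct clk *clk;

	/* Folds platform_get_resource() + devm_ioremap_resource() into one call. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * devm_clk_get_optional() returns NULL, not an error, when the clock
	 * is simply absent, so only real failures reach dev_err_probe().
	 */
	clk = devm_clk_get_optional(dev, "core");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get core clock\n");

	return 0;
}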
a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -166,8 +166,13 @@ static int mtk_rng_runtime_resume(struct device *dev) return mtk_rng_init(&priv->rng); } -static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend, - mtk_rng_runtime_resume, NULL); +static const struct dev_pm_ops mtk_rng_pm_ops = { + SET_RUNTIME_PM_OPS(mtk_rng_runtime_suspend, + mtk_rng_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + #define MTK_RNG_PM_OPS (&mtk_rng_pm_ops) #else /* CONFIG_PM */ #define MTK_RNG_PM_OPS NULL diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c index 7c673afd7241..2beaa35c0d74 100644 --- a/drivers/char/hw_random/s390-trng.c +++ b/drivers/char/hw_random/s390-trng.c @@ -111,7 +111,7 @@ static ssize_t trng_counter_show(struct device *dev, #if IS_ENABLED(CONFIG_ARCH_RANDOM) u64 arch_counter = atomic64_read(&s390_arch_random_counter); - return snprintf(buf, PAGE_SIZE, + return sysfs_emit(buf, "trng: %llu\n" "hwrng: %llu\n" "arch: %llu\n" @@ -119,7 +119,7 @@ static ssize_t trng_counter_show(struct device *dev, dev_counter, hwrng_counter, arch_counter, dev_counter + hwrng_counter + arch_counter); #else - return snprintf(buf, PAGE_SIZE, + return sysfs_emit(buf, "trng: %llu\n" "hwrng: %llu\n" "total: %llu\n", diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index e313233ec6de..bf6275ffc4aa 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -1153,16 +1153,27 @@ static struct caam_akcipher_alg caam_rsa = { int caam_pkc_init(struct device *ctrldev) { struct caam_drv_private *priv = dev_get_drvdata(ctrldev); - u32 pk_inst; + u32 pk_inst, pkha; int err; init_done = false; /* Determine public key hardware accelerator presence. */ - if (priv->era < 10) + if (priv->era < 10) { pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; - else - pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; + } else { + pkha = rd_reg32(&priv->ctrl->vreg.pkha); + pk_inst = pkha & CHA_VER_NUM_MASK; + + /* + * Newer CAAMs support partially disabled functionality. If this is the + * case, the number is non-zero, but this bit is set to indicate that + * no encryption or decryption is supported. Only signing and verifying + * is supported. + */ + if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT) + pk_inst = 0; + } /* Do not register algorithms if PKHA is not present. 
*/ if (!pk_inst) diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index af61f3a2c0d4..3738625c0250 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -322,6 +322,9 @@ struct version_regs { /* CHA Miscellaneous Information - AESA_MISC specific */ #define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT) +/* CHA Miscellaneous Information - PKHA_MISC specific */ +#define CHA_VER_MISC_PKHA_NO_CRYPT BIT(7 + CHA_VER_MISC_SHIFT) + /* * caam_perfmon - Performance Monitor/Secure Memory Status/ * CAAM Global Status/Component Version IDs diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 0d5576f6ad21..fe69053b2394 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -467,8 +467,8 @@ static int ccp_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; - kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, - "%s-q%u", ccp->name, cmd_q->id); + kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); if (IS_ERR(kthread)) { dev_err(dev, "error creating queue thread (%ld)\n", PTR_ERR(kthread)); @@ -477,7 +477,6 @@ static int ccp_init(struct ccp_device *ccp) } cmd_q->kthread = kthread; - wake_up_process(kthread); } dev_dbg(dev, "Enabling interrupts...\n"); diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7838f63bab32..7b73332d6aa1 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -950,8 +950,8 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; - kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, - "%s-q%u", ccp->name, cmd_q->id); + kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); if (IS_ERR(kthread)) { dev_err(dev, "error creating queue thread (%ld)\n", PTR_ERR(kthread)); @@ -960,7 +960,6 @@ static int ccp5_init(struct ccp_device *ccp) } cmd_q->kthread = kthread; - wake_up_process(kthread); } dev_dbg(dev, "Enabling interrupts...\n"); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 2ecb0e1f65d8..e09925d86bf3 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -134,7 +134,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); - case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); + case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); default: return 0; } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index e599ac6dc162..790fa9058a36 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -103,7 +103,8 @@ MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match); static void init_cc_cache_params(struct cc_drvdata *drvdata) { struct device *dev = drvdata_to_dev(drvdata); - u32 cache_params, ace_const, val, mask; + u32 cache_params, ace_const, val; + u64 mask; /* compute CC_AXIM_CACHE_PARAMS */ cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS)); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 369562d34d66..fed52ae516ba 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -233,6 +233,8 @@ #define QM_DBG_WRITE_LEN 1024 #define QM_DBG_TMP_BUF_LEN 22 #define QM_PCI_COMMAND_INVALID ~0 +#define QM_RESET_STOP_TX_OFFSET 1 
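The two ccp hunks above are behavior-preserving because kthread_run() is by definition kthread_create() followed by wake_up_process(); the queue threads start running immediately either way. A sketch of the resulting call, wrapped in a hypothetical helper:

static struct task_struct *start_queue_thread(int (*fn)(void *), void *data,
					      const char *name, unsigned int id)
{
	/* Creates the thread and wakes it in one step; returns ERR_PTR() on failure. */
	return kthread_run(fn, data, "%s-q%u", name, id);
}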
+#define QM_RESET_STOP_RX_OFFSET 2 #define WAIT_PERIOD 20 #define REMOVE_WAIT_DELAY 10 @@ -883,6 +885,20 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data) return IRQ_HANDLED; } +static void qm_set_qp_disable(struct hisi_qp *qp, int offset) +{ + u32 *addr; + + if (qp->is_in_kernel) + return; + + addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; + *addr = 1; + + /* make sure setup is completed */ + mb(); +} + static irqreturn_t qm_aeq_irq(int irq, void *data) { struct hisi_qm *qm = data; @@ -2467,6 +2483,15 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp) return qp->sqe + sq_tail * qp->qm->sqe_size; } +static void hisi_qm_unset_hw_reset(struct hisi_qp *qp) +{ + u64 *addr; + + /* Use last 64 bits of DUS to reset status. */ + addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; + *addr = 0; +} + static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) { struct device *dev = &qm->pdev->dev; @@ -2492,7 +2517,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) } qp = &qm->qp_array[qp_id]; - + hisi_qm_unset_hw_reset(qp); memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); qp->event_cb = NULL; @@ -2912,6 +2937,14 @@ static int hisi_qm_get_available_instances(struct uacce_device *uacce) return hisi_qm_get_free_qp_num(uacce->priv); } +static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) +{ + int i; + + for (i = 0; i < qm->qp_num; i++) + qm_set_qp_disable(&qm->qp_array[i], offset); +} + static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, unsigned long arg, struct uacce_queue *q) @@ -3094,7 +3127,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm) if (IS_ERR(uacce)) return PTR_ERR(uacce); - if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) { + if (uacce->flags & UACCE_DEV_SVA) { qm->use_sva = true; } else { /* only consider sva case */ @@ -3122,8 +3155,10 @@ static int qm_alloc_uacce(struct hisi_qm *qm) else mmio_page_nr = qm->db_interval / PAGE_SIZE; + /* Add one more page for device or qp status */ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + - sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT; + sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >> + PAGE_SHIFT; uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; @@ -3367,8 +3402,10 @@ void hisi_qm_uninit(struct hisi_qm *qm) qm_irq_unregister(qm); hisi_qm_pci_uninit(qm); - uacce_remove(qm->uacce); - qm->uacce = NULL; + if (qm->use_sva) { + uacce_remove(qm->uacce); + qm->uacce = NULL; + } up_write(&qm->qps_lock); } @@ -3682,11 +3719,13 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) if (qm->status.stop_reason == QM_SOFT_RESET || qm->status.stop_reason == QM_FLR) { + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); ret = qm_stop_started_qp(qm); if (ret < 0) { dev_err(dev, "Failed to stop started qp!\n"); goto err_unlock; } + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); } /* Mask eq and aeq irq */ @@ -4185,7 +4224,7 @@ static ssize_t qm_qos_value_init(const char *buf, unsigned long *val) return -EINVAL; } - ret = sscanf(buf, "%ld", val); + ret = sscanf(buf, "%lu", val); if (ret != QM_QOS_VAL_NUM) return -EINVAL; @@ -5045,6 +5084,8 @@ static int qm_controller_reset(struct hisi_qm *qm) ret = qm_controller_reset_prepare(qm); if (ret) { + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); clear_bit(QM_RST_SCHED, &qm->misc_ctl); return ret; } @@ -5131,6 +5172,8 @@ void hisi_qm_reset_prepare(struct pci_dev 
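For orientation: qm_set_qp_disable() above parks a stop flag in the very tail of each queue's DUS (device user shared) region, where userspace can see it. Offset 1 addresses the last u32 (the TX flag) and offset 2 the one before it (the RX flag), which is why hisi_qm_unset_hw_reset() can clear both with a single u64 store. A sketch of the addressing, assuming that layout (hypothetical helper name):

static u32 *qp_stop_flag(struct hisi_qp *qp, int offset)
{
	/* offset == 1 -> TX flag (last word), offset == 2 -> RX flag. */
	return (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
}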
*pdev) ret = hisi_qm_stop(qm, QM_FLR); if (ret) { pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); return; } @@ -5314,9 +5357,14 @@ static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, atomic_set(&qm->status.flags, QM_STOP); cmd = QM_VF_PREPARE_FAIL; goto err_prepare; + } else { + goto out; } err_prepare: + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); +out: pci_save_state(pdev); ret = qm->ops->ping_pf(qm, cmd); if (ret) @@ -5777,9 +5825,11 @@ int hisi_qm_init(struct hisi_qm *qm) goto err_irq_register; } - ret = qm_alloc_uacce(qm); - if (ret < 0) - dev_warn(dev, "fail to alloc uacce (%d)\n", ret); + if (qm->mode == UACCE_MODE_SVA) { + ret = qm_alloc_uacce(qm); + if (ret < 0) + dev_warn(dev, "fail to alloc uacce (%d)\n", ret); + } ret = hisi_qm_memory_init(qm); if (ret) @@ -5792,8 +5842,10 @@ int hisi_qm_init(struct hisi_qm *qm) return 0; err_alloc_uacce: - uacce_remove(qm->uacce); - qm->uacce = NULL; + if (qm->use_sva) { + uacce_remove(qm->uacce); + qm->uacce = NULL; + } err_irq_register: qm_irq_unregister(qm); err_pci_init: diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 7148201ce76e..873971ef9aee 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -218,7 +218,7 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = { {"HZIP_AVG_DELAY ", 0x28ull}, {"HZIP_MEM_VISIBLE_DATA ", 0x30ull}, {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull}, - {"HZIP_COMSUMED_BYTE ", 0x38ull}, + {"HZIP_CONSUMED_BYTE ", 0x38ull}, {"HZIP_PRODUCED_BYTE ", 0x40ull}, {"HZIP_COMP_INF ", 0x70ull}, {"HZIP_PRE_OUT ", 0x78ull}, diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index aa4c7b2af3e2..d8e82d69745d 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -674,14 +674,12 @@ static int img_hash_digest(struct ahash_request *req) static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name) { struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm); - int err = -ENOMEM; ctx->fallback = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { pr_err("img_hash: Could not load fallback driver.\n"); - err = PTR_ERR(ctx->fallback); - goto err; + return PTR_ERR(ctx->fallback); } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct img_hash_request_ctx) + @@ -689,9 +687,6 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name) IMG_HASH_DMA_THRESHOLD); return 0; - -err: - return err; } static int img_hash_cra_md5_init(struct crypto_tfm *tfm) diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig index 00cf8f028cb9..7942b48dd55a 100644 --- a/drivers/crypto/keembay/Kconfig +++ b/drivers/crypto/keembay/Kconfig @@ -39,6 +39,25 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS Intel does not recommend use of CTS mode with AES/SM4. +config CRYPTO_DEV_KEEMBAY_OCS_ECC + tristate "Support for Intel Keem Bay OCS ECC HW acceleration" + depends on ARCH_KEEMBAY || COMPILE_TEST + depends on OF || COMPILE_TEST + depends on HAS_IOMEM + select CRYPTO_ECDH + select CRYPTO_ENGINE + help + Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) + Elliptic Curve Cryptography (ECC) hardware acceleration for use with + Crypto API. + + Provides OCS acceleration for ECDH-256 and ECDH-384. + + Say Y or M if you are compiling for the Intel Keem Bay SoC. 
The + module will be called keembay-ocs-ecc. + + If unsure, say N. + config CRYPTO_DEV_KEEMBAY_OCS_HCU tristate "Support for Intel Keem Bay OCS HCU HW acceleration" select CRYPTO_HASH diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/keembay/Makefile index aea03d4432c4..7c12c3c138bd 100644 --- a/drivers/crypto/keembay/Makefile +++ b/drivers/crypto/keembay/Makefile @@ -4,5 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o +obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC) += keembay-ocs-ecc.o + obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c new file mode 100644 index 000000000000..679e6ae295e0 --- /dev/null +++ b/drivers/crypto/keembay/keembay-ocs-ecc.c @@ -0,0 +1,1017 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel Keem Bay OCS ECC Crypto Driver. + * + * Copyright (C) 2019-2021 Intel Corporation + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/fips.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include <crypto/ecc_curve.h> +#include <crypto/ecdh.h> +#include <crypto/engine.h> +#include <crypto/kpp.h> +#include <crypto/rng.h> + +#include <crypto/internal/ecc.h> +#include <crypto/internal/kpp.h> + +#define DRV_NAME "keembay-ocs-ecc" + +#define KMB_OCS_ECC_PRIORITY 350 + +#define HW_OFFS_OCS_ECC_COMMAND 0x00000000 +#define HW_OFFS_OCS_ECC_STATUS 0x00000004 +#define HW_OFFS_OCS_ECC_DATA_IN 0x00000080 +#define HW_OFFS_OCS_ECC_CX_DATA_OUT 0x00000100 +#define HW_OFFS_OCS_ECC_CY_DATA_OUT 0x00000180 +#define HW_OFFS_OCS_ECC_ISR 0x00000400 +#define HW_OFFS_OCS_ECC_IER 0x00000404 + +#define HW_OCS_ECC_ISR_INT_STATUS_DONE BIT(0) +#define HW_OCS_ECC_COMMAND_INS_BP BIT(0) + +#define HW_OCS_ECC_COMMAND_START_VAL BIT(0) + +#define OCS_ECC_OP_SIZE_384 BIT(8) +#define OCS_ECC_OP_SIZE_256 0 + +/* ECC Instruction : for ECC_COMMAND */ +#define OCS_ECC_INST_WRITE_AX (0x1 << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_WRITE_AY (0x2 << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_WRITE_BX_D (0x3 << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_WRITE_BY_L (0x4 << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_WRITE_P (0x5 << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_WRITE_A (0x6 << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_CALC_D_IDX_A (0x8 << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_CALC_A_POW_B_MODP (0xB << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_CALC_A_MUL_B_MODP (0xC << HW_OCS_ECC_COMMAND_INS_BP) +#define OCS_ECC_INST_CALC_A_ADD_B_MODP (0xD << HW_OCS_ECC_COMMAND_INS_BP) + +#define ECC_ENABLE_INTR 1 + +#define POLL_USEC 100 +#define TIMEOUT_USEC 10000 + +#define KMB_ECC_VLI_MAX_DIGITS ECC_CURVE_NIST_P384_DIGITS +#define KMB_ECC_VLI_MAX_BYTES (KMB_ECC_VLI_MAX_DIGITS \ + << ECC_DIGITS_TO_BYTES_SHIFT) + +#define POW_CUBE 3 + +/** + * struct ocs_ecc_dev - ECC device context + * @list: List of device contexts + * @dev: OCS ECC device + * @base_reg: IO base address of OCS ECC + * @engine: Crypto engine for the device + * @irq_done: IRQ done completion. 
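A note on the ECC_COMMAND encoding defined above: bit 0 is the start pulse (HW_OCS_ECC_COMMAND_START_VAL), so every instruction value is shifted past it via HW_OCS_ECC_COMMAND_INS_BP, and the operand-size bit is OR'd in when the command is issued. A worked example using only the macros above:

/* 384-bit "write AX": BIT(8) | (0x1 << 1) == 0x102. */
u32 cmd = OCS_ECC_OP_SIZE_384 | OCS_ECC_INST_WRITE_AX;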
+ * @irq: IRQ number
+ */
+struct ocs_ecc_dev {
+	struct list_head list;
+	struct device *dev;
+	void __iomem *base_reg;
+	struct crypto_engine *engine;
+	struct completion irq_done;
+	int irq;
+};
+
+/**
+ * struct ocs_ecc_ctx - Transformation context.
+ * @engine_ctx: Crypto engine ctx.
+ * @ecc_dev: The ECC driver associated with this context.
+ * @curve: The elliptic curve used by this transformation.
+ * @private_key: The private key.
+ */
+struct ocs_ecc_ctx {
+	struct crypto_engine_ctx engine_ctx;
+	struct ocs_ecc_dev *ecc_dev;
+	const struct ecc_curve *curve;
+	u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
+};
+
+/* Driver data. */
+struct ocs_ecc_drv {
+	struct list_head dev_list;
+	spinlock_t lock;	/* Protects dev_list. */
+};
+
+/* Global variable holding the list of OCS ECC devices (only one expected). */
+static struct ocs_ecc_drv ocs_ecc = {
+	.dev_list = LIST_HEAD_INIT(ocs_ecc.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(ocs_ecc.lock),
+};
+
+/* Get OCS ECC tfm context from kpp_request. */
+static inline struct ocs_ecc_ctx *kmb_ocs_ecc_tctx(struct kpp_request *req)
+{
+	return kpp_tfm_ctx(crypto_kpp_reqtfm(req));
+}
+
+/* Converts number of digits to number of bytes. */
+static inline unsigned int digits_to_bytes(unsigned int n)
+{
+	return n << ECC_DIGITS_TO_BYTES_SHIFT;
+}
+
+/*
+ * Wait for ECC idle, i.e. when an operation (other than write operations)
+ * is done.
+ */
+static inline int ocs_ecc_wait_idle(struct ocs_ecc_dev *dev)
+{
+	u32 value;
+
+	return readl_poll_timeout((dev->base_reg + HW_OFFS_OCS_ECC_STATUS),
+				  value,
+				  !(value & HW_OCS_ECC_ISR_INT_STATUS_DONE),
+				  POLL_USEC, TIMEOUT_USEC);
+}
+
+static void ocs_ecc_cmd_start(struct ocs_ecc_dev *ecc_dev, u32 op_size)
+{
+	iowrite32(op_size | HW_OCS_ECC_COMMAND_START_VAL,
+		  ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+}
+
+/* Direct write of u32 buffer to ECC engine with associated instruction. */
+static void ocs_ecc_write_cmd_and_data(struct ocs_ecc_dev *dev,
+				       u32 op_size,
+				       u32 inst,
+				       const void *data_in,
+				       size_t data_size)
+{
+	iowrite32(op_size | inst, dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+	/* MMIO Write src uint32 to dst. */
+	memcpy_toio(dev->base_reg + HW_OFFS_OCS_ECC_DATA_IN, data_in,
+		    data_size);
+}
+
+/* Start OCS ECC operation and wait for its completion. */
+static int ocs_ecc_trigger_op(struct ocs_ecc_dev *ecc_dev, u32 op_size,
+			      u32 inst)
+{
+	reinit_completion(&ecc_dev->irq_done);
+
+	iowrite32(ECC_ENABLE_INTR, ecc_dev->base_reg + HW_OFFS_OCS_ECC_IER);
+	iowrite32(op_size | inst, ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+	return wait_for_completion_interruptible(&ecc_dev->irq_done);
+}
+
+/**
+ * ocs_ecc_read_cx_out() - Read the CX data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cx_out: The buffer where to store the CX value. Must be at least
+ *          @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */
+static inline void ocs_ecc_read_cx_out(struct ocs_ecc_dev *dev, void *cx_out,
+				       size_t byte_count)
+{
+	memcpy_fromio(cx_out, dev->base_reg + HW_OFFS_OCS_ECC_CX_DATA_OUT,
+		      byte_count);
+}
+
+/**
+ * ocs_ecc_read_cy_out() - Read the CY data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cy_out: The buffer where to store the CY value. Must be at least
+ *          @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */ +static inline void ocs_ecc_read_cy_out(struct ocs_ecc_dev *dev, void *cy_out, + size_t byte_count) +{ + memcpy_fromio(cy_out, dev->base_reg + HW_OFFS_OCS_ECC_CY_DATA_OUT, + byte_count); +} + +static struct ocs_ecc_dev *kmb_ocs_ecc_find_dev(struct ocs_ecc_ctx *tctx) +{ + if (tctx->ecc_dev) + return tctx->ecc_dev; + + spin_lock(&ocs_ecc.lock); + + /* Only a single OCS device available. */ + tctx->ecc_dev = list_first_entry(&ocs_ecc.dev_list, struct ocs_ecc_dev, + list); + + spin_unlock(&ocs_ecc.lock); + + return tctx->ecc_dev; +} + +/* Do point multiplication using OCS ECC HW. */ +static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev, + struct ecc_point *result, + const struct ecc_point *point, + u64 *scalar, + const struct ecc_curve *curve) +{ + u8 sca[KMB_ECC_VLI_MAX_BYTES]; /* Use the maximum data size. */ + u32 op_size = (curve->g.ndigits > ECC_CURVE_NIST_P256_DIGITS) ? + OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256; + size_t nbytes = digits_to_bytes(curve->g.ndigits); + int rc = 0; + + /* Generate random nbytes for Simple and Differential SCA protection. */ + rc = crypto_get_default_rng(); + if (rc) + return rc; + + rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes); + crypto_put_default_rng(); + if (rc) + return rc; + + /* Wait engine to be idle before starting new operation. */ + rc = ocs_ecc_wait_idle(ecc_dev); + if (rc) + return rc; + + /* Send ecc_start pulse as well as indicating operation size. */ + ocs_ecc_cmd_start(ecc_dev, op_size); + + /* Write ax param; Base point (Gx). */ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX, + point->x, nbytes); + + /* Write ay param; Base point (Gy). */ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY, + point->y, nbytes); + + /* + * Write the private key into DATA_IN reg. + * + * Since DATA_IN register is used to write different values during the + * computation private Key value is overwritten with + * side-channel-resistance value. + */ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BX_D, + scalar, nbytes); + + /* Write operand by/l. */ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BY_L, + sca, nbytes); + memzero_explicit(sca, sizeof(sca)); + + /* Write p = curve prime(GF modulus). */ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P, + curve->p, nbytes); + + /* Write a = curve coefficient. */ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_A, + curve->a, nbytes); + + /* Make hardware perform the multiplication. */ + rc = ocs_ecc_trigger_op(ecc_dev, op_size, OCS_ECC_INST_CALC_D_IDX_A); + if (rc) + return rc; + + /* Read result. */ + ocs_ecc_read_cx_out(ecc_dev, result->x, nbytes); + ocs_ecc_read_cy_out(ecc_dev, result->y, nbytes); + + return 0; +} + +/** + * kmb_ecc_do_scalar_op() - Perform Scalar operation using OCS ECC HW. + * @ecc_dev: The OCS ECC device to use. + * @scalar_out: Where to store the output scalar. + * @scalar_a: Input scalar operand 'a'. + * @scalar_b: Input scalar operand 'b' + * @curve: The curve on which the operation is performed. + * @ndigits: The size of the operands (in digits). + * @inst: The operation to perform (as an OCS ECC instruction). + * + * Return: 0 on success, negative error code otherwise. + */ +static int kmb_ecc_do_scalar_op(struct ocs_ecc_dev *ecc_dev, u64 *scalar_out, + const u64 *scalar_a, const u64 *scalar_b, + const struct ecc_curve *curve, + unsigned int ndigits, const u32 inst) +{ + u32 op_size = (ndigits > ECC_CURVE_NIST_P256_DIGITS) ? 
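kmb_ecc_point_mult() above draws its side-channel blinding bytes from the crypto API's default RNG. The borrow/use/return pattern it relies on, as a standalone sketch (hypothetical helper name):

#include <crypto/rng.h>

static int get_blinding_bytes(u8 *buf, unsigned int len)
{
	int rc;

	/* Takes a reference on crypto_default_rng, instantiating it if needed. */
	rc = crypto_get_default_rng();
	if (rc)
		return rc;

	rc = crypto_rng_get_bytes(crypto_default_rng, buf, len);

	/* Drop the reference as soon as the bytes have been drawn. */
	crypto_put_default_rng();

	return rc;
}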
+ OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256; + size_t nbytes = digits_to_bytes(ndigits); + int rc; + + /* Wait engine to be idle before starting new operation. */ + rc = ocs_ecc_wait_idle(ecc_dev); + if (rc) + return rc; + + /* Send ecc_start pulse as well as indicating operation size. */ + ocs_ecc_cmd_start(ecc_dev, op_size); + + /* Write ax param (Base point (Gx).*/ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX, + scalar_a, nbytes); + + /* Write ay param Base point (Gy).*/ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY, + scalar_b, nbytes); + + /* Write p = curve prime(GF modulus).*/ + ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P, + curve->p, nbytes); + + /* Give instruction A.B or A+B to ECC engine. */ + rc = ocs_ecc_trigger_op(ecc_dev, op_size, inst); + if (rc) + return rc; + + ocs_ecc_read_cx_out(ecc_dev, scalar_out, nbytes); + + if (vli_is_zero(scalar_out, ndigits)) + return -EINVAL; + + return 0; +} + +/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */ +static int kmb_ocs_ecc_is_pubkey_valid_partial(struct ocs_ecc_dev *ecc_dev, + const struct ecc_curve *curve, + struct ecc_point *pk) +{ + u64 xxx[KMB_ECC_VLI_MAX_DIGITS] = { 0 }; + u64 yy[KMB_ECC_VLI_MAX_DIGITS] = { 0 }; + u64 w[KMB_ECC_VLI_MAX_DIGITS] = { 0 }; + int rc; + + if (WARN_ON(pk->ndigits != curve->g.ndigits)) + return -EINVAL; + + /* Check 1: Verify key is not the zero point. */ + if (ecc_point_is_zero(pk)) + return -EINVAL; + + /* Check 2: Verify key is in the range [0, p-1]. */ + if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1) + return -EINVAL; + + if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1) + return -EINVAL; + + /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */ + + /* y^2 */ + /* Compute y^2 -> store in yy */ + rc = kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits, + OCS_ECC_INST_CALC_A_MUL_B_MODP); + if (rc) + goto exit; + + /* x^3 */ + /* Assigning w = 3, used for calculating x^3. */ + w[0] = POW_CUBE; + /* Load the next stage.*/ + rc = kmb_ecc_do_scalar_op(ecc_dev, xxx, pk->x, w, curve, pk->ndigits, + OCS_ECC_INST_CALC_A_POW_B_MODP); + if (rc) + goto exit; + + /* Do a*x -> store in w. */ + rc = kmb_ecc_do_scalar_op(ecc_dev, w, curve->a, pk->x, curve, + pk->ndigits, + OCS_ECC_INST_CALC_A_MUL_B_MODP); + if (rc) + goto exit; + + /* Do ax + b == w + b; store in w. */ + rc = kmb_ecc_do_scalar_op(ecc_dev, w, w, curve->b, curve, + pk->ndigits, + OCS_ECC_INST_CALC_A_ADD_B_MODP); + if (rc) + goto exit; + + /* x^3 + ax + b == x^3 + w -> store in w. */ + rc = kmb_ecc_do_scalar_op(ecc_dev, w, xxx, w, curve, pk->ndigits, + OCS_ECC_INST_CALC_A_ADD_B_MODP); + if (rc) + goto exit; + + /* Compare y^2 == x^3 + a·x + b. */ + rc = vli_cmp(yy, w, pk->ndigits); + if (rc) + rc = -EINVAL; + +exit: + memzero_explicit(xxx, sizeof(xxx)); + memzero_explicit(yy, sizeof(yy)); + memzero_explicit(w, sizeof(w)); + + return rc; +} + +/* SP800-56A section 5.6.2.3.3 full verification */ +static int kmb_ocs_ecc_is_pubkey_valid_full(struct ocs_ecc_dev *ecc_dev, + const struct ecc_curve *curve, + struct ecc_point *pk) +{ + struct ecc_point *nQ; + int rc; + + /* Checks 1 through 3 */ + rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk); + if (rc) + return rc; + + /* Check 4: Verify that nQ is the zero point. 
*/
+	nQ = ecc_alloc_point(pk->ndigits);
+	if (!nQ)
+		return -ENOMEM;
+
+	rc = kmb_ecc_point_mult(ecc_dev, nQ, pk, curve->n, curve);
+	if (rc)
+		goto exit;
+
+	if (!ecc_point_is_zero(nQ))
+		rc = -EINVAL;
+
+exit:
+	ecc_free_point(nQ);
+
+	return rc;
+}
+
+static int kmb_ecc_is_key_valid(const struct ecc_curve *curve,
+				const u64 *private_key, size_t private_key_len)
+{
+	size_t ndigits = curve->g.ndigits;
+	u64 one[KMB_ECC_VLI_MAX_DIGITS] = {1};
+	u64 res[KMB_ECC_VLI_MAX_DIGITS];
+
+	if (private_key_len != digits_to_bytes(ndigits))
+		return -EINVAL;
+
+	if (!private_key)
+		return -EINVAL;
+
+	/* Make sure the private key is in the range [2, n-3]. */
+	if (vli_cmp(one, private_key, ndigits) != -1)
+		return -EINVAL;
+
+	vli_sub(res, curve->n, one, ndigits);
+	vli_sub(res, res, one, ndigits);
+	if (vli_cmp(res, private_key, ndigits) != 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * ECC private keys are generated using the method of extra random bits,
+ * equivalent to that described in FIPS 186-4, Appendix B.4.1.
+ *
+ * d = (c mod(n-1)) + 1 where c is a string of random bits, 64 bits longer
+ * than requested
+ * 0 <= c mod(n-1) <= n-2, which implies that
+ * 1 <= d <= n-1
+ *
+ * This method generates a private key uniformly distributed in the range
+ * [1, n-1].
+ */
+static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
+{
+	size_t nbytes = digits_to_bytes(curve->g.ndigits);
+	u64 priv[KMB_ECC_VLI_MAX_DIGITS];
+	size_t nbits;
+	int rc;
+
+	nbits = vli_num_bits(curve->n, curve->g.ndigits);
+
+	/* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
+	if (nbits < 160 || curve->g.ndigits > ARRAY_SIZE(priv))
+		return -EINVAL;
+
+	/*
+	 * FIPS 186-4 recommends that the private key should be obtained from a
+	 * RBG with a security strength equal to or greater than the security
+	 * strength associated with N.
+	 *
+	 * The maximum security strength identified by NIST SP800-57pt1r4 for
+	 * ECC is 256 (N >= 512).
+	 *
+	 * This condition is met by the default RNG because it selects a favored
+	 * DRBG with a security strength of 256.
+	 */
+	if (crypto_get_default_rng())
+		return -EFAULT;
+
+	rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
+	crypto_put_default_rng();
+	if (rc)
+		goto cleanup;
+
+	rc = kmb_ecc_is_key_valid(curve, priv, nbytes);
+	if (rc)
+		goto cleanup;
+
+	ecc_swap_digits(priv, privkey, curve->g.ndigits);
+
+cleanup:
+	memzero_explicit(&priv, sizeof(priv));
+
+	return rc;
+}
+
+static int kmb_ocs_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+				   unsigned int len)
+{
+	struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+	struct ecdh params;
+	int rc = 0;
+
+	rc = crypto_ecdh_decode_key(buf, len, &params);
+	if (rc)
+		goto cleanup;
+
+	/* Ensure key size is not bigger than expected. */
+	if (params.key_size > digits_to_bytes(tctx->curve->g.ndigits)) {
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	/* Auto-generate private key if not provided. */
+	if (!params.key || !params.key_size) {
+		rc = kmb_ecc_gen_privkey(tctx->curve, tctx->private_key);
+		goto cleanup;
+	}
+
+	rc = kmb_ecc_is_key_valid(tctx->curve, (const u64 *)params.key,
+				  params.key_size);
+	if (rc)
+		goto cleanup;
+
+	ecc_swap_digits((const u64 *)params.key, tctx->private_key,
+			tctx->curve->g.ndigits);
+cleanup:
+	memzero_explicit(&params, sizeof(params));
+
+	if (rc)
+		tctx->curve = NULL;
+
+	return rc;
+}
+
+/* Compute shared secret.
*/ +static int kmb_ecc_do_shared_secret(struct ocs_ecc_ctx *tctx, + struct kpp_request *req) +{ + struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev; + const struct ecc_curve *curve = tctx->curve; + u64 shared_secret[KMB_ECC_VLI_MAX_DIGITS]; + u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2]; + size_t copied, nbytes, pubk_len; + struct ecc_point *pk, *result; + int rc; + + nbytes = digits_to_bytes(curve->g.ndigits); + + /* Public key is a point, thus it has two coordinates */ + pubk_len = 2 * nbytes; + + /* Copy public key from SG list to pubk_buf. */ + copied = sg_copy_to_buffer(req->src, + sg_nents_for_len(req->src, pubk_len), + pubk_buf, pubk_len); + if (copied != pubk_len) + return -EINVAL; + + /* Allocate and initialize public key point. */ + pk = ecc_alloc_point(curve->g.ndigits); + if (!pk) + return -ENOMEM; + + ecc_swap_digits(pubk_buf, pk->x, curve->g.ndigits); + ecc_swap_digits(&pubk_buf[curve->g.ndigits], pk->y, curve->g.ndigits); + + /* + * Check the public key for following + * Check 1: Verify key is not the zero point. + * Check 2: Verify key is in the range [1, p-1]. + * Check 3: Verify that y^2 == (x^3 + a·x + b) mod p + */ + rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk); + if (rc) + goto exit_free_pk; + + /* Allocate point for storing computed shared secret. */ + result = ecc_alloc_point(pk->ndigits); + if (!result) { + rc = -ENOMEM; + goto exit_free_pk; + } + + /* Calculate the shared secret.*/ + rc = kmb_ecc_point_mult(ecc_dev, result, pk, tctx->private_key, curve); + if (rc) + goto exit_free_result; + + if (ecc_point_is_zero(result)) { + rc = -EFAULT; + goto exit_free_result; + } + + /* Copy shared secret from point to buffer. */ + ecc_swap_digits(result->x, shared_secret, result->ndigits); + + /* Request might ask for less bytes than what we have. */ + nbytes = min_t(size_t, nbytes, req->dst_len); + + copied = sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, nbytes), + shared_secret, nbytes); + + if (copied != nbytes) + rc = -EINVAL; + + memzero_explicit(shared_secret, sizeof(shared_secret)); + +exit_free_result: + ecc_free_point(result); + +exit_free_pk: + ecc_free_point(pk); + + return rc; +} + +/* Compute public key. */ +static int kmb_ecc_do_public_key(struct ocs_ecc_ctx *tctx, + struct kpp_request *req) +{ + const struct ecc_curve *curve = tctx->curve; + u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2]; + struct ecc_point *pk; + size_t pubk_len; + size_t copied; + int rc; + + /* Public key is a point, so it has double the digits. */ + pubk_len = 2 * digits_to_bytes(curve->g.ndigits); + + pk = ecc_alloc_point(curve->g.ndigits); + if (!pk) + return -ENOMEM; + + /* Public Key(pk) = priv * G. */ + rc = kmb_ecc_point_mult(tctx->ecc_dev, pk, &curve->g, tctx->private_key, + curve); + if (rc) + goto exit; + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + if (kmb_ocs_ecc_is_pubkey_valid_full(tctx->ecc_dev, curve, pk)) { + rc = -EAGAIN; + goto exit; + } + + /* Copy public key from point to buffer. */ + ecc_swap_digits(pk->x, pubk_buf, pk->ndigits); + ecc_swap_digits(pk->y, &pubk_buf[pk->ndigits], pk->ndigits); + + /* Copy public key to req->dst. 
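kmb_ocs_ecdh_set_secret() above leans on crypto_ecdh_decode_key() to unpack the caller-supplied buffer before validating it. Minimal usage of that helper, as a sketch (hypothetical function name):

#include <crypto/ecdh.h>

static int unpack_secret(const char *buf, unsigned int len)
{
	struct ecdh params;

	/* On success, params.key/params.key_size describe the raw private key. */
	if (crypto_ecdh_decode_key(buf, len, &params))
		return -EINVAL;

	/* ... validate params.key_size, then consume params.key ... */
	return 0;
}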
*/ + copied = sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, pubk_len), + pubk_buf, pubk_len); + + if (copied != pubk_len) + rc = -EINVAL; + +exit: + ecc_free_point(pk); + + return rc; +} + +static int kmb_ocs_ecc_do_one_request(struct crypto_engine *engine, + void *areq) +{ + struct kpp_request *req = container_of(areq, struct kpp_request, base); + struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req); + struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev; + int rc; + + if (req->src) + rc = kmb_ecc_do_shared_secret(tctx, req); + else + rc = kmb_ecc_do_public_key(tctx, req); + + crypto_finalize_kpp_request(ecc_dev->engine, req, rc); + + return 0; +} + +static int kmb_ocs_ecdh_generate_public_key(struct kpp_request *req) +{ + struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req); + const struct ecc_curve *curve = tctx->curve; + + /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */ + if (!tctx->curve) + return -EINVAL; + + /* Ensure dst is present. */ + if (!req->dst) + return -EINVAL; + + /* Check the request dst is big enough to hold the public key. */ + if (req->dst_len < (2 * digits_to_bytes(curve->g.ndigits))) + return -EINVAL; + + /* 'src' is not supposed to be present when generate pubk is called. */ + if (req->src) + return -EINVAL; + + return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine, + req); +} + +static int kmb_ocs_ecdh_compute_shared_secret(struct kpp_request *req) +{ + struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req); + const struct ecc_curve *curve = tctx->curve; + + /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */ + if (!tctx->curve) + return -EINVAL; + + /* Ensure dst is present. */ + if (!req->dst) + return -EINVAL; + + /* Ensure src is present. */ + if (!req->src) + return -EINVAL; + + /* + * req->src is expected to the (other-side) public key, so its length + * must be 2 * coordinate size (in bytes). + */ + if (req->src_len != 2 * digits_to_bytes(curve->g.ndigits)) + return -EINVAL; + + return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine, + req); +} + +static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id) +{ + memset(tctx, 0, sizeof(*tctx)); + + tctx->ecc_dev = kmb_ocs_ecc_find_dev(tctx); + + if (IS_ERR(tctx->ecc_dev)) { + pr_err("Failed to find the device : %ld\n", + PTR_ERR(tctx->ecc_dev)); + return PTR_ERR(tctx->ecc_dev); + } + + tctx->curve = ecc_get_curve(curve_id); + if (!tctx->curve) + return -EOPNOTSUPP; + + tctx->engine_ctx.op.prepare_request = NULL; + tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request; + tctx->engine_ctx.op.unprepare_request = NULL; + + return 0; +} + +static int kmb_ocs_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) +{ + struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm); + + return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P256); +} + +static int kmb_ocs_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) +{ + struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm); + + return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P384); +} + +static void kmb_ocs_ecdh_exit_tfm(struct crypto_kpp *tfm) +{ + struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm); + + memzero_explicit(tctx->private_key, sizeof(*tctx->private_key)); +} + +static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm) +{ + struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm); + + /* Public key is made of two coordinates, so double the digits. 
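Both ECDH entry points above do nothing but validate and queue; the crypto engine later calls kmb_ocs_ecc_do_one_request() from its worker. The round trip, stripped to a skeleton (hypothetical names, engine assumed to come from the driver context):

/* Entry point: after validation, hand the request to the engine. */
static int foo_queue_request(struct crypto_engine *engine,
			     struct kpp_request *req)
{
	return crypto_transfer_kpp_request_to_engine(engine, req);
}

/* Engine worker callback: do the work, then signal completion. */
static int foo_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct kpp_request *req = container_of(areq, struct kpp_request, base);
	int rc;

	rc = -EINVAL;	/* stand-in for the actual hardware operation */
	crypto_finalize_kpp_request(engine, req, rc);

	return 0;	/* 0 means "request consumed", not "request succeeded" */
}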
*/ + return digits_to_bytes(tctx->curve->g.ndigits) * 2; +} + +static struct kpp_alg ocs_ecdh_p256 = { + .set_secret = kmb_ocs_ecdh_set_secret, + .generate_public_key = kmb_ocs_ecdh_generate_public_key, + .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret, + .init = kmb_ocs_ecdh_nist_p256_init_tfm, + .exit = kmb_ocs_ecdh_exit_tfm, + .max_size = kmb_ocs_ecdh_max_size, + .base = { + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "ecdh-nist-p256-keembay-ocs", + .cra_priority = KMB_OCS_ECC_PRIORITY, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ocs_ecc_ctx), + }, +}; + +static struct kpp_alg ocs_ecdh_p384 = { + .set_secret = kmb_ocs_ecdh_set_secret, + .generate_public_key = kmb_ocs_ecdh_generate_public_key, + .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret, + .init = kmb_ocs_ecdh_nist_p384_init_tfm, + .exit = kmb_ocs_ecdh_exit_tfm, + .max_size = kmb_ocs_ecdh_max_size, + .base = { + .cra_name = "ecdh-nist-p384", + .cra_driver_name = "ecdh-nist-p384-keembay-ocs", + .cra_priority = KMB_OCS_ECC_PRIORITY, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ocs_ecc_ctx), + }, +}; + +static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id) +{ + struct ocs_ecc_dev *ecc_dev = dev_id; + u32 status; + + /* + * Read the status register and write it back to clear the + * DONE_INT_STATUS bit. + */ + status = ioread32(ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR); + iowrite32(status, ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR); + + if (!(status & HW_OCS_ECC_ISR_INT_STATUS_DONE)) + return IRQ_NONE; + + complete(&ecc_dev->irq_done); + + return IRQ_HANDLED; +} + +static int kmb_ocs_ecc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ocs_ecc_dev *ecc_dev; + int rc; + + ecc_dev = devm_kzalloc(dev, sizeof(*ecc_dev), GFP_KERNEL); + if (!ecc_dev) + return -ENOMEM; + + ecc_dev->dev = dev; + + platform_set_drvdata(pdev, ecc_dev); + + INIT_LIST_HEAD(&ecc_dev->list); + init_completion(&ecc_dev->irq_done); + + /* Get base register address. */ + ecc_dev->base_reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ecc_dev->base_reg)) { + dev_err(dev, "Failed to get base address\n"); + rc = PTR_ERR(ecc_dev->base_reg); + goto list_del; + } + + /* Get and request IRQ */ + ecc_dev->irq = platform_get_irq(pdev, 0); + if (ecc_dev->irq < 0) { + rc = ecc_dev->irq; + goto list_del; + } + + rc = devm_request_threaded_irq(dev, ecc_dev->irq, ocs_ecc_irq_handler, + NULL, 0, "keembay-ocs-ecc", ecc_dev); + if (rc < 0) { + dev_err(dev, "Could not request IRQ\n"); + goto list_del; + } + + /* Add device to the list of OCS ECC devices. */ + spin_lock(&ocs_ecc.lock); + list_add_tail(&ecc_dev->list, &ocs_ecc.dev_list); + spin_unlock(&ocs_ecc.lock); + + /* Initialize crypto engine. */ + ecc_dev->engine = crypto_engine_alloc_init(dev, 1); + if (!ecc_dev->engine) { + dev_err(dev, "Could not allocate crypto engine\n"); + goto list_del; + } + + rc = crypto_engine_start(ecc_dev->engine); + if (rc) { + dev_err(dev, "Could not start crypto engine\n"); + goto cleanup; + } + + /* Register the KPP algo. 
*/ + rc = crypto_register_kpp(&ocs_ecdh_p256); + if (rc) { + dev_err(dev, + "Could not register OCS algorithms with Crypto API\n"); + goto cleanup; + } + + rc = crypto_register_kpp(&ocs_ecdh_p384); + if (rc) { + dev_err(dev, + "Could not register OCS algorithms with Crypto API\n"); + goto ocs_ecdh_p384_error; + } + + return 0; + +ocs_ecdh_p384_error: + crypto_unregister_kpp(&ocs_ecdh_p256); + +cleanup: + crypto_engine_exit(ecc_dev->engine); + +list_del: + spin_lock(&ocs_ecc.lock); + list_del(&ecc_dev->list); + spin_unlock(&ocs_ecc.lock); + + return rc; +} + +static int kmb_ocs_ecc_remove(struct platform_device *pdev) +{ + struct ocs_ecc_dev *ecc_dev; + + ecc_dev = platform_get_drvdata(pdev); + if (!ecc_dev) + return -ENODEV; + + crypto_unregister_kpp(&ocs_ecdh_p384); + crypto_unregister_kpp(&ocs_ecdh_p256); + + spin_lock(&ocs_ecc.lock); + list_del(&ecc_dev->list); + spin_unlock(&ocs_ecc.lock); + + crypto_engine_exit(ecc_dev->engine); + + return 0; +} + +/* Device tree driver match. */ +static const struct of_device_id kmb_ocs_ecc_of_match[] = { + { + .compatible = "intel,keembay-ocs-ecc", + }, + {} +}; + +/* The OCS driver is a platform device. */ +static struct platform_driver kmb_ocs_ecc_driver = { + .probe = kmb_ocs_ecc_probe, + .remove = kmb_ocs_ecc_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = kmb_ocs_ecc_of_match, + }, +}; +module_platform_driver(kmb_ocs_ecc_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel Keem Bay OCS ECC Driver"); +MODULE_ALIAS_CRYPTO("ecdh-nist-p256"); +MODULE_ALIAS_CRYPTO("ecdh-nist-p384"); +MODULE_ALIAS_CRYPTO("ecdh-nist-p256-keembay-ocs"); +MODULE_ALIAS_CRYPTO("ecdh-nist-p384-keembay-ocs"); diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index f14aac532f53..5cd332880653 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -615,7 +615,6 @@ static struct platform_driver marvell_cesa = { }; module_platform_driver(marvell_cesa); -MODULE_ALIAS("platform:mv_crypto"); MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>"); MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index a72723455df7..877a948469bd 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -1274,6 +1274,7 @@ static int aead_do_fallback(struct aead_request *req, bool is_enc) req->base.complete, req->base.data); aead_request_set_crypt(&rctx->fbk_req, req->src, req->dst, req->cryptlen, req->iv); + aead_request_set_ad(&rctx->fbk_req, req->assoclen); ret = is_enc ? 
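The one-line otx2 fix above (aead_request_set_ad()) matters because a fallback request must mirror every parameter of the original; without it the fallback cipher treats the AAD bytes as payload. The full mirroring sequence, as a sketch (hypothetical helper name):

static int do_aead_fallback(struct aead_request *req,
			    struct aead_request *subreq, bool enc)
{
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	/* The previously missing step: carry over the AAD length. */
	aead_request_set_ad(subreq, req->assoclen);

	return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}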
crypto_aead_encrypt(&rctx->fbk_req) : crypto_aead_decrypt(&rctx->fbk_req); } else { diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c index 33d8e50dcbda..fa768f10635f 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ +#include <linux/iopoll.h> #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_pf2vf_msg.h> @@ -161,7 +162,36 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev) ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); } -static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev) +static int adf_init_device(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + u32 status; + u32 csr; + int ret; + + addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2); + csr |= ADF_4XXX_PM_SOU; + ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_4XXX_PM_INIT_STATE, + ADF_4XXX_PM_POLL_DELAY_US, + ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr, + ADF_4XXX_PM_STATUS); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); + + return ret; +} + +static int pfvf_comms_disabled(struct adf_accel_dev *accel_dev) { return 0; } @@ -215,6 +245,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; + hw_data->init_device = adf_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; hw_data->uof_get_num_objs = uof_get_num_objs; @@ -222,7 +253,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->set_msix_rttable = set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms; + hw_data->enable_pfvf_comms = pfvf_comms_disabled; hw_data->disable_iov = adf_disable_sriov; hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION; diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h index 4fe2a776293c..924bac6feb37 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -62,6 +62,16 @@ #define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) #define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) +/* Power management */ +#define ADF_4XXX_PM_POLL_DELAY_US 20 +#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC +#define ADF_4XXX_PM_STATUS (0x50A00C) +#define ADF_4XXX_PM_INTERRUPT (0x50A028) +#define ADF_4XXX_PM_DRV_ACTIVE BIT(20) +#define ADF_4XXX_PM_INIT_STATE BIT(21) +/* Power management source in ERRSOU2 and ERRMSK2 */ +#define ADF_4XXX_PM_SOU BIT(18) + /* Firmware Binaries */ #define ADF_4XXX_FW "qat_4xxx.bin" #define ADF_4XXX_MMP "qat_4xxx_mmp.bin" diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 3027c01bc89e..1fa690219d92 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ 
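adf_init_device() above is built on the generic read_poll_timeout() helper from <linux/iopoll.h>, which re-reads a register until a condition holds or a timeout expires. Its shape, using the same 20us/1s values as the patch but a plain readl() accessor (sketch, hypothetical wrapper; BIT(21) stands in for ADF_4XXX_PM_INIT_STATE):

#include <linux/iopoll.h>

static int wait_for_power_up(void __iomem *status_reg)
{
	u32 status;

	/* Poll every 20us, give up after 1s; 'true' sleeps before the first read. */
	return read_poll_timeout(readl, status, status & BIT(21),
				 20, USEC_PER_SEC, true, status_reg);
}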
b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK; } -static u32 get_num_accels(struct adf_hw_device_data *self) -{ - u32 i, ctr = 0; - - if (!self || !self->accel_mask) - return 0; - - for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) { - if (self->accel_mask & (1 << i)) - ctr++; - } - return ctr; -} - -static u32 get_num_aes(struct adf_hw_device_data *self) -{ - u32 i, ctr = 0; - - if (!self || !self->ae_mask) - return 0; - - for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) { - if (self->ae_mask & (1 << i)) - ctr++; - } - return ctr; -} - static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C3XXX_PMISC_BAR; @@ -88,12 +60,12 @@ static u32 get_etr_bar_id(struct adf_hw_device_data *self) static u32 get_sram_bar_id(struct adf_hw_device_data *self) { - return 0; + return ADF_C3XXX_SRAM_BAR; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { - int aes = get_num_aes(self); + int aes = self->get_num_aes(self); if (aes == 6) return DEV_SKU_4; @@ -106,41 +78,6 @@ static const u32 *adf_get_arbiter_mapping(void) return thrd_to_arb_map; } -static u32 get_pf2vf_offset(u32 i) -{ - return ADF_C3XXX_PF2VF_OFFSET(i); -} - -static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) -{ - struct adf_hw_device_data *hw_device = accel_dev->hw_device; - struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR]; - unsigned long accel_mask = hw_device->accel_mask; - unsigned long ae_mask = hw_device->ae_mask; - void __iomem *csr = misc_bar->virt_addr; - unsigned int val, i; - - /* Enable Accel Engine error detection & correction */ - for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { - val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i)); - val |= ADF_C3XXX_ENABLE_AE_ECC_ERR; - ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val); - val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i)); - val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR; - ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val); - } - - /* Enable shared memory error detection & correction */ - for_each_set_bit(i, &accel_mask, ADF_C3XXX_MAX_ACCELERATORS) { - val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i)); - val |= ADF_C3XXX_ERRSSMSH_EN; - ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val); - val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i)); - val |= ADF_C3XXX_ERRSSMSH_EN; - ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val); - } -} - static void adf_enable_ints(struct adf_accel_dev *accel_dev) { void __iomem *addr; @@ -154,13 +91,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev) ADF_C3XXX_SMIA1_MASK); } -static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev) -{ - spin_lock_init(&accel_dev->pf.vf2pf_ints_lock); - - return 0; -} - static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable) { adf_gen2_cfg_iov_thds(accel_dev, enable, @@ -177,16 +107,16 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES; - hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET; - hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK; + hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; - hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->enable_error_correction = 
adf_gen2_enable_error_correction; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_accel_cap = adf_gen2_get_accel_cap; - hw_data->get_num_accels = get_num_accels; - hw_data->get_num_aes = get_num_aes; + hw_data->get_num_accels = adf_gen2_get_num_accels; + hw_data->get_num_aes = adf_gen2_get_num_aes; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; @@ -205,7 +135,10 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->enable_ints = adf_enable_ints; hw_data->reset_device = adf_reset_flr; hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; - hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset; + hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources; + hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts; + hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts; hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms; hw_data->disable_iov = adf_disable_sriov; hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION; diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h index 86ee02a86789..1b86f828725c 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -6,8 +6,7 @@ /* PCIe configuration space */ #define ADF_C3XXX_PMISC_BAR 0 #define ADF_C3XXX_ETR_BAR 1 -#define ADF_C3XXX_RX_RINGS_OFFSET 8 -#define ADF_C3XXX_TX_RINGS_MASK 0xFF +#define ADF_C3XXX_SRAM_BAR 0 #define ADF_C3XXX_MAX_ACCELERATORS 3 #define ADF_C3XXX_MAX_ACCELENGINES 6 #define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 @@ -19,16 +18,6 @@ #define ADF_C3XXX_SMIA0_MASK 0xFFFF #define ADF_C3XXX_SMIA1_MASK 0x1 #define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC -/* Error detection and correction */ -#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) -#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) -#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28) -#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) -#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18) -#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10) -#define ADF_C3XXX_ERRSSMSH_EN BIT(3) - -#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) /* AE to function mapping */ #define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48 diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c index b023c80873bb..0613db077689 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK; } -static u32 get_num_accels(struct adf_hw_device_data *self) -{ - u32 i, ctr = 0; - - if (!self || !self->accel_mask) - return 0; - - for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) { - if (self->accel_mask & (1 << i)) - ctr++; - } - return ctr; -} - -static u32 get_num_aes(struct adf_hw_device_data *self) -{ - u32 i, ctr = 0; - - if (!self || !self->ae_mask) - return 0; - - for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) { - if (self->ae_mask & (1 << i)) - ctr++; - } - return ctr; -} - static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C62X_PMISC_BAR; @@ -93,7 +65,7 @@ static u32 get_sram_bar_id(struct adf_hw_device_data *self) static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { - int 
aes = get_num_aes(self); + int aes = self->get_num_aes(self); if (aes == 8) return DEV_SKU_2; @@ -108,41 +80,6 @@ static const u32 *adf_get_arbiter_mapping(void) return thrd_to_arb_map; } -static u32 get_pf2vf_offset(u32 i) -{ - return ADF_C62X_PF2VF_OFFSET(i); -} - -static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) -{ - struct adf_hw_device_data *hw_device = accel_dev->hw_device; - struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR]; - unsigned long accel_mask = hw_device->accel_mask; - unsigned long ae_mask = hw_device->ae_mask; - void __iomem *csr = misc_bar->virt_addr; - unsigned int val, i; - - /* Enable Accel Engine error detection & correction */ - for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { - val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i)); - val |= ADF_C62X_ENABLE_AE_ECC_ERR; - ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val); - val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i)); - val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR; - ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val); - } - - /* Enable shared memory error detection & correction */ - for_each_set_bit(i, &accel_mask, ADF_C62X_MAX_ACCELERATORS) { - val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i)); - val |= ADF_C62X_ERRSSMSH_EN; - ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val); - val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i)); - val |= ADF_C62X_ERRSSMSH_EN; - ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val); - } -} - static void adf_enable_ints(struct adf_accel_dev *accel_dev) { void __iomem *addr; @@ -156,13 +93,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev) ADF_C62X_SMIA1_MASK); } -static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev) -{ - spin_lock_init(&accel_dev->pf.vf2pf_ints_lock); - - return 0; -} - static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable) { adf_gen2_cfg_iov_thds(accel_dev, enable, @@ -179,16 +109,16 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES; - hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET; - hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK; + hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; - hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->enable_error_correction = adf_gen2_enable_error_correction; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_accel_cap = adf_gen2_get_accel_cap; - hw_data->get_num_accels = get_num_accels; - hw_data->get_num_aes = get_num_aes; + hw_data->get_num_accels = adf_gen2_get_num_accels; + hw_data->get_num_aes = adf_gen2_get_num_aes; hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; @@ -207,7 +137,10 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->enable_ints = adf_enable_ints; hw_data->reset_device = adf_reset_flr; hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; - hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset; + hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources; + hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts; + hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts; hw_data->enable_pfvf_comms = 
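The get_num_accels()/get_num_aes() loops deleted from both c3xxx and c62x above are what the shared adf_gen2_get_num_accels()/adf_gen2_get_num_aes() helpers now provide; counting enabled units is just a population count over the mask. The idea as a sketch, not the exact upstream body:

static u32 count_enabled_units(unsigned long mask, unsigned int max_units)
{
	/* Equivalent to the removed "for each bit, if set, ctr++" loops. */
	return hweight_long(mask & GENMASK(max_units - 1, 0));
}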
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index e6664bd20c91..68c3436bd3aa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -7,8 +7,6 @@
 #define ADF_C62X_SRAM_BAR 0
 #define ADF_C62X_PMISC_BAR 1
 #define ADF_C62X_ETR_BAR 2
-#define ADF_C62X_RX_RINGS_OFFSET 8
-#define ADF_C62X_TX_RINGS_MASK 0xFF
 #define ADF_C62X_MAX_ACCELERATORS 5
 #define ADF_C62X_MAX_ACCELENGINES 10
 #define ADF_C62X_ACCELERATORS_REG_OFFSET 16
@@ -20,16 +18,6 @@
 #define ADF_C62X_SMIA0_MASK 0xFFFF
 #define ADF_C62X_SMIA1_MASK 0x1
 #define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
-/* Error detection and correction */
-#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_C62X_ERRSSMSH_EN BIT(3)
-
-#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 38c0af6d4e43..57d9ca08e611 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -42,13 +42,17 @@ struct adf_bar {
     resource_size_t base_addr;
     void __iomem *virt_addr;
     resource_size_t size;
-} __packed;
+};
+
+struct adf_irq {
+    bool enabled;
+    char name[ADF_MAX_MSIX_VECTOR_NAME];
+};
 
 struct adf_accel_msix {
-    struct msix_entry *entries;
-    char **names;
+    struct adf_irq *irqs;
     u32 num_entries;
-} __packed;
+};
 
 struct adf_accel_pci {
     struct pci_dev *pci_dev;
@@ -56,7 +60,7 @@ struct adf_accel_pci {
     struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
     u8 revid;
     u8 sku;
-} __packed;
+};
 
 enum dev_state {
     DEV_DOWN = 0,
@@ -96,7 +100,7 @@ struct adf_hw_device_class {
     const char *name;
     const enum adf_device_type type;
     u32 instances;
-} __packed;
+};
 
 struct arb_info {
     u32 arb_cfg;
@@ -166,12 +170,18 @@ struct adf_hw_device_data {
     int (*init_arb)(struct adf_accel_dev *accel_dev);
     void (*exit_arb)(struct adf_accel_dev *accel_dev);
     const u32 *(*get_arb_mapping)(void);
+    int (*init_device)(struct adf_accel_dev *accel_dev);
     void (*disable_iov)(struct adf_accel_dev *accel_dev);
     void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
                                   bool enable);
     void (*enable_ints)(struct adf_accel_dev *accel_dev);
     void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
     int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
+    u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
+    void (*enable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
+                                    u32 vf_mask);
+    void (*disable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
+                                     u32 vf_mask);
     void (*reset_device)(struct adf_accel_dev *accel_dev);
     void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
     char *(*uof_get_name)(u32 obj_num);
@@ -195,7 +205,7 @@ struct adf_hw_device_data {
     u8 num_logical_accel;
     u8 num_engines;
     u8 min_iov_compat_ver;
-} __packed;
+};
 
 /* CSR write macro */
 #define ADF_CSR_WR(csr_base, csr_offset, val) \
@@ -251,7 +261,8 @@ struct adf_accel_dev {
         struct adf_accel_vf_info *vf_info;
     } pf;
     struct {
-        char *irq_name;
+        bool irq_enabled;
+        char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
         struct tasklet_struct pf2vf_bh_tasklet;
         struct mutex vf2pf_lock; /* protect CSR access */
         struct completion iov_msg_completion;
@@ -261,5 +272,5 @@ struct adf_accel_dev {
     };
     bool is_vf;
     u32 accel_id;
-} __packed;
+};
 #endif
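[Illustration, not part of the patch: dropping __packed above lets the compiler insert natural padding and use aligned loads and stores. A stand-alone sketch of the trade-off with field types approximating struct adf_bar; the exact sizes printed depend on the ABI.]

#include <stdio.h>

/* Packed: no padding, but members may be misaligned */
struct packed_bar {
    unsigned long long base_addr;
    void *virt_addr;
    unsigned int size;
} __attribute__((packed));

/* Naturally aligned: padded, members accessed at full width */
struct aligned_bar {
    unsigned long long base_addr;
    void *virt_addr;
    unsigned int size;
};

int main(void)
{
    printf("packed: %zu bytes, aligned: %zu bytes\n",
           sizeof(struct packed_bar), sizeof(struct aligned_bar));
    return 0;
}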
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 4261749fae8d..2cc6622833c4 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -62,7 +62,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
 void adf_dev_stop(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
-int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
 int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
@@ -197,10 +196,11 @@ void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
                                       u32 vf_mask);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                  u32 vf_mask);
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg);
 int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
 void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
 int adf_init_pf_wq(void);
@@ -211,6 +211,11 @@ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
 #else
 #define adf_sriov_configure NULL
 
+static inline int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+    return 0;
+}
+
 static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
 }
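[Illustration, not part of the patch: the header keeps callers compiling when CONFIG_PCI_IOV is off by pairing the real prototype with a no-op static inline stub, as above. A generic sketch of that pattern; CONFIG_EXAMPLE_FEATURE, struct dev_ctx and example_setup() are placeholders.]

struct dev_ctx;

#ifdef CONFIG_EXAMPLE_FEATURE
/* Real implementation lives in a .c file built only with the feature */
int example_setup(struct dev_ctx *ctx);
#else
static inline int example_setup(struct dev_ctx *ctx)
{
    return 0; /* feature compiled out: succeed without doing anything */
}
#endif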
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 9e560c7d4163..262bdc05dab4 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -4,6 +4,104 @@
 #include "icp_qat_hw.h"
 #include <linux/pci.h>
 
+#define ADF_GEN2_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+
+u32 adf_gen2_get_pf2vf_offset(u32 i)
+{
+    return ADF_GEN2_PF2VF_OFFSET(i);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_pf2vf_offset);
+
+u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+    u32 errsou3, errmsk3, vf_int_mask;
+
+    /* Get the interrupt sources triggered by VFs */
+    errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+    vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+    /* To avoid adding duplicate entries to work queue, clear
+     * vf_int_mask_sets bits that are already masked in ERRMSK register.
+     */
+    errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+    vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+    return vf_int_mask;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_vf2pf_sources);
+
+void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+    /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+    if (vf_mask & 0xFFFF) {
+        u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                  & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+    }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_vf2pf_interrupts);
+
+void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+    /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+    if (vf_mask & 0xFFFF) {
+        u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                  | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+    }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_disable_vf2pf_interrupts);
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
+{
+    if (!self || !self->accel_mask)
+        return 0;
+
+    return hweight16(self->accel_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
+
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
+{
+    if (!self || !self->ae_mask)
+        return 0;
+
+    return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
+
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+    struct adf_bar *misc_bar = &GET_BARS(accel_dev)
+                                    [hw_data->get_misc_bar_id(hw_data)];
+    unsigned long accel_mask = hw_data->accel_mask;
+    unsigned long ae_mask = hw_data->ae_mask;
+    void __iomem *csr = misc_bar->virt_addr;
+    unsigned int val, i;
+
+    /* Enable Accel Engine error detection & correction */
+    for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
+        val = ADF_CSR_RD(csr, ADF_GEN2_AE_CTX_ENABLES(i));
+        val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
+        ADF_CSR_WR(csr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+        val = ADF_CSR_RD(csr, ADF_GEN2_AE_MISC_CONTROL(i));
+        val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
+        ADF_CSR_WR(csr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+    }
+
+    /* Enable shared memory error detection & correction */
+    for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+        val = ADF_CSR_RD(csr, ADF_GEN2_UERRSSMSH(i));
+        val |= ADF_GEN2_ERRSSMSH_EN;
+        ADF_CSR_WR(csr, ADF_GEN2_UERRSSMSH(i), val);
+        val = ADF_CSR_RD(csr, ADF_GEN2_CERRSSMSH(i));
+        val |= ADF_GEN2_ERRSSMSH_EN;
+        ADF_CSR_WR(csr, ADF_GEN2_CERRSSMSH(i), val);
+    }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
+
 void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
                            int num_a_regs, int num_b_regs)
 {
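[Illustration, not part of the patch: the GEN2 macros carve the per-VF source bits out of ERRSOU3/ERRMSK3 — bits 9..24 hold VFs 0-15 and are shifted down to a 16-bit mask, and writes shift back up. A stand-alone check of the arithmetic; the register value is made up.]

#include <stdint.h>
#include <stdio.h>

#define ERR_REG_VF2PF(vf_src)  (((vf_src) & 0x01FFFE00) >> 9)
#define ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)

int main(void)
{
    uint32_t errsou3 = 0x00000600; /* hypothetical: bits 9 and 10 set */
    uint32_t vf_mask = ERR_REG_VF2PF(errsou3);

    printf("vf_mask = 0x%x\n", vf_mask);                    /* 0x3: VFs 0, 1 */
    printf("back to reg = 0x%x\n", ERR_MSK_VF2PF(vf_mask)); /* 0x600 */
    return 0;
}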
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 756b0ddfac5e..c169d704097d 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -22,6 +22,8 @@
 #define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
 #define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
 #define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_GEN2_RX_RINGS_OFFSET 8
+#define ADF_GEN2_TX_RINGS_MASK 0xFF
 
 #define BUILD_RING_BASE_ADDR(addr, size) \
     (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
@@ -125,6 +127,31 @@ do { \
 #define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
 #define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
 
+/* Error detection and correction */
+#define ADF_GEN2_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
+#define ADF_GEN2_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
+#define ADF_GEN2_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_GEN2_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define ADF_GEN2_ERRSSMSH_EN BIT(3)
+
+ /* VF2PF interrupts */
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+
+u32 adf_gen2_get_pf2vf_offset(u32 i);
+u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_bar);
+void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
+void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
 void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
                            int num_a_regs, int num_b_regs);
 void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 60bc7b991d35..e3749e5817d9 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -79,6 +79,11 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
         return -EFAULT;
     }
 
+    if (hw_data->init_device && hw_data->init_device(accel_dev)) {
+        dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
+        return -EFAULT;
+    }
+
     if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
         dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
         return -EFAULT;
     }
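[Illustration, not part of the patch: adf_dev_init() only invokes the new init_device hook when the device actually provides one, as shown above. The guarded optional-callback idiom in isolation; struct ops, dev_init() and the hook are stand-ins.]

#include <stdio.h>

struct dev_ctx;

struct ops {
    int (*init_device)(struct dev_ctx *dev); /* may be NULL */
};

static int dev_init(const struct ops *hw, struct dev_ctx *dev)
{
    if (hw->init_device && hw->init_device(dev)) {
        fprintf(stderr, "Failed to initialize device\n");
        return -1; /* the kernel code returns -EFAULT here */
    }
    return 0; /* hook absent or succeeded */
}

int main(void)
{
    struct ops hw = { .init_device = NULL }; /* no hook provided */

    return dev_init(&hw, NULL);
}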
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index c678d5c531aa..40593c9449a2 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -16,46 +16,31 @@
 #include "adf_transport_internal.h"
 
 #define ADF_MAX_NUM_VFS 32
-#define ADF_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_ERRSOU5 (0x3A000 + 0xD8)
-#define ADF_ERRMSK3 (0x3A000 + 0x1C)
-#define ADF_ERRMSK5 (0x3A000 + 0xDC)
-#define ADF_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
 
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
     struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
     struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-    u32 msix_num_entries = 1;
+    u32 msix_num_entries = hw_data->num_banks + 1;
+    int ret;
 
     if (hw_data->set_msix_rttable)
         hw_data->set_msix_rttable(accel_dev);
 
-    /* If SR-IOV is disabled, add entries for each bank */
-    if (!accel_dev->pf.vf_info) {
-        int i;
-
-        msix_num_entries += hw_data->num_banks;
-        for (i = 0; i < msix_num_entries; i++)
-            pci_dev_info->msix_entries.entries[i].entry = i;
-    } else {
-        pci_dev_info->msix_entries.entries[0].entry =
-            hw_data->num_banks;
-    }
-
-    if (pci_enable_msix_exact(pci_dev_info->pci_dev,
-                              pci_dev_info->msix_entries.entries,
-                              msix_num_entries)) {
-        dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
-        return -EFAULT;
+    ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
+                                msix_num_entries, PCI_IRQ_MSIX);
+    if (unlikely(ret < 0)) {
+        dev_err(&GET_DEV(accel_dev),
+                "Failed to allocate %d MSI-X vectors\n",
+                msix_num_entries);
+        return ret;
     }
     return 0;
 }
 
 static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
 {
-    pci_disable_msix(pci_dev_info->pci_dev);
+    pci_free_irq_vectors(pci_dev_info->pci_dev);
 }
 
 static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
@@ -80,22 +65,10 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
     struct adf_bar *pmisc =
             &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
     void __iomem *pmisc_addr = pmisc->virt_addr;
-    u32 errsou3, errsou5, errmsk3, errmsk5;
     unsigned long vf_mask;
 
     /* Get the interrupt sources triggered by VFs */
-    errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
-    errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
-    vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
-    vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
-
-    /* To avoid adding duplicate entries to work queue, clear
-     * vf_int_mask_sets bits that are already masked in ERRMSK register.
-     */
-    errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
-    errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
-    vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
-    vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
+    vf_mask = hw_data->get_vf2pf_sources(pmisc_addr);
 
     if (vf_mask) {
         struct adf_accel_vf_info *vf_info;
@@ -135,13 +108,39 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
     return IRQ_NONE;
 }
 
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+    struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+    struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+    struct adf_etr_data *etr_data = accel_dev->transport;
+    int clust_irq = hw_data->num_banks;
+    int irq, i = 0;
+
+    if (pci_dev_info->msix_entries.num_entries > 1) {
+        for (i = 0; i < hw_data->num_banks; i++) {
+            if (irqs[i].enabled) {
+                irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+                irq_set_affinity_hint(irq, NULL);
+                free_irq(irq, &etr_data->banks[i]);
+            }
+        }
+    }
+
+    if (irqs[i].enabled) {
+        irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+        free_irq(irq, accel_dev);
+    }
+}
+
 static int adf_request_irqs(struct adf_accel_dev *accel_dev)
 {
     struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
     struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-    struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+    struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
     struct adf_etr_data *etr_data = accel_dev->transport;
-    int ret, i = 0;
+    int clust_irq = hw_data->num_banks;
+    int ret, irq, i = 0;
     char *name;
 
     /* Request msix irq for all banks unless SR-IOV enabled */
@@ -150,105 +149,82 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
             struct adf_etr_bank_data *bank = &etr_data->banks[i];
             unsigned int cpu, cpus = num_online_cpus();
 
-            name = *(pci_dev_info->msix_entries.names + i);
+            name = irqs[i].name;
             snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
                      "qat%d-bundle%d", accel_dev->accel_id, i);
-            ret = request_irq(msixe[i].vector,
-                              adf_msix_isr_bundle, 0, name, bank);
+            irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+            if (unlikely(irq < 0)) {
+                dev_err(&GET_DEV(accel_dev),
+                        "Failed to get IRQ number of device vector %d - %s\n",
+                        i, name);
+                ret = irq;
+                goto err;
+            }
+            ret = request_irq(irq, adf_msix_isr_bundle, 0,
+                              &name[0], bank);
             if (ret) {
                 dev_err(&GET_DEV(accel_dev),
-                        "failed to enable irq %d for %s\n",
-                        msixe[i].vector, name);
-                return ret;
+                        "Failed to allocate IRQ %d for %s\n",
+                        irq, name);
+                goto err;
             }
 
             cpu = ((accel_dev->accel_id * hw_data->num_banks) +
                    i) % cpus;
-            irq_set_affinity_hint(msixe[i].vector,
-                                  get_cpu_mask(cpu));
+            irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+            irqs[i].enabled = true;
         }
     }
 
     /* Request msix irq for AE */
-    name = *(pci_dev_info->msix_entries.names + i);
+    name = irqs[i].name;
     snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
              "qat%d-ae-cluster", accel_dev->accel_id);
-    ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+    irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+    if (unlikely(irq < 0)) {
+        dev_err(&GET_DEV(accel_dev),
+                "Failed to get IRQ number of device vector %d - %s\n",
+                i, name);
+        ret = irq;
+        goto err;
+    }
+    ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
     if (ret) {
         dev_err(&GET_DEV(accel_dev),
-                "failed to enable irq %d, for %s\n",
-                msixe[i].vector, name);
-        return ret;
+                "Failed to allocate IRQ %d for %s\n", irq, name);
+        goto err;
     }
+    irqs[i].enabled = true;
+    return ret;
+err:
+    adf_free_irqs(accel_dev);
     return ret;
 }
 
-static void adf_free_irqs(struct adf_accel_dev *accel_dev)
-{
-    struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-    struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
-    struct adf_etr_data *etr_data = accel_dev->transport;
-    int i = 0;
-
-    if (pci_dev_info->msix_entries.num_entries > 1) {
-        for (i = 0; i < hw_data->num_banks; i++) {
-            irq_set_affinity_hint(msixe[i].vector, NULL);
-            free_irq(msixe[i].vector, &etr_data->banks[i]);
-        }
-    }
-    irq_set_affinity_hint(msixe[i].vector, NULL);
-    free_irq(msixe[i].vector, accel_dev);
-}
-
-static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
 {
-    int i;
-    char **names;
-    struct msix_entry *entries;
     struct adf_hw_device_data *hw_data = accel_dev->hw_device;
     u32 msix_num_entries = 1;
+    struct adf_irq *irqs;
 
     /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
     if (!accel_dev->pf.vf_info)
         msix_num_entries += hw_data->num_banks;
 
-    entries = kcalloc_node(msix_num_entries, sizeof(*entries),
-                           GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
-    if (!entries)
+    irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+                        GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+    if (!irqs)
         return -ENOMEM;
 
-    names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
-    if (!names) {
-        kfree(entries);
-        return -ENOMEM;
-    }
-    for (i = 0; i < msix_num_entries; i++) {
-        *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
-        if (!(*(names + i)))
-            goto err;
-    }
     accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
-    accel_dev->accel_pci_dev.msix_entries.entries = entries;
-    accel_dev->accel_pci_dev.msix_entries.names = names;
+    accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
     return 0;
-err:
-    for (i = 0; i < msix_num_entries; i++)
-        kfree(*(names + i));
-    kfree(entries);
-    kfree(names);
-    return -ENOMEM;
 }
 
-static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
 {
-    char **names = accel_dev->accel_pci_dev.msix_entries.names;
-    int i;
-
-    kfree(accel_dev->accel_pci_dev.msix_entries.entries);
-    for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
-        kfree(*(names + i));
-    kfree(names);
+    kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
+    accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
 }
 
 static int adf_setup_bh(struct adf_accel_dev *accel_dev)
@@ -287,7 +263,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
     adf_free_irqs(accel_dev);
     adf_cleanup_bh(accel_dev);
     adf_disable_msix(&accel_dev->accel_pci_dev);
-    adf_isr_free_msix_entry_table(accel_dev);
+    adf_isr_free_msix_vectors_data(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_isr_resource_free);
 
@@ -303,7 +279,7 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
 {
     int ret;
 
-    ret = adf_isr_alloc_msix_entry_table(accel_dev);
+    ret = adf_isr_alloc_msix_vectors_data(accel_dev);
     if (ret)
         goto err_out;
 
@@ -328,7 +304,7 @@ err_disable_msix:
     adf_disable_msix(&accel_dev->accel_pci_dev);
 
 err_free_msix_table:
-    adf_isr_free_msix_entry_table(accel_dev);
+    adf_isr_free_msix_vectors_data(accel_dev);
 
 err_out:
     return ret;
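[Illustration, not part of the patch: adf_isr.c moves from a hand-built msix_entry table to the managed-vector API — pci_alloc_irq_vectors() allocates the vectors and pci_irq_vector() maps an index to a Linux IRQ number. A hedged sketch of that pattern; my_setup_irqs(), my_handler and my_dev are placeholders, and unwinding on partial failure is trimmed for brevity.]

#include <linux/pci.h>
#include <linux/interrupt.h>

static int my_setup_irqs(struct pci_dev *pdev, unsigned int nvec,
                         irq_handler_t my_handler, void *my_dev)
{
    unsigned int i;
    int ret, irq;

    /* min == max: fail rather than run with fewer vectors */
    ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
    if (ret < 0)
        return ret;

    for (i = 0; i < nvec; i++) {
        irq = pci_irq_vector(pdev, i); /* index -> IRQ number */
        if (irq < 0)
            return irq;
        ret = request_irq(irq, my_handler, 0, "example", my_dev);
        if (ret)
            return ret;
    }
    return 0;
}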
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 976b9ab7617c..59860bdaedb6 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -5,82 +5,51 @@
 #include "adf_common_drv.h"
 #include "adf_pf2vf_msg.h"
 
-#define ADF_DH895XCC_EP_OFFSET 0x3A000
-#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
-#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
-#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
-#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
-
-static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
-                                          u32 vf_mask)
-{
-    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-    struct adf_bar *pmisc =
-            &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-    void __iomem *pmisc_addr = pmisc->virt_addr;
-    u32 reg;
-
-    /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
-    if (vf_mask & 0xFFFF) {
-        reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
-        reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
-        ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
-    }
-
-    /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
-    if (vf_mask >> 16) {
-        reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
-        reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
-        ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
-    }
-}
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_PFVF_MSG_ACK_DELAY 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
+#define ADF_PFVF_MSG_RETRY_DELAY 5
+#define ADF_PFVF_MSG_MAX_RETRIES 3
+#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
+                                   ADF_PFVF_MSG_ACK_MAX_RETRY + \
+                                   ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
 
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
 {
+    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+    u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+    struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
+    void __iomem *pmisc_addr = pmisc->virt_addr;
     unsigned long flags;
 
     spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
-    __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+    hw_data->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
     spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
 }
 
-static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
-                                           u32 vf_mask)
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
 {
     struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-    struct adf_bar *pmisc =
-            &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+    u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+    struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
     void __iomem *pmisc_addr = pmisc->virt_addr;
-    u32 reg;
-
-    /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
-    if (vf_mask & 0xFFFF) {
-        reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
-            ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
-        ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
-    }
-
-    /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
-    if (vf_mask >> 16) {
-        reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
-            ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
-        ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
-    }
-}
-
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
     unsigned long flags;
 
     spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
-    __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+    hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
     spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
 }
 
-void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+                                      u32 vf_mask)
 {
+    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+    u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+    struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
+    void __iomem *pmisc_addr = pmisc->virt_addr;
+
     spin_lock(&accel_dev->pf.vf2pf_ints_lock);
-    __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+    hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
     spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
 }
@@ -117,44 +86,33 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 
     mutex_lock(lock);
 
-    /* Check if PF2VF CSR is in use by remote function */
+    /* Check if the PFVF CSR is in use by remote function */
     val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
     if ((val & remote_in_use_mask) == remote_in_use_pattern) {
         dev_dbg(&GET_DEV(accel_dev),
-                "PF2VF CSR in use by remote function\n");
+                "PFVF CSR in use by remote function\n");
         ret = -EBUSY;
         goto out;
     }
 
-    /* Attempt to get ownership of PF2VF CSR */
     msg &= ~local_in_use_mask;
     msg |= local_in_use_pattern;
-    ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
-
-    /* Wait in case remote func also attempting to get ownership */
-    msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
-
-    val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
-    if ((val & local_in_use_mask) != local_in_use_pattern) {
-        dev_dbg(&GET_DEV(accel_dev),
-                "PF2VF CSR in use by remote - collision detected\n");
-        ret = -EBUSY;
-        goto out;
-    }
-
-    /*
-     * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
-     * remain in the PF2VF CSR for all writes including ACK from remote
-     * until this local function relinquishes the CSR. Send the message
-     * by interrupting the remote.
-     */
+    /* Attempt to get ownership of the PFVF CSR */
     ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
 
     /* Wait for confirmation from remote func it received the message */
     do {
-        msleep(ADF_IOV_MSG_ACK_DELAY);
+        msleep(ADF_PFVF_MSG_ACK_DELAY);
         val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
-    } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+    } while ((val & int_bit) && (count++ < ADF_PFVF_MSG_ACK_MAX_RETRY));
+
+    if (val != msg) {
+        dev_dbg(&GET_DEV(accel_dev),
+                "Collision - PFVF CSR overwritten by remote function\n");
+        ret = -EIO;
+        goto out;
+    }
 
     if (val & int_bit) {
         dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
@@ -162,7 +120,7 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
         ret = -EIO;
     }
 
-    /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+    /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
     ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
 out:
     mutex_unlock(lock);
@@ -170,16 +128,17 @@ out:
 }
 
 /**
- * adf_iov_putmsg() - send PF2VF message
+ * adf_iov_putmsg() - send PFVF message
  * @accel_dev:  Pointer to acceleration device.
  * @msg:        Message to send
- * @vf_nr:      VF number to which the message will be sent
+ * @vf_nr:      VF number to which the message will be sent if on PF, ignored
+ *              otherwise
  *
- * Function sends a message from the PF to a VF
+ * Function sends a message through the PFVF channel
  *
 * Return: 0 on success, error code otherwise.
 */
-int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+static int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 {
     u32 count = 0;
     int ret;
@@ -187,12 +146,77 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
     do {
         ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
         if (ret)
-            msleep(ADF_IOV_MSG_RETRY_DELAY);
-    } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+            msleep(ADF_PFVF_MSG_RETRY_DELAY);
+    } while (ret && (count++ < ADF_PFVF_MSG_MAX_RETRIES));
 
     return ret;
 }
 
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev:  Pointer to acceleration device
+ * @vf_nr:      VF number to which the message will be sent
+ * @msg:        Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, u32 msg)
+{
+    return adf_iov_putmsg(accel_dev, msg, vf_nr);
+}
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev:  Pointer to acceleration device
+ * @msg:        Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg)
+{
+    return adf_iov_putmsg(accel_dev, msg, 0);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev:  Pointer to acceleration device.
+ * @msg:        Request message to send
+ *
+ * This function sends a message that requires a response from the VF to the PF
+ * and waits for a reply.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, u32 msg)
+{
+    unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+    int ret;
+
+    reinit_completion(&accel_dev->vf.iov_msg_completion);
+
+    /* Send request from VF to PF */
+    ret = adf_send_vf2pf_msg(accel_dev, msg);
+    if (ret) {
+        dev_err(&GET_DEV(accel_dev),
+                "Failed to send request msg to PF\n");
+        return ret;
+    }
+
+    /* Wait for response */
+    if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+                                     timeout)) {
+        dev_err(&GET_DEV(accel_dev),
+                "PFVF request/response message timeout expired\n");
+        return -EIO;
+    }
+
+    return 0;
+}
+
 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 {
     struct adf_accel_dev *accel_dev = vf_info->accel_dev;
@@ -204,6 +228,11 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 
     /* Read message from the VF */
     msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+    if (!(msg & ADF_VF2PF_INT)) {
+        dev_info(&GET_DEV(accel_dev),
+                 "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
+        goto out;
+    }
 
     /* To ACK, clear the VF2PFINT bit */
     msg &= ~ADF_VF2PF_INT;
@@ -284,9 +313,10 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
         goto err;
     }
 
-    if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+    if (resp && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
         dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
 
+out:
     /* re-enable interrupt on PF from this VF */
     adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
 
@@ -304,7 +334,7 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
     int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
 
     for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
-        if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+        if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
             dev_err(&GET_DEV(accel_dev),
                     "Failed to send restarting msg to VF%d\n", i);
     }
@@ -312,7 +342,6 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
 
 static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
 {
-    unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
     struct adf_hw_device_data *hw_data = accel_dev->hw_device;
     u32 msg = 0;
     int ret;
@@ -322,24 +351,13 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
     msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
     BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
 
-    reinit_completion(&accel_dev->vf.iov_msg_completion);
-
-    /* Send request from VF to PF */
-    ret = adf_iov_putmsg(accel_dev, msg, 0);
+    ret = adf_send_vf2pf_req(accel_dev, msg);
     if (ret) {
         dev_err(&GET_DEV(accel_dev),
                 "Failed to send Compatibility Version Request.\n");
         return ret;
     }
 
-    /* Wait for response */
-    if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
-                                     timeout)) {
-        dev_err(&GET_DEV(accel_dev),
-                "IOV request/response message timeout expired\n");
-        return -EIO;
-    }
-
     /* Response from PF received, check compatibility */
     switch (accel_dev->vf.compatible) {
     case ADF_PF2VF_VF_COMPATIBLE:
@@ -378,3 +396,21 @@ int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
     return adf_vf2pf_request_version(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from pf to vf
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * This function carries out the necessary steps to setup and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+    spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
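[Illustration, not part of the patch: adf_send_vf2pf_req() above follows a common request/response shape — re-arm a struct completion, fire the message, then block until the reply path calls complete() or a timeout elapses. A hedged sketch; struct chan and send_hw_msg() are placeholders, and 210 ms mirrors how ADF_PFVF_MSG_RESP_TIMEOUT is computed from the ACK constants.]

#include <linux/completion.h>
#include <linux/jiffies.h>

struct chan {
    struct completion resp_done; /* completed by the IRQ/reply path */
};

static int send_request(struct chan *c, int (*send_hw_msg)(void))
{
    unsigned long timeout = msecs_to_jiffies(210); /* 2 * 100 + 10 */
    int ret;

    reinit_completion(&c->resp_done);

    ret = send_hw_msg();
    if (ret)
        return ret;

    if (!wait_for_completion_timeout(&c->resp_done, timeout))
        return -EIO; /* peer never answered */

    return 0;
}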
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
index ffd43aa50b57..a7d8f8367345 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -90,13 +90,4 @@
 /* VF->PF Compatible Version Request */
 #define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
 
-/* Collision detection */
-#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
-#define ADF_IOV_MSG_ACK_DELAY 2
-#define ADF_IOV_MSG_ACK_MAX_RETRY 100
-#define ADF_IOV_MSG_RETRY_DELAY 5
-#define ADF_IOV_MSG_MAX_RETRIES 3
-#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
-                                  ADF_IOV_MSG_ACK_MAX_RETRY + \
-                                  ADF_IOV_MSG_COLLISION_DETECT_DELAY)
 #endif /* ADF_IOV_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index 3e25fac051b2..8d11bb24cea0 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -17,7 +17,7 @@ int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
     u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
         (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
 
-    if (adf_iov_putmsg(accel_dev, msg, 0)) {
+    if (adf_send_vf2pf_msg(accel_dev, msg)) {
         dev_err(&GET_DEV(accel_dev),
                 "Failed to send Init event to PF\n");
         return -EFAULT;
@@ -41,7 +41,7 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
         (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
 
     if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
-        if (adf_iov_putmsg(accel_dev, msg, 0))
+        if (adf_send_vf2pf_msg(accel_dev, msg))
             dev_err(&GET_DEV(accel_dev),
                     "Failed to send Shutdown event to PF\n");
 }
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 7828a6573f3e..db5e7abbe5f3 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -53,27 +53,22 @@ EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
 static int adf_enable_msi(struct adf_accel_dev *accel_dev)
 {
     struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-    int stat = pci_enable_msi(pci_dev_info->pci_dev);
-
-    if (stat) {
+    int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
+                                     PCI_IRQ_MSI);
+    if (unlikely(stat < 0)) {
         dev_err(&GET_DEV(accel_dev),
-                "Failed to enable MSI interrupts\n");
+                "Failed to enable MSI interrupt: %d\n", stat);
         return stat;
     }
 
-    accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
-    if (!accel_dev->vf.irq_name)
-        return -ENOMEM;
-
-    return stat;
+    return 0;
 }
 
 static void adf_disable_msi(struct adf_accel_dev *accel_dev)
 {
     struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
 
-    kfree(accel_dev->vf.irq_name);
-    pci_disable_msi(pdev);
+    pci_free_irq_vectors(pdev);
 }
 
 static void adf_dev_stop_async(struct work_struct *work)
@@ -101,6 +96,11 @@ static void adf_pf2vf_bh_handler(void *data)
 
     /* Read the message from PF */
     msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
+    if (!(msg & ADF_PF2VF_INT)) {
+        dev_info(&GET_DEV(accel_dev),
+                 "Spurious PF2VF interrupt, msg %X. Ignored\n", msg);
+        goto out;
+    }
 
     if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
         /* Ignore legacy non-system (non-kernel) PF2VF messages */
@@ -149,6 +149,7 @@ static void adf_pf2vf_bh_handler(void *data)
     msg &= ~ADF_PF2VF_INT;
     ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
 
+out:
     /* Re-enable PF2VF interrupts */
     adf_enable_pf2vf_interrupts(accel_dev);
     return;
@@ -240,6 +241,7 @@ static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
     }
     cpu = accel_dev->accel_id % num_online_cpus();
     irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+    accel_dev->vf.irq_enabled = true;
 
     return ret;
 }
@@ -271,8 +273,10 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
 {
     struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
 
-    irq_set_affinity_hint(pdev->irq, NULL);
-    free_irq(pdev->irq, (void *)accel_dev);
+    if (accel_dev->vf.irq_enabled) {
+        irq_set_affinity_hint(pdev->irq, NULL);
+        free_irq(pdev->irq, accel_dev);
+    }
     adf_cleanup_bh(accel_dev);
     adf_cleanup_pf2vf_bh(accel_dev);
     adf_disable_msi(accel_dev);
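[Illustration, not part of the patch: both message handlers above gain the same guard — a shared or stale interrupt can fire without the peer's INT bit set in the CSR, so the handler re-reads the register and drops the event unless the bit is present. A sketch of the guard; MY_INT_BIT and read_csr() are placeholders for ADF_VF2PF_INT/ADF_PF2VF_INT and the CSR read.]

#include <linux/types.h>
#include <linux/bits.h>

#define MY_INT_BIT BIT(0) /* placeholder interrupt bit */

static void handle_msg(u32 (*read_csr)(void))
{
    u32 msg = read_csr();

    if (!(msg & MY_INT_BIT))
        return; /* spurious: ignore, just re-enable the interrupt */

    /* ... otherwise decode the message and ACK by clearing the bit ... */
}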
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 0a9ce365a544..8e2e1554dcf6 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -35,34 +35,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
     return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
 }
 
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-    u32 i, ctr = 0;
-
-    if (!self || !self->accel_mask)
-        return 0;
-
-    for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
-        if (self->accel_mask & (1 << i))
-            ctr++;
-    }
-    return ctr;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-    u32 i, ctr = 0;
-
-    if (!self || !self->ae_mask)
-        return 0;
-
-    for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
-        if (self->ae_mask & (1 << i))
-            ctr++;
-    }
-    return ctr;
-}
-
 static u32 get_misc_bar_id(struct adf_hw_device_data *self)
 {
     return ADF_DH895XCC_PMISC_BAR;
@@ -126,41 +98,6 @@ static const u32 *adf_get_arbiter_mapping(void)
     return thrd_to_arb_map;
 }
 
-static u32 get_pf2vf_offset(u32 i)
-{
-    return ADF_DH895XCC_PF2VF_OFFSET(i);
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
-    struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-    struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
-    unsigned long accel_mask = hw_device->accel_mask;
-    unsigned long ae_mask = hw_device->ae_mask;
-    void __iomem *csr = misc_bar->virt_addr;
-    unsigned int val, i;
-
-    /* Enable Accel Engine error detection & correction */
-    for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
-        val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
-        val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
-        ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
-        val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
-        val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
-        ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
-    }
-
-    /* Enable shared memory error detection & correction */
-    for_each_set_bit(i, &accel_mask, ADF_DH895XCC_MAX_ACCELERATORS) {
-        val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
-        val |= ADF_DH895XCC_ERRSSMSH_EN;
-        ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
-        val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
-        val |= ADF_DH895XCC_ERRSSMSH_EN;
-        ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
-    }
-}
-
 static void adf_enable_ints(struct adf_accel_dev *accel_dev)
 {
     void __iomem *addr;
@@ -175,11 +112,50 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                ADF_DH895XCC_SMIA1_MASK);
 }
 
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
+{
+    u32 errsou5, errmsk5, vf_int_mask;
+
+    vf_int_mask = adf_gen2_get_vf2pf_sources(pmisc_bar);
+
+    /* Get the interrupt sources triggered by VFs, but to avoid duplicates
+     * in the work queue, clear vf_int_mask_sets bits that are already
+     * masked in ERRMSK register.
+     */
+    errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
+    errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
+    vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+    vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+    return vf_int_mask;
+}
+
+static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
 {
-    spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+    /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+    adf_gen2_enable_vf2pf_interrupts(pmisc_addr, vf_mask);
 
-    return 0;
+    /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
+    if (vf_mask >> 16) {
+        u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+                  & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+
+        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+    }
+}
+
+static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+    /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+    adf_gen2_disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+
+    /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
+    if (vf_mask >> 16) {
+        u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+                  | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+
+        ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+    }
 }
 
 static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
@@ -198,16 +174,16 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
     hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
     hw_data->num_logical_accel = 1;
     hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
-    hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
-    hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+    hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+    hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
     hw_data->alloc_irq = adf_isr_resource_alloc;
     hw_data->free_irq = adf_isr_resource_free;
-    hw_data->enable_error_correction = adf_enable_error_correction;
+    hw_data->enable_error_correction = adf_gen2_enable_error_correction;
     hw_data->get_accel_mask = get_accel_mask;
     hw_data->get_ae_mask = get_ae_mask;
     hw_data->get_accel_cap = get_accel_cap;
-    hw_data->get_num_accels = get_num_accels;
-    hw_data->get_num_aes = get_num_aes;
+    hw_data->get_num_accels = adf_gen2_get_num_accels;
+    hw_data->get_num_aes = adf_gen2_get_num_aes;
     hw_data->get_etr_bar_id = get_etr_bar_id;
     hw_data->get_misc_bar_id = get_misc_bar_id;
     hw_data->get_admin_info = adf_gen2_get_admin_info;
@@ -225,7 +201,10 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
     hw_data->get_arb_mapping = adf_get_arbiter_mapping;
     hw_data->enable_ints = adf_enable_ints;
     hw_data->reset_device = adf_reset_sbr;
-    hw_data->get_pf2vf_offset = get_pf2vf_offset;
+    hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
+    hw_data->get_vf2pf_sources = get_vf2pf_sources;
+    hw_data->enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+    hw_data->disable_vf2pf_interrupts = disable_vf2pf_interrupts;
     hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
     hw_data->disable_iov = adf_disable_sriov;
     hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
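[Illustration, not part of the patch: dh895xcc supports 32 VFs, so its callbacks above layer an upper half on the GEN2 helpers — VFs 0-15 live in ERRMSK3 (bits 9..24), VFs 16-31 in the low 16 bits of ERRMSK5. A stand-alone check of the split; the mask value is made up.]

#include <stdint.h>
#include <stdio.h>

#define ERR_MSK_VF2PF_L(m) (((m) & 0xFFFF) << 9) /* ERRMSK3 half */
#define ERR_MSK_VF2PF_U(m) ((m) >> 16)           /* ERRMSK5 half */

int main(void)
{
    uint32_t vf_mask = (1u << 20) | (1u << 3); /* hypothetical: VFs 3 and 20 */

    printf("ERRMSK3 bits: 0x%x\n", ERR_MSK_VF2PF_L(vf_mask)); /* 0x1000 */
    printf("ERRMSK5 bits: 0x%x\n", ERR_MSK_VF2PF_U(vf_mask)); /* 0x10 */
    return 0;
}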
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index f99319cd4543..0af34dd8708a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -7,8 +7,6 @@
 #define ADF_DH895XCC_SRAM_BAR 0
 #define ADF_DH895XCC_PMISC_BAR 1
 #define ADF_DH895XCC_ETR_BAR 2
-#define ADF_DH895XCC_RX_RINGS_OFFSET 8
-#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
 #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
 #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
 #define ADF_DH895XCC_FUSECTL_SKU_1 0x0
@@ -25,16 +23,10 @@
 #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
 #define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
 #define ADF_DH895XCC_SMIA1_MASK 0x1
-/* Error detection and correction */
-#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
-#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)
 
 /* AE to function mapping */
 #define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 55aa3a71169b..7717e9e5977b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2171,6 +2171,8 @@ static int s5p_aes_probe(struct platform_device *pdev)
     variant = find_s5p_sss_version(pdev);
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    if (!res)
+        return -EINVAL;
 
     /*
      * Note: HASH and PRNG uses the same registers in secss, avoid
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 544d7040cfc5..bcbc38dc6ae8 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -2412,8 +2412,7 @@ static int sa_ul_probe(struct platform_device *pdev)
     pm_runtime_enable(dev);
     ret = pm_runtime_resume_and_get(dev);
     if (ret < 0) {
-        dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
-            ret);
+        dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
         pm_runtime_disable(dev);
         return ret;
     }
@@ -2435,16 +2434,16 @@ static int sa_ul_probe(struct platform_device *pdev)
 
     sa_register_algos(dev_data);
 
-    ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+    ret = of_platform_populate(node, NULL, NULL, dev);
     if (ret)
         goto release_dma;
 
-    device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
+    device_for_each_child(dev, dev, sa_link_child);
 
     return 0;
 
 release_dma:
-    sa_unregister_algos(&pdev->dev);
+    sa_unregister_algos(dev);
 
     dma_release_channel(dev_data->dma_rx2);
     dma_release_channel(dev_data->dma_rx1);
@@ -2453,8 +2452,8 @@ release_dma:
 destroy_dma_pool:
     dma_pool_destroy(dev_data->sc_pool);
 
-    pm_runtime_put_sync(&pdev->dev);
-    pm_runtime_disable(&pdev->dev);
+    pm_runtime_put_sync(dev);
+    pm_runtime_disable(dev);
 
     return ret;
 }
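[Illustration, not part of the patch: the s5p-sss fix above guards platform_get_resource() against a NULL return before the resource is used. A hedged probe fragment showing the same guard alongside devm_platform_ioremap_resource(), which folds the lookup, check and mapping into one call (as used elsewhere in this series); my_probe() is a placeholder, not the s5p-sss code.]

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int my_probe(struct platform_device *pdev)
{
    struct resource *res;
    void __iomem *base;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -EINVAL; /* missing MEM resource: bail out early */

    /* Equivalent lookup-and-map in one call */
    base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(base))
        return PTR_ERR(base);

    return 0;
}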