author    Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 15:17:17 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 15:17:17 -0700
commit    80cee03bf1d626db0278271b505d7f5febb37bba (patch)
tree      6fc86272106f526a9d07343c524612aa493539e6 /drivers
parent    aae3dbb4776e7916b6cd442d00159bea27a695c1 (diff)
parent    2d45a7e89833f88b38112292ff227af437f81f2f (diff)
download  linux-80cee03bf1d626db0278271b505d7f5febb37bba.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.14:

  API:
   - Defer scompress scratch buffer allocation to first use.
   - Add __crypto_xor that takes separate src and dst operands.
   - Add ahash multiple registration interface.
   - Revamped aead/skcipher algif code to fix async IO properly.

  Drivers:
   - Add non-SIMD fallback code path on ARM for SVE.
   - Add AMD Security Processor framework for ccp.
   - Add support for RSA in ccp.
   - Add XTS-AES-256 support for CCP version 5.
   - Add support for PRNG in sun4i-ss.
   - Add support for DPAA2 in caam.
   - Add ARTPEC crypto support.
   - Add Freescale RNGC hwrng support.
   - Add Microchip / Atmel ECC driver.
   - Add support for STM32 HASH module"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (116 commits)
  crypto: af_alg - get_page upon reassignment to TX SGL
  crypto: cavium/nitrox - Fix an error handling path in 'nitrox_probe()'
  crypto: inside-secure - fix an error handling path in safexcel_probe()
  crypto: rockchip - Don't dequeue the request when device is busy
  crypto: cavium - add release_firmware to all return case
  crypto: sahara - constify platform_device_id
  MAINTAINERS: Add ARTPEC crypto maintainer
  crypto: axis - add ARTPEC-6/7 crypto accelerator driver
  crypto: hash - add crypto_(un)register_ahashes()
  dt-bindings: crypto: add ARTPEC crypto
  crypto: algif_aead - fix comment regarding memory layout
  crypto: ccp - use dma_mapping_error to check map error
  lib/mpi: fix build with clang
  crypto: sahara - Remove leftover from previous used spinlock
  crypto: sahara - Fix dma unmap direction
  crypto: af_alg - consolidation of duplicate code
  crypto: caam - Remove unused dentry members
  crypto: ccp - select CONFIG_CRYPTO_RSA
  crypto: ccp - avoid uninitialized variable warning
  crypto: serpent - improve __serpent_setkey with UBSAN
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig | 20
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/core.c | 42
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 331
-rw-r--r--  drivers/crypto/Kconfig | 49
-rw-r--r--  drivers/crypto/Makefile | 4
-rw-r--r--  drivers/crypto/atmel-ecc.c | 781
-rw-r--r--  drivers/crypto/atmel-ecc.h | 128
-rw-r--r--  drivers/crypto/atmel-sha.c | 2
-rw-r--r--  drivers/crypto/atmel-tdes.c | 2
-rw-r--r--  drivers/crypto/axis/Makefile | 1
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 3192
-rw-r--r--  drivers/crypto/bcm/cipher.c | 114
-rw-r--r--  drivers/crypto/bcm/cipher.h | 13
-rw-r--r--  drivers/crypto/caam/caamalg.c | 66
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 5
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 55
-rw-r--r--  drivers/crypto/caam/caamhash.c | 7
-rw-r--r--  drivers/crypto/caam/caamrng.c | 6
-rw-r--r--  drivers/crypto/caam/ctrl.c | 127
-rw-r--r--  drivers/crypto/caam/ctrl.h | 2
-rw-r--r--  drivers/crypto/caam/error.c | 40
-rw-r--r--  drivers/crypto/caam/error.h | 4
-rw-r--r--  drivers/crypto/caam/intern.h | 11
-rw-r--r--  drivers/crypto/caam/jr.c | 7
-rw-r--r--  drivers/crypto/caam/qi.c | 30
-rw-r--r--  drivers/crypto/caam/qi.h | 3
-rw-r--r--  drivers/crypto/caam/regs.h | 1
-rw-r--r--  drivers/crypto/caam/sg_sw_qm2.h | 81
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 43
-rw-r--r--  drivers/crypto/cavium/cpt/cptpf_main.c | 13
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 4
-rw-r--r--  drivers/crypto/ccp/Kconfig | 22
-rw-r--r--  drivers/crypto/ccp/Makefile | 7
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 96
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 21
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-rsa.c | 299
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 36
-rw-r--r--  drivers/crypto/ccp/ccp-debugfs.c | 15
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 20
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 28
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 134
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 30
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 133
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 356
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c | 293
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 277
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 133
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 276
-rw-r--r--  drivers/crypto/ccp/sp-platform.c | 256
-rw-r--r--  drivers/crypto/geode-aes.c | 17
-rw-r--r--  drivers/crypto/img-hash.c | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 5
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c | 2
-rw-r--r--  drivers/crypto/mxc-scc.c | 4
-rw-r--r--  drivers/crypto/mxs-dcp.c | 8
-rw-r--r--  drivers/crypto/n2_core.c | 60
-rw-r--r--  drivers/crypto/omap-aes.c | 1
-rw-r--r--  drivers/crypto/omap-des.c | 3
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 2
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c | 74
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h | 15
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 103
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c | 119
-rw-r--r--  drivers/crypto/sahara.c | 14
-rw-r--r--  drivers/crypto/stm32/Kconfig | 19
-rw-r--r--  drivers/crypto/stm32/Makefile | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 1575
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 17
-rw-r--r--  drivers/crypto/sunxi-ss/Makefile | 1
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 30
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 56
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss.h | 11
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 109
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 22
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 37
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 3
-rw-r--r--  drivers/md/dm-crypt.c | 11
83 files changed, 8545 insertions, 1406 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1b223c32a8ae..95a031e9eced 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -13,10 +13,8 @@ menuconfig HW_RANDOM
that's usually called /dev/hwrng, and which exposes one
of possibly several hardware random number generators.
- These hardware random number generators do not feed directly
- into the kernel's random number generator. That is usually
- handled by the "rngd" daemon. Documentation/hw_random.txt
- has more information.
+ These hardware random number generators do feed into the
+ kernel's random number generator entropy pool.
If unsure, say Y.
@@ -255,6 +253,20 @@ config HW_RANDOM_MXC_RNGA
If unsure, say Y.
+config HW_RANDOM_IMX_RNGC
+ tristate "Freescale i.MX RNGC Random Number Generator"
+ depends on ARCH_MXC
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator Version C hardware found on some Freescale i.MX
+ processors. Version B is also supported by this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-rngc.
+
+ If unsure, say Y.
+
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
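
The /dev/hwrng node mentioned in the help text above is the userspace side of this framework. A minimal sketch of reading it (illustrative only; assumes some hwrng driver is bound and has become the current rng):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t i, n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}

	/* Each read returns bytes from whichever rng is current. */
	n = read(fd, buf, sizeof(buf));
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(fd);
	return n > 0 ? 0 : 1;
}
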
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b085975ec1d2..39a67defac67 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
+obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 503a41dfa193..9701ac7d8b47 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -28,7 +28,10 @@
#define RNG_MODULE_NAME "hw_random"
static struct hwrng *current_rng;
+/* the current rng has been explicitly chosen by user via sysfs */
+static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
+/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -303,6 +306,7 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
err = 0;
+ cur_rng_set_by_user = 1;
if (rng != current_rng)
err = set_current_rng(rng);
break;
@@ -351,16 +355,27 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return strlen(buf);
}
+static ssize_t hwrng_attr_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+}
+
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
hwrng_attr_current_show,
hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
hwrng_attr_available_show,
NULL);
+static DEVICE_ATTR(rng_selected, S_IRUGO,
+ hwrng_attr_selected_show,
+ NULL);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
+ &dev_attr_rng_selected.attr,
NULL
};
@@ -417,6 +432,7 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *old_rng, *tmp;
+ struct list_head *rng_list_ptr;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@@ -432,14 +448,27 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ /* rng_list is sorted by decreasing quality */
+ list_for_each(rng_list_ptr, &rng_list) {
+ tmp = list_entry(rng_list_ptr, struct hwrng, list);
+ if (tmp->quality < rng->quality)
+ break;
+ }
+ list_add_tail(&rng->list, rng_list_ptr);
+
old_rng = current_rng;
err = 0;
- if (!old_rng) {
+ if (!old_rng ||
+ (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+ /*
+ * Set new rng as current as the new rng source
+ * provides better entropy quality and was not
+ * chosen by userspace.
+ */
err = set_current_rng(rng);
if (err)
goto out_unlock;
}
- list_add_tail(&rng->list, &rng_list);
if (old_rng && !rng->init) {
/*
@@ -466,12 +495,13 @@ void hwrng_unregister(struct hwrng *rng)
list_del(&rng->list);
if (current_rng == rng) {
drop_current_rng();
+ cur_rng_set_by_user = 0;
+ /* rng_list is sorted by quality, use the best (=first) one */
if (!list_empty(&rng_list)) {
- struct hwrng *tail;
-
- tail = list_entry(rng_list.prev, struct hwrng, list);
+ struct hwrng *new_rng;
- set_current_rng(tail);
+ new_rng = list_entry(rng_list.next, struct hwrng, list);
+ set_current_rng(new_rng);
}
}
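
The hwrng_register() hunk above keeps rng_list sorted by descending quality, so that hwrng_unregister() can fall back to the best remaining source. A standalone sketch of the same insertion pattern (plain C, with hypothetical names and quality values):

#include <stdio.h>

struct rng {
	const char *name;
	unsigned int quality;	/* estimated entropy, higher is better */
	struct rng *next;
};

/* Insert so the list stays ordered by decreasing quality; ties go
 * after existing entries, as in the loop added to hwrng_register(). */
static void rng_insert(struct rng **head, struct rng *rng)
{
	struct rng **pos = head;

	while (*pos && (*pos)->quality >= rng->quality)
		pos = &(*pos)->next;
	rng->next = *pos;
	*pos = rng;
}

int main(void)
{
	struct rng a = { "slow-rng", 100, NULL };
	struct rng b = { "good-rng", 900, NULL };
	struct rng c = { "ok-rng", 700, NULL };
	struct rng *head = NULL, *it;

	rng_insert(&head, &a);
	rng_insert(&head, &b);
	rng_insert(&head, &c);
	for (it = head; it; it = it->next)
		printf("%s (%u)\n", it->name, it->quality);
	return 0;	/* prints good-rng, ok-rng, slow-rng */
}
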
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
new file mode 100644
index 000000000000..88db42d30760
--- /dev/null
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -0,0 +1,331 @@
+/*
+ * RNG driver for Freescale RNGC
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2017 Martin Kaiser <martin@kaiser.cx>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+
+#define RNGC_COMMAND 0x0004
+#define RNGC_CONTROL 0x0008
+#define RNGC_STATUS 0x000C
+#define RNGC_ERROR 0x0010
+#define RNGC_FIFO 0x0014
+
+#define RNGC_CMD_CLR_ERR 0x00000020
+#define RNGC_CMD_CLR_INT 0x00000010
+#define RNGC_CMD_SEED 0x00000002
+#define RNGC_CMD_SELF_TEST 0x00000001
+
+#define RNGC_CTRL_MASK_ERROR 0x00000040
+#define RNGC_CTRL_MASK_DONE 0x00000020
+
+#define RNGC_STATUS_ERROR 0x00010000
+#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
+#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8
+#define RNGC_STATUS_SEED_DONE 0x00000020
+#define RNGC_STATUS_ST_DONE 0x00000010
+
+#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
+
+#define RNGC_TIMEOUT 3000 /* 3 sec */
+
+
+static bool self_test = true;
+module_param(self_test, bool, 0);
+
+struct imx_rngc {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ struct hwrng rng;
+ struct completion rng_op_done;
+ /*
+ * err_reg is written only by the irq handler and read only
+ * when interrupts are masked, so no spinlock is needed
+ */
+ u32 err_reg;
+};
+
+
+static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc)
+{
+ u32 ctrl, cmd;
+
+ /* mask interrupts */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+ * CLR_INT clears the interrupt only if there's no error
+ * CLR_ERR clears the interrupt and the error register if there
+ * is an error
+ */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR;
+ writel(cmd, rngc->base + RNGC_COMMAND);
+}
+
+static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc)
+{
+ u32 ctrl;
+
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR);
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+}
+
+static int imx_rngc_self_test(struct imx_rngc *rngc)
+{
+ u32 cmd;
+ int ret;
+
+ imx_rngc_irq_unmask(rngc);
+
+ /* run self test */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ if (rngc->err_reg != 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ unsigned int status;
+ unsigned int level;
+ int retval = 0;
+
+ while (max >= sizeof(u32)) {
+ status = readl(rngc->base + RNGC_STATUS);
+
+ /* is there some error while reading this random number? */
+ if (status & RNGC_STATUS_ERROR)
+ break;
+
+ /* how many random numbers are in FIFO? [0-16] */
+ level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
+ RNGC_STATUS_FIFO_LEVEL_SHIFT;
+
+ if (level) {
+ /* retrieve a random number from FIFO */
+ *(u32 *)data = readl(rngc->base + RNGC_FIFO);
+
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ }
+ }
+
+ return retval ? retval : -EIO;
+}
+
+static irqreturn_t imx_rngc_irq(int irq, void *priv)
+{
+ struct imx_rngc *rngc = (struct imx_rngc *)priv;
+ u32 status;
+
+ /*
+ * clearing the interrupt will also clear the error register
+ * read error and status before clearing
+ */
+ status = readl(rngc->base + RNGC_STATUS);
+ rngc->err_reg = readl(rngc->base + RNGC_ERROR);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE))
+ complete(&rngc->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_rngc_init(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ u32 cmd;
+ int ret;
+
+ /* clear error */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+
+ /* create seed, repeat while there is some statistical error */
+ do {
+ imx_rngc_irq_unmask(rngc);
+
+ /* seed creation */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done,
+ RNGC_TIMEOUT);
+
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
+
+ return rngc->err_reg ? -EIO : 0;
+}
+
+static int imx_rngc_probe(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
+ if (!rngc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rngc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rngc->base))
+ return PTR_ERR(rngc->base);
+
+ rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rngc->clk)) {
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ return PTR_ERR(rngc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Couldn't get irq %d\n", irq);
+ return irq;
+ }
+
+ ret = clk_prepare_enable(rngc->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ goto err;
+ }
+
+ init_completion(&rngc->rng_op_done);
+
+ rngc->rng.name = pdev->name;
+ rngc->rng.init = imx_rngc_init;
+ rngc->rng.read = imx_rngc_read;
+
+ rngc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rngc);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (self_test) {
+ ret = imx_rngc_self_test(rngc);
+ if (ret) {
+ dev_err(rngc->dev, "FSL RNGC self test failed.\n");
+ goto err;
+ }
+ }
+
+ ret = hwrng_register(&rngc->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "Freescale RNGC registered.\n");
+ return 0;
+
+err:
+ clk_disable_unprepare(rngc->clk);
+
+ return ret;
+}
+
+static int __exit imx_rngc_remove(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rngc->rng);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int imx_rngc_suspend(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+static int imx_rngc_resume(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_prepare_enable(rngc->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx_rngc_pm_ops = {
+ .suspend = imx_rngc_suspend,
+ .resume = imx_rngc_resume,
+};
+#endif
+
+static const struct of_device_id imx_rngc_dt_ids[] = {
+ { .compatible = "fsl,imx25-rngb", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
+
+static struct platform_driver imx_rngc_driver = {
+ .driver = {
+ .name = "imx_rngc",
+#ifdef CONFIG_PM
+ .pm = &imx_rngc_pm_ops,
+#endif
+ .of_match_table = imx_rngc_dt_ids,
+ },
+ .remove = __exit_p(imx_rngc_remove),
+};
+
+module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGC driver for i.MX");
+MODULE_LICENSE("GPL");
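
Stripped of the RNGC specifics, imx-rngc.c follows the standard hwrng provider pattern: fill in a struct hwrng with .name, an optional .init and a .read callback, then register it. A minimal sketch of that pattern (hypothetical module, not part of this patch; the placeholder callback returns zeros where real hardware would supply entropy):

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

static int toy_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* A real driver reads from hardware here, the way imx-rngc
	 * drains 32-bit words from its FIFO. Return the number of
	 * bytes written, or a negative errno. */
	memset(data, 0, max);
	return max;
}

static struct hwrng toy_rng = {
	.name = "toy-rng",
	.read = toy_rng_read,
};

static int __init toy_rng_init(void)
{
	return hwrng_register(&toy_rng);
}
module_init(toy_rng_init);

static void __exit toy_rng_exit(void)
{
	hwrng_unregister(&toy_rng);
}
module_exit(toy_rng_exit);

MODULE_LICENSE("GPL");
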
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4b75084fabad..fe33c199fc1a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -525,12 +525,26 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_ATMEL_ECC
+ tristate "Support for Microchip / Atmel ECC hw accelerator"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on I2C
+ select CRYPTO_ECDH
+ select CRC16
+ help
+ Microchip / Atmel ECC hw accelerator.
+ Select this if you want to use the Microchip / Atmel module for
+ ECDH algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-ecc.
+
config CRYPTO_DEV_CCP
- bool "Support for AMD Cryptographic Coprocessor"
+ bool "Support for AMD Secure Processor"
depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
help
- The AMD Cryptographic Coprocessor provides hardware offload support
- for encryption, hashing and related operations.
+ The AMD Secure Processor provides support for the Cryptographic Coprocessor
+ (CCP) and the Platform Security Processor (PSP) devices.
if CRYPTO_DEV_CCP
source "drivers/crypto/ccp/Kconfig"
@@ -616,6 +630,14 @@ config CRYPTO_DEV_SUN4I_SS
To compile this driver as a module, choose M here: the module
will be called sun4i-ss.
+config CRYPTO_DEV_SUN4I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN4I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
@@ -686,4 +708,25 @@ config CRYPTO_DEV_SAFEXCEL
chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash
algorithms.
+config CRYPTO_DEV_ARTPEC6
+ tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration."
+ depends on ARM && (ARCH_ARTPEC || COMPILE_TEST)
+ depends on HAS_DMA
+ depends on OF
+ select CRYPTO_AEAD
+ select CRYPTO_AES
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_CTR
+ select CRYPTO_HASH
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA384
+ select CRYPTO_SHA512
+ help
+ Enables the driver for the on-chip crypto accelerator
+ of Axis ARTPEC SoCs.
+
+ To compile this driver as a module, choose M here.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 2c555a3393b2..808432b44c6b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
@@ -35,7 +36,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
-obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/
+obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
@@ -43,3 +44,4 @@ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
new file mode 100644
index 000000000000..e66f18a0ddd0
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.c
@@ -0,0 +1,781 @@
+/*
+ * Microchip / Atmel ECC (I2C) driver.
+ *
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/ecdh.h>
+#include <crypto/kpp.h>
+#include "atmel-ecc.h"
+
+/* Used for binding tfm objects to i2c clients. */
+struct atmel_ecc_driver_data {
+ struct list_head i2c_client_list;
+ spinlock_t i2c_list_lock;
+} ____cacheline_aligned;
+
+static struct atmel_ecc_driver_data driver_data;
+
+/**
+ * atmel_ecc_i2c_client_priv - i2c_client private data
+ * @client : pointer to i2c client device
+ * @i2c_client_list_node: part of i2c_client_list
+ * @lock : lock for sending i2c commands
+ * @wake_token : wake token array of zeros
+ * @wake_token_sz : size in bytes of the wake_token
+ * @tfm_count : number of active crypto transformations on i2c client
+ *
+ * Reads and writes from/to the i2c client are sequential. The first byte
+ * transmitted to the device is treated as the byte size. Any attempt to send
+ * more than this number of bytes will cause the device to not ACK those bytes.
+ * After the host writes a single command byte to the input buffer, reads are
+ * prohibited until after the device completes command execution. Use a mutex
+ * when sending i2c commands.
+ */
+struct atmel_ecc_i2c_client_priv {
+ struct i2c_client *client;
+ struct list_head i2c_client_list_node;
+ struct mutex lock;
+ u8 wake_token[WAKE_TOKEN_MAX_SIZE];
+ size_t wake_token_sz;
+ atomic_t tfm_count ____cacheline_aligned;
+};
+
+/**
+ * atmel_ecdh_ctx - transformation context
+ * @client : pointer to i2c client device
+ * @fallback : used for unsupported curves or when the user wants to use
+ *             their own private key.
+ * @public_key : generated when calling set_secret(). It's the responsibility
+ * of the user to not call set_secret() while
+ * generate_public_key() or compute_shared_secret() are in flight.
+ * @curve_id : elliptic curve id
+ * @n_sz : size in bytes of the n prime
+ * @do_fallback: true when the device doesn't support the curve or when the user
+ *              wants to use their own private key.
+ */
+struct atmel_ecdh_ctx {
+ struct i2c_client *client;
+ struct crypto_kpp *fallback;
+ const u8 *public_key;
+ unsigned int curve_id;
+ size_t n_sz;
+ bool do_fallback;
+};
+
+/**
+ * atmel_ecc_work_data - data structure representing the work
+ * @ctx : transformation context.
+ * @cbk : pointer to a callback function to be invoked upon completion of this
+ * request. This has the form:
+ * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status)
+ * where:
+ * @work_data: data structure representing the work
+ * @areq : optional pointer to an argument passed with the original
+ * request.
+ * @status : status returned from the i2c client device or i2c error.
+ * @areq: optional pointer to a user argument for use at callback time.
+ * @work: describes the task to be executed.
+ * @cmd : structure used for communicating with the device.
+ */
+struct atmel_ecc_work_data {
+ struct atmel_ecdh_ctx *ctx;
+ void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
+ int status);
+ void *areq;
+ struct work_struct work;
+ struct atmel_ecc_cmd cmd;
+};
+
+static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
+{
+ return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
+}
+
+/**
+ * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
+ * The CRC16 is computed over the count, opcode, param1, param2 and data bytes.
+ * The checksum is saved in little-endian format in the least significant
+ * two bytes of the command. CRC polynomial is 0x8005 and the initial register
+ * value should be zero.
+ *
+ * @cmd : structure used for communicating with the device.
+ */
+static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
+{
+ u8 *data = &cmd->count;
+ size_t len = cmd->count - CRC_SIZE;
+ u16 *crc16 = (u16 *)(data + len);
+
+ *crc16 = atmel_ecc_crc16(0, data, len);
+}
+
+static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
+{
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_READ;
+ /*
+ * Read the word from Configuration zone that contains the lock bytes
+ * (UserExtra, Selector, LockValue, LockConfig).
+ */
+ cmd->param1 = CONFIG_ZONE;
+ cmd->param2 = DEVICE_LOCK_ADDR;
+ cmd->count = READ_COUNT;
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_READ;
+ cmd->rxsize = READ_RSP_SIZE;
+}
+
+static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
+{
+ cmd->word_addr = COMMAND;
+ cmd->count = GENKEY_COUNT;
+ cmd->opcode = OPCODE_GENKEY;
+ cmd->param1 = GENKEY_MODE_PRIVATE;
+ /* a random private key will be generated and stored in slot keyID */
+ cmd->param2 = cpu_to_le16(keyid);
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_GENKEY;
+ cmd->rxsize = GENKEY_RSP_SIZE;
+}
+
+static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
+ struct scatterlist *pubkey)
+{
+ size_t copied;
+
+ cmd->word_addr = COMMAND;
+ cmd->count = ECDH_COUNT;
+ cmd->opcode = OPCODE_ECDH;
+ cmd->param1 = ECDH_PREFIX_MODE;
+ /* private key slot */
+ cmd->param2 = cpu_to_le16(DATA_SLOT_2);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_ECDH;
+ cmd->rxsize = ECDH_RSP_SIZE;
+
+ return 0;
+}
+
+/*
+ * After wake and after execution of a command, there will be error, status, or
+ * result bytes in the device's output register that can be retrieved by the
+ * system. When the length of that group is four bytes, the codes returned are
+ * detailed in error_list.
+ */
+static int atmel_ecc_status(struct device *dev, u8 *status)
+{
+ size_t err_list_len = ARRAY_SIZE(error_list);
+ int i;
+ u8 err_id = status[1];
+
+ if (*status != STATUS_SIZE)
+ return 0;
+
+ if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
+ return 0;
+
+ for (i = 0; i < err_list_len; i++)
+ if (error_list[i].value == err_id)
+ break;
+
+ /* if err_id is not in the error_list then ignore it */
+ if (i != err_list_len) {
+ dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
+ return err_id;
+ }
+
+ return 0;
+}
+
+static int atmel_ecc_wakeup(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ u8 status[STATUS_RSP_SIZE];
+ int ret;
+
+ /*
+ * The device ignores any levels or transitions on the SCL pin when the
+ * device is idle, asleep or during waking up. Don't check for error
+ * when waking up the device.
+ */
+ i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+
+ /*
+ * Wait to wake the device. Typical execution times for ecdh and genkey
+ * are around tens of milliseconds. The delta is chosen as 50 microseconds.
+ */
+ usleep_range(TWHI_MIN, TWHI_MAX);
+
+ ret = i2c_master_recv(client, status, STATUS_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return atmel_ecc_status(&client->dev, status);
+}
+
+static int atmel_ecc_sleep(struct i2c_client *client)
+{
+ u8 sleep = SLEEP_TOKEN;
+
+ return i2c_master_send(client, &sleep, 1);
+}
+
+static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
+ int status)
+{
+ struct kpp_request *req = areq;
+ struct atmel_ecdh_ctx *ctx = work_data->ctx;
+ struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ size_t copied;
+ size_t n_sz = ctx->n_sz;
+
+ if (status)
+ goto free_work_data;
+
+ /* copy the shared secret */
+ copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
+ n_sz);
+ if (copied != n_sz)
+ status = -EINVAL;
+
+ /* fall through */
+free_work_data:
+ kzfree(work_data);
+ kpp_request_complete(req, status);
+}
+
+/*
+ * atmel_ecc_send_receive() - send a command to the device and receive its
+ * response.
+ * @client: i2c client device
+ * @cmd : structure used to communicate with the device
+ *
+ * After the device receives a Wake token, a watchdog counter starts within the
+ * device. After the watchdog timer expires, the device enters sleep mode
+ * regardless of whether some I/O transmission or command execution is in
+ * progress. If a command is attempted when insufficient time remains prior to
+ * watchdog timer execution, the device will return the watchdog timeout error
+ * code without attempting to execute the command. There is no way to reset the
+ * counter other than to put the device into sleep or idle mode and then
+ * wake it up again.
+ */
+static int atmel_ecc_send_receive(struct i2c_client *client,
+ struct atmel_ecc_cmd *cmd)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ int ret;
+
+ mutex_lock(&i2c_priv->lock);
+
+ ret = atmel_ecc_wakeup(client);
+ if (ret)
+ goto err;
+
+ /* send the command */
+ ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
+ if (ret < 0)
+ goto err;
+
+ /* delay the appropriate amount of time for command to execute */
+ msleep(cmd->msecs);
+
+ /* receive the response */
+ ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
+ if (ret < 0)
+ goto err;
+
+ /* put the device into low-power mode */
+ ret = atmel_ecc_sleep(client);
+ if (ret < 0)
+ goto err;
+
+ mutex_unlock(&i2c_priv->lock);
+ return atmel_ecc_status(&client->dev, cmd->data);
+err:
+ mutex_unlock(&i2c_priv->lock);
+ return ret;
+}
+
+static void atmel_ecc_work_handler(struct work_struct *work)
+{
+ struct atmel_ecc_work_data *work_data =
+ container_of(work, struct atmel_ecc_work_data, work);
+ struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ struct i2c_client *client = work_data->ctx->client;
+ int status;
+
+ status = atmel_ecc_send_receive(client, cmd);
+ work_data->cbk(work_data, work_data->areq, status);
+}
+
+static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
+ void (*cbk)(struct atmel_ecc_work_data *work_data,
+ void *areq, int status),
+ void *areq)
+{
+ work_data->cbk = (void *)cbk;
+ work_data->areq = areq;
+
+ INIT_WORK(&work_data->work, atmel_ecc_work_handler);
+ schedule_work(&work_data->work);
+}
+
+static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
+{
+ if (curve_id == ECC_CURVE_NIST_P256)
+ return ATMEL_ECC_NIST_P256_N_SIZE;
+
+ return 0;
+}
+
+/*
+ * A random private key is generated and stored in the device. The device
+ * returns the pair public key.
+ */
+static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct atmel_ecc_cmd *cmd;
+ void *public_key;
+ struct ecdh params;
+ int ret = -ENOMEM;
+
+ /* free the old public key, if any */
+ kfree(ctx->public_key);
+ /* make sure you don't free the old public key twice */
+ ctx->public_key = NULL;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(&ctx->client->dev, "crypto_ecdh_decode_key failed\n");
+ return -EINVAL;
+ }
+
+ ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
+ if (!ctx->n_sz || params.key_size) {
+ /* fallback to ecdh software implementation */
+ ctx->do_fallback = true;
+ return crypto_kpp_set_secret(ctx->fallback, buf, len);
+ }
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ public_key = kmalloc(ATMEL_ECC_PUBKEY_SIZE, GFP_KERNEL);
+ if (!public_key)
+ goto free_cmd;
+
+ ctx->do_fallback = false;
+ ctx->curve_id = params.curve_id;
+
+ atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
+
+ ret = atmel_ecc_send_receive(ctx->client, cmd);
+ if (ret)
+ goto free_public_key;
+
+ /* save the public key */
+ memcpy(public_key, &cmd->data[RSP_DATA_IDX], ATMEL_ECC_PUBKEY_SIZE);
+ ctx->public_key = public_key;
+
+ kfree(cmd);
+ return 0;
+
+free_public_key:
+ kfree(public_key);
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+static int atmel_ecdh_generate_public_key(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ size_t copied;
+ int ret = 0;
+
+ if (ctx->do_fallback) {
+ kpp_request_set_tfm(req, ctx->fallback);
+ return crypto_kpp_generate_public_key(req);
+ }
+
+ /* public key was saved at private key generation */
+ copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
+ ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct atmel_ecc_work_data *work_data;
+ gfp_t gfp;
+ int ret;
+
+ if (ctx->do_fallback) {
+ kpp_request_set_tfm(req, ctx->fallback);
+ return crypto_kpp_compute_shared_secret(req);
+ }
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ work_data = kmalloc(sizeof(*work_data), gfp);
+ if (!work_data)
+ return -ENOMEM;
+
+ work_data->ctx = ctx;
+
+ ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
+ if (ret)
+ goto free_work_data;
+
+ atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
+
+ return -EINPROGRESS;
+
+free_work_data:
+ kfree(work_data);
+ return ret;
+}
+
+static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
+ struct i2c_client *client = ERR_PTR(-ENODEV);
+ int min_tfm_cnt = INT_MAX;
+ int tfm_cnt;
+
+ spin_lock(&driver_data.i2c_list_lock);
+
+ if (list_empty(&driver_data.i2c_client_list)) {
+ spin_unlock(&driver_data.i2c_list_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ list_for_each_entry(i2c_priv, &driver_data.i2c_client_list,
+ i2c_client_list_node) {
+ tfm_cnt = atomic_read(&i2c_priv->tfm_count);
+ if (tfm_cnt < min_tfm_cnt) {
+ min_tfm_cnt = tfm_cnt;
+ min_i2c_priv = i2c_priv;
+ }
+ if (!min_tfm_cnt)
+ break;
+ }
+
+ if (min_i2c_priv) {
+ atomic_inc(&min_i2c_priv->tfm_count);
+ client = min_i2c_priv->client;
+ }
+
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ return client;
+}
+
+static void atmel_ecc_i2c_client_free(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+ atomic_dec(&i2c_priv->tfm_count);
+}
+
+static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
+{
+ const char *alg = kpp_alg_name(tfm);
+ struct crypto_kpp *fallback;
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->client = atmel_ecc_i2c_client_alloc();
+ if (IS_ERR(ctx->client)) {
+ pr_err("tfm - i2c_client binding failed\n");
+ return PTR_ERR(ctx->client);
+ }
+
+ fallback = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
+
+ dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
+
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void atmel_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ kfree(ctx->public_key);
+ crypto_free_kpp(ctx->fallback);
+ atmel_ecc_i2c_client_free(ctx->client);
+}
+
+static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->fallback);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ return ATMEL_ECC_PUBKEY_SIZE;
+}
+
+static struct kpp_alg atmel_ecdh = {
+ .set_secret = atmel_ecdh_set_secret,
+ .generate_public_key = atmel_ecdh_generate_public_key,
+ .compute_shared_secret = atmel_ecdh_compute_shared_secret,
+ .init = atmel_ecdh_init_tfm,
+ .exit = atmel_ecdh_exit_tfm,
+ .max_size = atmel_ecdh_max_size,
+ .base = {
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_name = "ecdh",
+ .cra_driver_name = "atmel-ecdh",
+ .cra_priority = ATMEL_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct atmel_ecdh_ctx),
+ },
+};
+
+static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
+{
+ u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
+
+ /* return the size of the wake_token in bytes */
+ return DIV_ROUND_UP(no_of_bits, 8);
+}
+
+static int device_sanity_check(struct i2c_client *client)
+{
+ struct atmel_ecc_cmd *cmd;
+ int ret;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ atmel_ecc_init_read_cmd(cmd);
+
+ ret = atmel_ecc_send_receive(client, cmd);
+ if (ret)
+ goto free_cmd;
+
+ /*
+ * It is vital that the Configuration, Data and OTP zones be locked
+ * prior to release into the field of the system containing the device.
+ * Failure to lock these zones may permit modification of any secret
+ * keys and may lead to other security problems.
+ */
+ if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
+ dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
+ ret = -ENOTSUPP;
+ }
+
+ /* fall through */
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+static int atmel_ecc_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv;
+ struct device *dev = &client->dev;
+ int ret;
+ u32 bus_clk_rate;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "I2C_FUNC_I2C not supported\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(client->adapter->dev.of_node,
+ "clock-frequency", &bus_clk_rate);
+ if (ret) {
+ dev_err(dev, "of: failed to read clock-frequency property\n");
+ return ret;
+ }
+
+ if (bus_clk_rate > 1000000L) {
+ dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+ bus_clk_rate);
+ return -EINVAL;
+ }
+
+ i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
+ if (!i2c_priv)
+ return -ENOMEM;
+
+ i2c_priv->client = client;
+ mutex_init(&i2c_priv->lock);
+
+ /*
+ * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
+ * 1MHz. The previous bus_clk_rate check ensures that wake_token_sz
+ * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
+ */
+ i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
+
+ memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
+
+ atomic_set(&i2c_priv->tfm_count, 0);
+
+ i2c_set_clientdata(client, i2c_priv);
+
+ ret = device_sanity_check(client);
+ if (ret)
+ return ret;
+
+ spin_lock(&driver_data.i2c_list_lock);
+ list_add_tail(&i2c_priv->i2c_client_list_node,
+ &driver_data.i2c_client_list);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ ret = crypto_register_kpp(&atmel_ecdh);
+ if (ret) {
+ spin_lock(&driver_data.i2c_list_lock);
+ list_del(&i2c_priv->i2c_client_list_node);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ dev_err(dev, "%s alg registration failed\n",
+ atmel_ecdh.base.cra_driver_name);
+ } else {
+ dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
+ }
+
+ return ret;
+}
+
+static int atmel_ecc_remove(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+ /* Return EBUSY if i2c client already allocated. */
+ if (atomic_read(&i2c_priv->tfm_count)) {
+ dev_err(&client->dev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ crypto_unregister_kpp(&atmel_ecdh);
+
+ spin_lock(&driver_data.i2c_list_lock);
+ list_del(&i2c_priv->i2c_client_list_node);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ecc_dt_ids[] = {
+ {
+ .compatible = "atmel,atecc508a",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
+#endif
+
+static const struct i2c_device_id atmel_ecc_id[] = {
+ { "atecc508a", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
+
+static struct i2c_driver atmel_ecc_driver = {
+ .driver = {
+ .name = "atmel-ecc",
+ .of_match_table = of_match_ptr(atmel_ecc_dt_ids),
+ },
+ .probe = atmel_ecc_probe,
+ .remove = atmel_ecc_remove,
+ .id_table = atmel_ecc_id,
+};
+
+static int __init atmel_ecc_init(void)
+{
+ spin_lock_init(&driver_data.i2c_list_lock);
+ INIT_LIST_HEAD(&driver_data.i2c_client_list);
+ return i2c_add_driver(&atmel_ecc_driver);
+}
+
+static void __exit atmel_ecc_exit(void)
+{
+ flush_scheduled_work();
+ i2c_del_driver(&atmel_ecc_driver);
+}
+
+module_init(atmel_ecc_init);
+module_exit(atmel_ecc_exit);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
+MODULE_LICENSE("GPL v2");
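
Since the driver registers "ecdh" through the kpp API (at priority 300, above the generic software implementation), a kernel consumer drives it like any other kpp transform. A sketch under those assumptions (asynchronous -EINPROGRESS completion handling elided; an empty key with key_size = 0 takes the on-chip key-generation path shown in atmel_ecdh_set_secret() above):

#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int ecdh_pubkey_demo(void)
{
	struct ecdh p = { .curve_id = ECC_CURVE_NIST_P256, .key_size = 0 };
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	struct scatterlist dst;
	char *packed = NULL;
	u8 *pub = NULL;
	unsigned int len;
	int err;

	tfm = crypto_alloc_kpp("ecdh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = -ENOMEM;
	len = crypto_ecdh_key_len(&p);
	packed = kmalloc(len, GFP_KERNEL);
	pub = kmalloc(64, GFP_KERNEL);	/* 2 * 32 bytes for a NIST P256 public key */
	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!packed || !pub || !req)
		goto out;

	/* Pack the (empty) private key and hand it to the driver. */
	crypto_ecdh_encode_key(packed, len, &p);
	err = crypto_kpp_set_secret(tfm, packed, len);
	if (err)
		goto out;

	sg_init_one(&dst, pub, 64);
	kpp_request_set_output(req, &dst, 64);
	err = crypto_kpp_generate_public_key(req);
out:
	kpp_request_free(req);
	kfree(pub);
	kfree(packed);
	crypto_free_kpp(tfm);
	return err;
}
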
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
new file mode 100644
index 000000000000..25232c8abcc2
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ATMEL_ECC_H__
+#define __ATMEL_ECC_H__
+
+#define ATMEL_ECC_PRIORITY 300
+
+#define COMMAND 0x03 /* packet function */
+#define SLEEP_TOKEN 0x01
+#define WAKE_TOKEN_MAX_SIZE 8
+
+/* Definitions of Data and Command sizes */
+#define WORD_ADDR_SIZE 1
+#define COUNT_SIZE 1
+#define CRC_SIZE 2
+#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
+
+/* size in bytes of the n prime */
+#define ATMEL_ECC_NIST_P256_N_SIZE 32
+#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
+
+#define STATUS_RSP_SIZE 4
+#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
+#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
+ CMD_OVERHEAD_SIZE)
+#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
+#define MAX_RSP_SIZE GENKEY_RSP_SIZE
+
+/**
+ * atmel_ecc_cmd - structure used for communicating with the device.
+ * @word_addr: indicates the function of the packet sent to the device. This
+ * byte should have a value of COMMAND for normal operation.
+ * @count : number of bytes to be transferred to (or from) the device.
+ * @opcode : the command code.
+ * @param1 : the first parameter; always present.
+ * @param2 : the second parameter; always present.
+ * @data : optional remaining input data. Includes a 2-byte CRC.
+ * @rxsize : size of the data received from i2c client.
+ * @msecs : command execution time in milliseconds
+ */
+struct atmel_ecc_cmd {
+ u8 word_addr;
+ u8 count;
+ u8 opcode;
+ u8 param1;
+ u16 param2;
+ u8 data[MAX_RSP_SIZE];
+ u8 msecs;
+ u16 rxsize;
+} __packed;
+
+/* Status/Error codes */
+#define STATUS_SIZE 0x04
+#define STATUS_NOERR 0x00
+#define STATUS_WAKE_SUCCESSFUL 0x11
+
+static const struct {
+ u8 value;
+ const char *error_text;
+} error_list[] = {
+ { 0x01, "CheckMac or Verify miscompare" },
+ { 0x03, "Parse Error" },
+ { 0x05, "ECC Fault" },
+ { 0x0F, "Execution Error" },
+ { 0xEE, "Watchdog about to expire" },
+ { 0xFF, "CRC or other communication error" },
+};
+
+/* Definitions for eeprom organization */
+#define CONFIG_ZONE 0
+
+/* Definitions for Indexes common to all commands */
+#define RSP_DATA_IDX 1 /* buffer index of data in response */
+#define DATA_SLOT_2 2 /* used for ECDH private key */
+
+/* Definitions for the device lock state */
+#define DEVICE_LOCK_ADDR 0x15
+#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
+#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
+
+/*
+ * Wake High delay to data communication (microseconds). SDA should be stable
+ * high for this entire duration.
+ */
+#define TWHI_MIN 1500
+#define TWHI_MAX 1550
+
+/* Wake Low duration */
+#define TWLO_USEC 60
+
+/* Command execution time (milliseconds) */
+#define MAX_EXEC_TIME_ECDH 58
+#define MAX_EXEC_TIME_GENKEY 115
+#define MAX_EXEC_TIME_READ 1
+
+/* Command opcode */
+#define OPCODE_ECDH 0x43
+#define OPCODE_GENKEY 0x40
+#define OPCODE_READ 0x02
+
+/* Definitions for the READ Command */
+#define READ_COUNT 7
+
+/* Definitions for the GenKey Command */
+#define GENKEY_COUNT 7
+#define GENKEY_MODE_PRIVATE 0x04
+
+/* Definitions for the ECDH Command */
+#define ECDH_COUNT 71
+#define ECDH_PREFIX_MODE 0x00
+
+#endif /* __ATMEL_ECC_H__ */
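
The checksum convention these definitions support (see atmel_ecc_checksum() above: bitrev16(crc16(0, buf, len)), CRC polynomial 0x8005, stored little-endian after the data) can be reproduced outside the kernel. An illustrative standalone sketch, using the READ command bytes (count 7, opcode 0x02, param1 0, param2 0x0015) as test input:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise equivalent of the kernel's crc16() (reflected 0x8005 -> 0xA001). */
static uint16_t crc16_refl(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
	}
	return crc;
}

static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;

	for (int i = 0; i < 16; i++)
		r |= ((x >> i) & 1) << (15 - i);
	return r;
}

int main(void)
{
	/* count, opcode, param1, param2 (little-endian) of a READ command */
	const uint8_t pkt[] = { 0x07, 0x02, 0x00, 0x15, 0x00 };
	uint16_t crc = bitrev16(crc16_refl(0, pkt, sizeof(pkt)));

	/* The device expects the CRC appended least-significant byte first. */
	printf("crc = %02x %02x\n", crc & 0xff, crc >> 8);
	return 0;
}
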
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index dad4e5bad827..3e2f41b3eaf3 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2883,7 +2883,7 @@ sha_dd_err:
static int atmel_sha_remove(struct platform_device *pdev)
{
- static struct atmel_sha_dev *sha_dd;
+ struct atmel_sha_dev *sha_dd;
sha_dd = platform_get_drvdata(pdev);
if (!sha_dd)
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index b25f1b3c981f..f4b335dda568 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1487,7 +1487,7 @@ tdes_dd_err:
static int atmel_tdes_remove(struct platform_device *pdev)
{
- static struct atmel_tdes_dev *tdes_dd;
+ struct atmel_tdes_dev *tdes_dd;
tdes_dd = platform_get_drvdata(pdev);
if (!tdes_dd)
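
The two hunks above each drop a stray `static` from a function-local pointer in a remove() callback. A static local is a single persistent object shared by every invocation rather than a fresh stack variable, which is wasteful and misleading for per-call scratch state; a tiny illustration of the difference:

#include <stdio.h>

static int next_auto(void)
{
	int n = 0;		/* fresh object on every call */
	return ++n;
}

static int next_static(void)
{
	static int n;		/* one shared object, persists across calls */
	return ++n;
}

int main(void)
{
	int a1 = next_auto(), a2 = next_auto();
	int s1 = next_static(), s2 = next_static();

	printf("auto:   %d %d\n", a1, a2);	/* auto:   1 1 */
	printf("static: %d %d\n", s1, s2);	/* static: 1 2 */
	return 0;
}
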
diff --git a/drivers/crypto/axis/Makefile b/drivers/crypto/axis/Makefile
new file mode 100644
index 000000000000..be9a84a4b667
--- /dev/null
+++ b/drivers/crypto/axis/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) := artpec6_crypto.o
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
new file mode 100644
index 000000000000..d9fbbf01062b
--- /dev/null
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -0,0 +1,3192 @@
+/*
+ * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
+ *
+ * Copyright (C) 2014-2017 Axis Communications AB
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+
+/* Max length of a line in all cache levels for Artpec SoCs. */
+#define ARTPEC_CACHE_LINE_MAX 32
+
+#define PDMA_OUT_CFG 0x0000
+#define PDMA_OUT_BUF_CFG 0x0004
+#define PDMA_OUT_CMD 0x0008
+#define PDMA_OUT_DESCRQ_PUSH 0x0010
+#define PDMA_OUT_DESCRQ_STAT 0x0014
+
+#define A6_PDMA_IN_CFG 0x0028
+#define A6_PDMA_IN_BUF_CFG 0x002c
+#define A6_PDMA_IN_CMD 0x0030
+#define A6_PDMA_IN_STATQ_PUSH 0x0038
+#define A6_PDMA_IN_DESCRQ_PUSH 0x0044
+#define A6_PDMA_IN_DESCRQ_STAT 0x0048
+#define A6_PDMA_INTR_MASK 0x0068
+#define A6_PDMA_ACK_INTR 0x006c
+#define A6_PDMA_MASKED_INTR 0x0074
+
+#define A7_PDMA_IN_CFG 0x002c
+#define A7_PDMA_IN_BUF_CFG 0x0030
+#define A7_PDMA_IN_CMD 0x0034
+#define A7_PDMA_IN_STATQ_PUSH 0x003c
+#define A7_PDMA_IN_DESCRQ_PUSH 0x0048
+#define A7_PDMA_IN_DESCRQ_STAT 0x004C
+#define A7_PDMA_INTR_MASK 0x006c
+#define A7_PDMA_ACK_INTR 0x0070
+#define A7_PDMA_MASKED_INTR 0x0078
+
+#define PDMA_OUT_CFG_EN BIT(0)
+
+#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
+#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
+
+#define PDMA_OUT_CMD_START BIT(0)
+#define A6_PDMA_OUT_CMD_STOP BIT(3)
+#define A7_PDMA_OUT_CMD_STOP BIT(2)
+
+#define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
+#define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)
+
+#define PDMA_IN_CFG_EN BIT(0)
+
+#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
+#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
+#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)
+
+#define PDMA_IN_CMD_START BIT(0)
+#define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
+#define A6_PDMA_IN_CMD_STOP BIT(3)
+#define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
+#define A7_PDMA_IN_CMD_STOP BIT(2)
+
+#define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
+#define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)
+
+#define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
+#define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
+#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)
+
+#define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
+#define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
+#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)
+
+#define A6_CRY_MD_OPER GENMASK(19, 16)
+
+#define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
+#define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
+
+#define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
+#define A6_CRY_MD_CIPHER_DECR BIT(22)
+#define A6_CRY_MD_CIPHER_TWEAK BIT(23)
+#define A6_CRY_MD_CIPHER_DSEQ BIT(24)
+
+#define A7_CRY_MD_OPER GENMASK(11, 8)
+
+#define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
+#define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
+
+#define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
+#define A7_CRY_MD_CIPHER_DECR BIT(14)
+#define A7_CRY_MD_CIPHER_TWEAK BIT(15)
+#define A7_CRY_MD_CIPHER_DSEQ BIT(16)
+
+/* DMA metadata constants */
+#define regk_crypto_aes_cbc 0x00000002
+#define regk_crypto_aes_ctr 0x00000003
+#define regk_crypto_aes_ecb 0x00000001
+#define regk_crypto_aes_gcm 0x00000004
+#define regk_crypto_aes_xts 0x00000005
+#define regk_crypto_cache 0x00000002
+#define a6_regk_crypto_dlkey 0x0000000a
+#define a7_regk_crypto_dlkey 0x0000000e
+#define regk_crypto_ext 0x00000001
+#define regk_crypto_hmac_sha1 0x00000007
+#define regk_crypto_hmac_sha256 0x00000009
+#define regk_crypto_hmac_sha384 0x0000000b
+#define regk_crypto_hmac_sha512 0x0000000d
+#define regk_crypto_init 0x00000000
+#define regk_crypto_key_128 0x00000000
+#define regk_crypto_key_192 0x00000001
+#define regk_crypto_key_256 0x00000002
+#define regk_crypto_null 0x00000000
+#define regk_crypto_sha1 0x00000006
+#define regk_crypto_sha256 0x00000008
+#define regk_crypto_sha384 0x0000000a
+#define regk_crypto_sha512 0x0000000c
+
+/* DMA descriptor structures */
+struct pdma_descr_ctrl {
+ unsigned char short_descr : 1;
+ unsigned char pad1 : 1;
+ unsigned char eop : 1;
+ unsigned char intr : 1;
+ unsigned char short_len : 3;
+ unsigned char pad2 : 1;
+} __packed;
+
+struct pdma_data_descr {
+ unsigned int len : 24;
+ unsigned int buf : 32;
+} __packed;
+
+struct pdma_short_descr {
+ unsigned char data[7];
+} __packed;
+
+struct pdma_descr {
+ struct pdma_descr_ctrl ctrl;
+ union {
+ struct pdma_data_descr data;
+ struct pdma_short_descr shrt;
+ };
+};
+
+struct pdma_stat_descr {
+ unsigned char pad1 : 1;
+ unsigned char pad2 : 1;
+ unsigned char eop : 1;
+ unsigned char pad3 : 5;
+ unsigned int len : 24;
+};
+
+/* Each descriptor array can hold max 64 entries */
+#define PDMA_DESCR_COUNT 64
+
+#define MODULE_NAME "Artpec-6 CA"
+
+/* Hash modes (including HMAC variants) */
+#define ARTPEC6_CRYPTO_HASH_SHA1 1
+#define ARTPEC6_CRYPTO_HASH_SHA256 2
+#define ARTPEC6_CRYPTO_HASH_SHA384 3
+#define ARTPEC6_CRYPTO_HASH_SHA512 4
+
+/* Crypto modes */
+#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
+#define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
+#define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
+#define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5
+
+/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
+ * It operates on a descriptor array with up to 64 descriptor entries.
+ * The arrays must be 64 byte aligned in memory.
+ *
+ * The ciphering unit has no registers and is completely controlled by
+ * a 4-byte metadata that is inserted at the beginning of each dma packet.
+ *
+ * A dma packet is a sequence of descriptors terminated by setting the .eop
+ * field in the final descriptor of the packet.
+ *
+ * Multiple packets are used for providing context data, key data and
+ * the plain/ciphertext.
+ *
+ * PDMA Descriptors (Array)
+ * +------+------+------+~~+-------+------+----
+ * | 0 | 1 | 2 |~~| 11 EOP| 12 | ....
+ * +--+---+--+---+----+-+~~+-------+----+-+----
+ * | | | | |
+ * | | | | |
+ * __|__ +-------++-------++-------+ +----+
+ * | MD | |Payload||Payload||Payload| | MD |
+ * +-----+ +-------++-------++-------+ +----+
+ */
+
+struct artpec6_crypto_bounce_buffer {
+ struct list_head list;
+ size_t length;
+ struct scatterlist *sg;
+ size_t offset;
+ /* buf is aligned to ARTPEC_CACHE_LINE_MAX and
+ * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
+ */
+ void *buf;
+};
+
+struct artpec6_crypto_dma_map {
+ dma_addr_t dma_addr;
+ size_t size;
+ enum dma_data_direction dir;
+};
+
+struct artpec6_crypto_dma_descriptors {
+ struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
+ struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
+ u32 stat[PDMA_DESCR_COUNT] __aligned(64);
+ struct list_head bounce_buffers;
+ /* Enough maps for all out/in buffers, and all three descr. arrays */
+ struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
+ dma_addr_t out_dma_addr;
+ dma_addr_t in_dma_addr;
+ dma_addr_t stat_dma_addr;
+ size_t out_cnt;
+ size_t in_cnt;
+ size_t map_count;
+};
+
+enum artpec6_crypto_variant {
+ ARTPEC6_CRYPTO,
+ ARTPEC7_CRYPTO,
+};
+
+struct artpec6_crypto {
+ void __iomem *base;
+ spinlock_t queue_lock;
+ struct list_head queue; /* waiting for pdma fifo space */
+ struct list_head pending; /* submitted to pdma fifo */
+ struct tasklet_struct task;
+ struct kmem_cache *dma_cache;
+ int pending_count;
+ struct timer_list timer;
+ enum artpec6_crypto_variant variant;
+ void *pad_buffer; /* cache-aligned block padding buffer */
+ void *zero_buffer;
+};
+
+enum artpec6_crypto_hash_flags {
+ HASH_FLAG_INIT_CTX = 2,
+ HASH_FLAG_UPDATE = 4,
+ HASH_FLAG_FINALIZE = 8,
+ HASH_FLAG_HMAC = 16,
+ HASH_FLAG_UPDATE_KEY = 32,
+};
+
+struct artpec6_crypto_req_common {
+ struct list_head list;
+ struct artpec6_crypto_dma_descriptors *dma;
+ struct crypto_async_request *req;
+ void (*complete)(struct crypto_async_request *req);
+ gfp_t gfp_flags;
+};
+
+struct artpec6_hash_request_context {
+ char partial_buffer[SHA512_BLOCK_SIZE];
+ char partial_buffer_out[SHA512_BLOCK_SIZE];
+ char key_buffer[SHA512_BLOCK_SIZE];
+ char pad_buffer[SHA512_BLOCK_SIZE + 32];
+ unsigned char digeststate[SHA512_DIGEST_SIZE];
+ size_t partial_bytes;
+ u64 digcnt;
+ u32 key_md;
+ u32 hash_md;
+ enum artpec6_crypto_hash_flags hash_flags;
+ struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_hash_export_state {
+ char partial_buffer[SHA512_BLOCK_SIZE];
+ unsigned char digeststate[SHA512_DIGEST_SIZE];
+ size_t partial_bytes;
+ u64 digcnt;
+ int oper;
+ unsigned int hash_flags;
+};
+
+struct artpec6_hashalg_context {
+ char hmac_key[SHA512_BLOCK_SIZE];
+ size_t hmac_key_length;
+ struct crypto_shash *child_hash;
+};
+
+struct artpec6_crypto_request_context {
+ u32 cipher_md;
+ bool decrypt;
+ struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_cryptotfm_context {
+ unsigned char aes_key[2*AES_MAX_KEY_SIZE];
+ size_t key_length;
+ u32 key_md;
+ int crypto_type;
+ struct crypto_skcipher *fallback;
+};
+
+struct artpec6_crypto_aead_hw_ctx {
+ __be64 aad_length_bits;
+ __be64 text_length_bits;
+ __u8 J0[AES_BLOCK_SIZE];
+};
+
+struct artpec6_crypto_aead_req_ctx {
+ struct artpec6_crypto_aead_hw_ctx hw_ctx;
+ u32 cipher_md;
+ bool decrypt;
+ struct artpec6_crypto_req_common common;
+ __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
+};
+
+/* The crypto framework makes it hard to avoid this global. */
+static struct device *artpec6_crypto_dev;
+
+static struct dentry *dbgfs_root;
+
+#ifdef CONFIG_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
+#endif
+
+enum {
+ ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
+ ARTPEC6_CRYPTO_PREPARE_HASH_START,
+};
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq);
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
+
+static void
+artpec6_crypto_complete_crypto(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_aead(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_hash(struct crypto_async_request *req);
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
+
+static void
+artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
+
+struct artpec6_crypto_walk {
+ struct scatterlist *sg;
+ size_t offset;
+};
+
+static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
+ struct scatterlist *sg)
+{
+ awalk->sg = sg;
+ awalk->offset = 0;
+}
+
+static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
+ size_t nbytes)
+{
+ while (nbytes && awalk->sg) {
+ size_t piece;
+
+ WARN_ON(awalk->offset > awalk->sg->length);
+
+ piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
+ nbytes -= piece;
+ awalk->offset += piece;
+ if (awalk->offset == awalk->sg->length) {
+ awalk->sg = sg_next(awalk->sg);
+ awalk->offset = 0;
+ }
+
+ }
+
+ return nbytes;
+}
+
+static size_t
+artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
+{
+ WARN_ON(awalk->sg->length == awalk->offset);
+
+ return awalk->sg->length - awalk->offset;
+}
+
+static dma_addr_t
+artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
+{
+ return sg_phys(awalk->sg) + awalk->offset;
+}
+
+static void
+artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct artpec6_crypto_bounce_buffer *b;
+ struct artpec6_crypto_bounce_buffer *next;
+
+ list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+ pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
+ b, b->length, b->offset, b->buf);
+ sg_pcopy_from_buffer(b->sg,
+ 1,
+ b->buf,
+ b->length,
+ b->offset);
+
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+static inline bool artpec6_crypto_busy(void)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ int fifo_count = ac->pending_count;
+
+ return fifo_count > 6;
+}
+
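+/* Submit a prepared request to the PDMA. Returns -EINPROGRESS if the
+ * request was started, and -EBUSY if it was either backlogged (when
+ * CRYPTO_TFM_REQ_MAY_BACKLOG is set) or dropped, following the usual
+ * crypto API convention.
+ */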
+static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ int ret = -EBUSY;
+
+ spin_lock_bh(&ac->queue_lock);
+
+ if (!artpec6_crypto_busy()) {
+ list_add_tail(&req->list, &ac->pending);
+ artpec6_crypto_start_dma(req);
+ ret = -EINPROGRESS;
+ } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ list_add_tail(&req->list, &ac->queue);
+ } else {
+ artpec6_crypto_common_destroy(req);
+ }
+
+ spin_unlock_bh(&ac->queue_lock);
+
+ return ret;
+}
+
+static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ u32 ind, statd, outd;
+
+ /* Make descriptor content visible to the DMA before starting it. */
+ wmb();
+
+ ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
+ FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
+
+ statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
+ FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
+
+ outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
+ FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
+ writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
+ writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
+ } else {
+ writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
+ writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
+ writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
+ }
+
+ writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
+ writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
+
+ ac->pending_count++;
+}
+
+static void
+artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+
+ dma->out_cnt = 0;
+ dma->in_cnt = 0;
+ dma->map_count = 0;
+ INIT_LIST_HEAD(&dma->bounce_buffers);
+}
+
+static bool fault_inject_dma_descr(void)
+{
+#ifdef CONFIG_FAULT_INJECTION
+ return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
+#else
+ return false;
+#endif
+}
+
+/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
+ * physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len: The length of the data buffer
+ * @eop: True if this is the last buffer in the packet
+ *
+ * @return 0 on success or -ENOSPC if there are no more descriptors available
+ */
+static int
+artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
+ dma_addr_t addr, size_t len, bool eop)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free OUT DMA descriptors available!\n");
+ return -ENOSPC;
+ }
+
+ d = &dma->out[dma->out_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.short_descr = 0;
+ d->ctrl.eop = eop;
+ d->data.len = len;
+ d->data.buf = addr;
+ return 0;
+}
+
+/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data, must be between 1 and 7 bytes
+ * @eop: True if this is the last buffer in the packet
+ *
+ * @return 0 on success
+ * -ENOSPC if no more descriptors are available
+ * -EINVAL if the data length is not between 1 and 7 bytes
+ */
+static int
+artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
+ void *dst, unsigned int len, bool eop)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free OUT DMA descriptors available!\n");
+ return -ENOSPC;
+ } else if (len > 7 || len < 1) {
+ return -EINVAL;
+ }
+ d = &dma->out[dma->out_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.short_descr = 1;
+ d->ctrl.short_len = len;
+ d->ctrl.eop = eop;
+ memcpy(d->shrt.data, dst, len);
+ return 0;
+}
+
+static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
+ struct page *page, size_t offset,
+ size_t size,
+ enum dma_data_direction dir,
+ dma_addr_t *dma_addr_out)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct device *dev = artpec6_crypto_dev;
+ struct artpec6_crypto_dma_map *map;
+ dma_addr_t dma_addr;
+
+ *dma_addr_out = 0;
+
+ if (dma->map_count >= ARRAY_SIZE(dma->maps))
+ return -ENOMEM;
+
+ dma_addr = dma_map_page(dev, page, offset, size, dir);
+ if (dma_mapping_error(dev, dma_addr))
+ return -ENOMEM;
+
+ map = &dma->maps[dma->map_count++];
+ map->size = size;
+ map->dma_addr = dma_addr;
+ map->dir = dir;
+
+ *dma_addr_out = dma_addr;
+
+ return 0;
+}
+
+static int
+artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
+ void *ptr, size_t size,
+ enum dma_data_direction dir,
+ dma_addr_t *dma_addr_out)
+{
+ struct page *page = virt_to_page(ptr);
+ size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
+
+ return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
+ dma_addr_out);
+}
+
+static int
+artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ int ret;
+
+ ret = artpec6_crypto_dma_map_single(common, dma->in,
+ sizeof(dma->in[0]) * dma->in_cnt,
+ DMA_TO_DEVICE, &dma->in_dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_dma_map_single(common, dma->out,
+ sizeof(dma->out[0]) * dma->out_cnt,
+ DMA_TO_DEVICE, &dma->out_dma_addr);
+ if (ret)
+ return ret;
+
+ /* We only read one stat descriptor */
+ dma->stat[dma->in_cnt - 1] = 0;
+
+ /*
+ * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
+ * to be written.
+ */
+ return artpec6_crypto_dma_map_single(common,
+ dma->stat + dma->in_cnt - 1,
+ sizeof(dma->stat[0]),
+ DMA_BIDIRECTIONAL,
+ &dma->stat_dma_addr);
+}
+
+static void
+artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct device *dev = artpec6_crypto_dev;
+ int i;
+
+ for (i = 0; i < dma->map_count; i++) {
+ struct artpec6_crypto_dma_map *map = &dma->maps[i];
+
+ dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
+ }
+
+ dma->map_count = 0;
+}
+
+/** artpec6_crypto_setup_out_descr - Setup an out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data
+ * @eop: True if this is the last buffer in the packet
+ * @use_short: If this is true and the data length is 7 bytes or less then
+ * a short descriptor will be used
+ *
+ * @return 0 on success
+ * Any errors from artpec6_crypto_setup_out_descr_short() or
+ * artpec6_crypto_setup_out_descr_phys()
+ */
+static int
+artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
+ void *dst, unsigned int len, bool eop,
+ bool use_short)
+{
+	if (use_short && len <= 7) {
+ return artpec6_crypto_setup_out_descr_short(common, dst, len,
+ eop);
+ } else {
+ int ret;
+ dma_addr_t dma_addr;
+
+ ret = artpec6_crypto_dma_map_single(common, dst, len,
+ DMA_TO_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
+ len, eop);
+ }
+}
+
+/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
+ * physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len: The length of the data buffer
+ * @intr: True if an interrupt should be fired after HW processing of this
+ * descriptor
+ *
+ */
+static int
+artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
+ dma_addr_t addr, unsigned int len, bool intr)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->in_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free IN DMA descriptors available!\n");
+ return -ENOSPC;
+ }
+ d = &dma->in[dma->in_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.intr = intr;
+ d->data.len = len;
+ d->data.buf = addr;
+ return 0;
+}
+
+/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
+ *
+ * @buffer: The virtual address of the data buffer
+ * @len: The length of the data buffer
+ * @last: If this is the last data buffer in the request (i.e. an interrupt
+ *        is needed)
+ *
+ * Short descriptors are not used for the in channel
+ */
+static int
+artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
+ void *buffer, unsigned int len, bool last)
+{
+ dma_addr_t dma_addr;
+ int ret;
+
+ ret = artpec6_crypto_dma_map_single(common, buffer, len,
+ DMA_FROM_DEVICE, &dma_addr);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
+}
+
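+/* Allocate a bounce buffer. The allocation is 2 * ARTPEC_CACHE_LINE_MAX
+ * bytes larger than the struct so that a cache-line-aligned buffer of up
+ * to ARTPEC_CACHE_LINE_MAX bytes can be carved out of it.
+ */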
+static struct artpec6_crypto_bounce_buffer *
+artpec6_crypto_alloc_bounce(gfp_t flags)
+{
+ void *base;
+ size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
+ 2 * ARTPEC_CACHE_LINE_MAX;
+ struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
+
+ if (!bbuf)
+ return NULL;
+
+ base = bbuf + 1;
+ bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
+ return bbuf;
+}
+
+static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk, size_t size)
+{
+ struct artpec6_crypto_bounce_buffer *bbuf;
+ int ret;
+
+ bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
+ if (!bbuf)
+ return -ENOMEM;
+
+ bbuf->length = size;
+ bbuf->sg = walk->sg;
+ bbuf->offset = walk->offset;
+
+ ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
+ if (ret) {
+ kfree(bbuf);
+ return ret;
+ }
+
+ pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
+ list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
+ return 0;
+}
+
+static int
+artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk,
+ size_t count)
+{
+ size_t chunk;
+ int ret;
+ dma_addr_t addr;
+
+ while (walk->sg && count) {
+ chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+ addr = artpec6_crypto_walk_chunk_phys(walk);
+
+ /* When destination buffers are not aligned to the cache line
+ * size we need bounce buffers. The DMA-API requires that the
+		 * entire cache line is owned by the DMA buffer, and this also
+		 * holds when coherent DMA is used.
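+		 * As an illustration (assuming ARTPEC_CACHE_LINE_MAX is 64):
+		 * a 150-byte chunk starting 16 bytes into a line is split
+		 * into a 48-byte bounce piece up to the line boundary, one
+		 * 64-byte piece that is DMA-mapped directly, and a final
+		 * 38-byte bounce piece.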
+ */
+ if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
+ chunk = min_t(dma_addr_t, chunk,
+ ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
+ addr);
+
+ pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+ ret = setup_bounce_buffer_in(common, walk, chunk);
+ } else if (chunk < ARTPEC_CACHE_LINE_MAX) {
+ pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+ ret = setup_bounce_buffer_in(common, walk, chunk);
+ } else {
+ dma_addr_t dma_addr;
+
+ chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
+
+ pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
+
+ ret = artpec6_crypto_dma_map_page(common,
+ sg_page(walk->sg),
+ walk->sg->offset +
+ walk->offset,
+ chunk,
+ DMA_FROM_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr_phys(common,
+ dma_addr,
+ chunk, false);
+ }
+
+ if (ret)
+ return ret;
+
+ count = count - chunk;
+ artpec6_crypto_walk_advance(walk, chunk);
+ }
+
+ if (count)
+ pr_err("EOL unexpected %zu bytes left\n", count);
+
+ return count ? -EINVAL : 0;
+}
+
+static int
+artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk,
+ size_t count)
+{
+ size_t chunk;
+ int ret;
+ dma_addr_t addr;
+
+ while (walk->sg && count) {
+ chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+ addr = artpec6_crypto_walk_chunk_phys(walk);
+
+ pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
+
+ if (addr & 3) {
+ char buf[3];
+
+ chunk = min_t(size_t, chunk, (4-(addr&3)));
+
+ sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
+ walk->offset);
+
+ ret = artpec6_crypto_setup_out_descr_short(common, buf,
+ chunk,
+ false);
+ } else {
+ dma_addr_t dma_addr;
+
+ ret = artpec6_crypto_dma_map_page(common,
+ sg_page(walk->sg),
+ walk->sg->offset +
+ walk->offset,
+ chunk,
+ DMA_TO_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr_phys(common,
+ dma_addr,
+ chunk, false);
+ }
+
+ if (ret)
+ return ret;
+
+ count = count - chunk;
+ artpec6_crypto_walk_advance(walk, chunk);
+ }
+
+ if (count)
+ pr_err("EOL unexpected %zu bytes left\n", count);
+
+ return count ? -EINVAL : 0;
+}
+
+/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
+ *
+ * If the out descriptor list is non-empty, then the eop flag on the
+ * last used out descriptor will be set.
+ *
+ * @return 0 on success
+ * -EINVAL if the out descriptor is empty or has overflown
+ */
+static int
+artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
+		pr_err("%s: OUT descriptor list is %s\n",
+			MODULE_NAME, dma->out_cnt ? "full" : "empty");
+		return -EINVAL;
+	}
+
+ d = &dma->out[dma->out_cnt-1];
+ d->ctrl.eop = 1;
+
+ return 0;
+}
+
+/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
+ * in descriptor
+ *
+ * See artpec6_crypto_terminate_out_descrs() for return values
+ */
+static int
+artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
+ pr_err("%s: IN descriptor list is %s\n",
+			MODULE_NAME, dma->in_cnt ? "full" : "empty");
+ return -EINVAL;
+ }
+
+ d = &dma->in[dma->in_cnt-1];
+ d->ctrl.intr = 1;
+ return 0;
+}
+
+/** create_hash_pad - Create a Secure Hash conformant pad
+ *
+ * @dst: The destination buffer to write the pad to. Must be large enough
+ *       to hold the worst-case pad (one hash block plus the length field)
+ * @dgstlen: The total length of the hashed message in bytes
+ * @bitcount: The total length of the hashed message in bits
+ *
+ * @return The total number of padding bytes written to @dst
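+ *
+ * Worked example (illustrative): for SHA-256, mod = 64 and target =
+ * 56 - 1 = 55. Hashing a 3-byte message gives diff = 3 and pad_bytes =
+ * 52, so 0x80, 52 zero bytes and the 8-byte bit count are written:
+ * 3 + 61 = 64 bytes, exactly one block.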
+ */
+static size_t
+create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
+{
+ unsigned int mod, target, diff, pad_bytes, size_bytes;
+ __be64 bits = __cpu_to_be64(bitcount);
+
+ switch (oper) {
+ case regk_crypto_sha1:
+ case regk_crypto_sha256:
+ case regk_crypto_hmac_sha1:
+ case regk_crypto_hmac_sha256:
+ target = 448 / 8;
+ mod = 512 / 8;
+ size_bytes = 8;
+ break;
+ default:
+ target = 896 / 8;
+ mod = 1024 / 8;
+ size_bytes = 16;
+ break;
+ }
+
+ target -= 1;
+ diff = dgstlen & (mod - 1);
+ pad_bytes = diff > target ? target + mod - diff : target - diff;
+
+ memset(dst + 1, 0, pad_bytes);
+ dst[0] = 0x80;
+
+ if (size_bytes == 16) {
+ memset(dst + 1 + pad_bytes, 0, 8);
+ memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
+ } else {
+ memcpy(dst + 1 + pad_bytes, &bits, 8);
+ }
+
+ return pad_bytes + size_bytes + 1;
+}
+
+static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
+ struct crypto_async_request *parent,
+ void (*complete)(struct crypto_async_request *req),
+ struct scatterlist *dstsg, unsigned int nbytes)
+{
+ gfp_t flags;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+ flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ common->gfp_flags = flags;
+ common->dma = kmem_cache_alloc(ac->dma_cache, flags);
+ if (!common->dma)
+ return -ENOMEM;
+
+ common->req = parent;
+ common->complete = complete;
+ return 0;
+}
+
+static void
+artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
+{
+ struct artpec6_crypto_bounce_buffer *b;
+ struct artpec6_crypto_bounce_buffer *next;
+
+ list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+ kfree(b);
+ }
+}
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+ artpec6_crypto_dma_unmap_all(common);
+ artpec6_crypto_bounce_destroy(common->dma);
+ kmem_cache_free(ac->dma_cache, common->dma);
+ common->dma = NULL;
+ return 0;
+}
+
+/*
+ * Ciphering functions.
+ */
+static int artpec6_crypto_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ void (*complete)(struct crypto_async_request *req);
+ int ret;
+
+ req_ctx = skcipher_request_ctx(req);
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+ req_ctx->decrypt = 0;
+ break;
+ default:
+ break;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ complete = artpec6_crypto_complete_cbc_encrypt;
+ break;
+ default:
+ complete = artpec6_crypto_complete_crypto;
+ break;
+ }
+
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ complete,
+ req->dst, req->cryptlen);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_crypto(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_decrypt(struct skcipher_request *req)
+{
+ int ret;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ void (*complete)(struct crypto_async_request *req);
+
+ req_ctx = skcipher_request_ctx(req);
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+ req_ctx->decrypt = 1;
+ break;
+ default:
+ break;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ complete = artpec6_crypto_complete_cbc_decrypt;
+ break;
+ default:
+ complete = artpec6_crypto_complete_crypto;
+ break;
+ }
+
+ ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+ complete,
+ req->dst, req->cryptlen);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_crypto(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int
+artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ unsigned int counter = be32_to_cpup((__be32 *)
+ (req->iv + iv_len - 4));
+ unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+ AES_BLOCK_SIZE;
+
+ /*
+	 * The hardware uses only the last 32 bits as the counter while the
+	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
+	 * the whole IV is a counter. So fall back if the counter is going to
+	 * overflow.
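+	 * E.g. with the counter at 0xffffffff even a single-block request
+	 * falls back, since the 32-bit sum wraps to zero.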
+ */
+ if (counter + nblks < counter) {
+ int ret;
+
+ pr_debug("counter %x will overflow (nblks %u), falling back\n",
+ counter, counter + nblks);
+
+ ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
+ if (ret)
+ return ret;
+
+ {
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq)
+ : crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ }
+ return ret;
+ }
+
+ return encrypt ? artpec6_crypto_encrypt(req)
+ : artpec6_crypto_decrypt(req);
+}
+
+static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
+{
+ return artpec6_crypto_ctr_crypt(req, true);
+}
+
+static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
+{
+ return artpec6_crypto_ctr_crypt(req, false);
+}
+
+/*
+ * AEAD functions
+ */
+static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
+{
+ struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
+
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+ crypto_aead_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_aead_req_ctx));
+
+ return 0;
+}
+
+static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (len != 16 && len != 24 && len != 32) {
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+ }
+
+ ctx->key_length = len;
+
+ memcpy(ctx->aes_key, key, len);
+ return 0;
+}
+
+static int artpec6_crypto_aead_encrypt(struct aead_request *req)
+{
+ int ret;
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ req_ctx->decrypt = false;
+ ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+ artpec6_crypto_complete_aead,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_aead(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_aead_decrypt(struct aead_request *req)
+{
+ int ret;
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ req_ctx->decrypt = true;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ artpec6_crypto_complete_aead,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_aead(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
+{
+ struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
+ size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+ size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
+ SHA512_DIGEST_SIZE : digestsize;
+ size_t blocksize = crypto_tfm_alg_blocksize(
+ crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
+ struct artpec6_crypto_req_common *common = &req_ctx->common;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ u32 sel_ctx;
+ bool ext_ctx = false;
+ bool run_hw = false;
+ int error = 0;
+
+ artpec6_crypto_init_dma_operation(common);
+
+	/* Upload the HMAC key; it must be the first packet */
+ if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+ a6_regk_crypto_dlkey);
+ } else {
+ req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+ a7_regk_crypto_dlkey);
+ }
+
+ /* Copy and pad up the key */
+ memcpy(req_ctx->key_buffer, ctx->hmac_key,
+ ctx->hmac_key_length);
+ memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
+ blocksize - ctx->hmac_key_length);
+
+ error = artpec6_crypto_setup_out_descr(common,
+ (void *)&req_ctx->key_md,
+ sizeof(req_ctx->key_md), false, false);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->key_buffer, blocksize,
+ true, false);
+ if (error)
+ return error;
+ }
+
+ if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
+ /* Restore context */
+ sel_ctx = regk_crypto_ext;
+ ext_ctx = true;
+ } else {
+ sel_ctx = regk_crypto_init;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
+ req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+ /* If this is the final round, set the final flag */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+ req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
+ } else {
+ req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
+ req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+ /* If this is the final round, set the final flag */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+ req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
+ }
+
+	/* Set up metadata descriptors */
+ error = artpec6_crypto_setup_out_descr(common,
+ (void *)&req_ctx->hash_md,
+ sizeof(req_ctx->hash_md), false, false);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (error)
+ return error;
+
+ if (ext_ctx) {
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->digeststate,
+ contextsize, false, false);
+
+ if (error)
+ return error;
+ }
+
+ if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
+ size_t done_bytes = 0;
+ size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
+ size_t ready_bytes = round_down(total_bytes, blocksize);
+ struct artpec6_crypto_walk walk;
+
+ run_hw = ready_bytes > 0;
+ if (req_ctx->partial_bytes && ready_bytes) {
+			/* We have a partial buffer and will send at least some
+			 * bytes to the HW. Empty this partial buffer before
+			 * tackling the SG lists.
+ */
+ memcpy(req_ctx->partial_buffer_out,
+ req_ctx->partial_buffer,
+ req_ctx->partial_bytes);
+
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->partial_buffer_out,
+ req_ctx->partial_bytes,
+ false, true);
+ if (error)
+ return error;
+
+ /* Reset partial buffer */
+ done_bytes += req_ctx->partial_bytes;
+ req_ctx->partial_bytes = 0;
+ }
+
+ artpec6_crypto_walk_init(&walk, areq->src);
+
+ error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
+ ready_bytes -
+ done_bytes);
+ if (error)
+ return error;
+
+ if (walk.sg) {
+ size_t sg_skip = ready_bytes - done_bytes;
+ size_t sg_rem = areq->nbytes - sg_skip;
+
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req_ctx->partial_buffer +
+ req_ctx->partial_bytes,
+ sg_rem, sg_skip);
+
+ req_ctx->partial_bytes += sg_rem;
+ }
+
+ req_ctx->digcnt += ready_bytes;
+ req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
+ }
+
+ /* Finalize */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
+ bool needtrim = contextsize != digestsize;
+ size_t hash_pad_len;
+ u64 digest_bits;
+ u32 oper;
+
+ if (variant == ARTPEC6_CRYPTO)
+ oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
+ else
+ oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
+
+ /* Write out the partial buffer if present */
+ if (req_ctx->partial_bytes) {
+ memcpy(req_ctx->partial_buffer_out,
+ req_ctx->partial_buffer,
+ req_ctx->partial_bytes);
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->partial_buffer_out,
+ req_ctx->partial_bytes,
+ false, true);
+ if (error)
+ return error;
+
+ req_ctx->digcnt += req_ctx->partial_bytes;
+ req_ctx->partial_bytes = 0;
+ }
+
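+		/* For HMAC the length used in the final padding must also
+		 * count the one block of (ipad-XORed) key material that the
+		 * inner hash, H((K ^ ipad) || m), processes before the
+		 * message.
+		 */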
+ if (req_ctx->hash_flags & HASH_FLAG_HMAC)
+ digest_bits = 8 * (req_ctx->digcnt + blocksize);
+ else
+ digest_bits = 8 * req_ctx->digcnt;
+
+ /* Add the hash pad */
+ hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
+ req_ctx->digcnt, digest_bits);
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->pad_buffer,
+ hash_pad_len, false,
+ true);
+ req_ctx->digcnt = 0;
+
+ if (error)
+ return error;
+
+ /* Descriptor for the final result */
+ error = artpec6_crypto_setup_in_descr(common, areq->result,
+ digestsize,
+ !needtrim);
+ if (error)
+ return error;
+
+ if (needtrim) {
+ /* Discard the extra context bytes for SHA-384 */
+ error = artpec6_crypto_setup_in_descr(common,
+ req_ctx->partial_buffer,
+ digestsize - contextsize, true);
+ if (error)
+ return error;
+ }
+
+ } else { /* This is not the final operation for this request */
+ if (!run_hw)
+ return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
+
+ /* Save the result to the context */
+ error = artpec6_crypto_setup_in_descr(common,
+ req_ctx->digeststate,
+ contextsize, false);
+ if (error)
+ return error;
+ /* fall through */
+ }
+
+ req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
+ HASH_FLAG_FINALIZE);
+
+ error = artpec6_crypto_terminate_in_descrs(common);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_terminate_out_descrs(common);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_dma_map_descs(common);
+ if (error)
+ return error;
+
+ return ARTPEC6_CRYPTO_PREPARE_HASH_START;
+}
+
+static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
+
+ return 0;
+}
+
+static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback);
+ artpec6_crypto_aes_exit(tfm);
+}
+
+static int
+artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct artpec6_cryptotfm_context *ctx =
+ crypto_skcipher_ctx(cipher);
+
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ crypto_skcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->aes_key, key, keylen);
+ ctx->key_length = keylen;
+ return 0;
+}
+
+static int
+artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct artpec6_cryptotfm_context *ctx =
+ crypto_skcipher_ctx(cipher);
+ int ret;
+
+ ret = xts_check_key(&cipher->base, key, keylen);
+ if (ret)
+ return ret;
+
+ switch (keylen) {
+ case 32:
+ case 48:
+ case 64:
+ break;
+ default:
+ crypto_skcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->aes_key, key, keylen);
+ ctx->key_length = keylen;
+ return 0;
+}
+
+/** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request
+ *
+ * @req: The asynch request to process
+ *
+ * @return 0 if the dma job was successfully prepared
+ * <0 on error
+ *
+ * This function sets up the PDMA descriptors for a block cipher request.
+ *
+ * The required padding is added for AES-CTR using a statically defined
+ * buffer.
+ *
+ * The PDMA descriptor list will be as follows:
+ *
+ * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
+ * IN: <CIPHER_MD><data_0>...[data_n]<intr>
+ *
+ */
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
+{
+ int ret;
+ struct artpec6_crypto_walk walk;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ struct artpec6_crypto_req_common *common;
+ bool cipher_decr = false;
+ size_t cipher_klen;
+ u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
+ u32 oper;
+
+ req_ctx = skcipher_request_ctx(areq);
+ common = &req_ctx->common;
+
+ artpec6_crypto_init_dma_operation(common);
+
+ if (variant == ARTPEC6_CRYPTO)
+ ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
+ else
+ ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
+
+ ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+ sizeof(ctx->key_md), false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+ ctx->key_length, true, false);
+ if (ret)
+ return ret;
+
+ req_ctx->cipher_md = 0;
+
+ if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
+ cipher_klen = ctx->key_length/2;
+ else
+ cipher_klen = ctx->key_length;
+
+ /* Metadata */
+ switch (cipher_klen) {
+ case 16:
+ cipher_len = regk_crypto_key_128;
+ break;
+ case 24:
+ cipher_len = regk_crypto_key_192;
+ break;
+ case 32:
+ cipher_len = regk_crypto_key_256;
+ break;
+ default:
+ pr_err("%s: Invalid key length %d!\n",
+ MODULE_NAME, ctx->key_length);
+ return -EINVAL;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ oper = regk_crypto_aes_ecb;
+ cipher_decr = req_ctx->decrypt;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ oper = regk_crypto_aes_cbc;
+ cipher_decr = req_ctx->decrypt;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
+ oper = regk_crypto_aes_ctr;
+ cipher_decr = false;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+ oper = regk_crypto_aes_xts;
+ cipher_decr = req_ctx->decrypt;
+
+ if (variant == ARTPEC6_CRYPTO)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
+ else
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
+ break;
+
+ default:
+ pr_err("%s: Invalid cipher mode %d!\n",
+ MODULE_NAME, ctx->crypto_type);
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+ cipher_len);
+ if (cipher_decr)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+ } else {
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+ cipher_len);
+ if (cipher_decr)
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+ }
+
+ ret = artpec6_crypto_setup_out_descr(common,
+ &req_ctx->cipher_md,
+ sizeof(req_ctx->cipher_md),
+ false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (ret)
+ return ret;
+
+ if (iv_len) {
+ ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
+ false, false);
+ if (ret)
+ return ret;
+ }
+ /* Data out */
+ artpec6_crypto_walk_init(&walk, areq->src);
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
+ if (ret)
+ return ret;
+
+ /* Data in */
+ artpec6_crypto_walk_init(&walk, areq->dst);
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
+ if (ret)
+ return ret;
+
+	/* CTR-mode and XTS-mode padding required by the HW. */
+ if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
+ ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
+ size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
+ areq->cryptlen;
+
+ if (pad) {
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->pad_buffer,
+ pad, false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer, pad,
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = artpec6_crypto_terminate_out_descrs(common);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_terminate_in_descrs(common);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_dma_map_descs(common);
+}
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq)
+{
+ size_t count;
+ int ret;
+ size_t input_length;
+ struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+ struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
+ struct artpec6_crypto_req_common *common = &req_ctx->common;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ u32 md_cipher_len;
+
+ artpec6_crypto_init_dma_operation(common);
+
+ /* Key */
+ if (variant == ARTPEC6_CRYPTO) {
+ ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+ a6_regk_crypto_dlkey);
+ } else {
+ ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+ a7_regk_crypto_dlkey);
+ }
+ ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+ sizeof(ctx->key_md), false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+ ctx->key_length, true, false);
+ if (ret)
+ return ret;
+
+ req_ctx->cipher_md = 0;
+
+ switch (ctx->key_length) {
+ case 16:
+ md_cipher_len = regk_crypto_key_128;
+ break;
+ case 24:
+ md_cipher_len = regk_crypto_key_192;
+ break;
+ case 32:
+ md_cipher_len = regk_crypto_key_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
+ regk_crypto_aes_gcm);
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+ md_cipher_len);
+ if (req_ctx->decrypt)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+ } else {
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
+ regk_crypto_aes_gcm);
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+ md_cipher_len);
+ if (req_ctx->decrypt)
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+ }
+
+ ret = artpec6_crypto_setup_out_descr(common,
+ (void *) &req_ctx->cipher_md,
+ sizeof(req_ctx->cipher_md), false,
+ false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (ret)
+ return ret;
+
+	/* For decryption, cryptlen includes the tag. */
+ input_length = areq->cryptlen;
+ if (req_ctx->decrypt)
+ input_length -= AES_BLOCK_SIZE;
+
+ /* Prepare the context buffer */
+ req_ctx->hw_ctx.aad_length_bits =
+ __cpu_to_be64(8*areq->assoclen);
+
+ req_ctx->hw_ctx.text_length_bits =
+ __cpu_to_be64(8*input_length);
+
+ memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
+	/* Zero the counter bytes of J0 so that the increment below yields
+	 * the initial counter value of 1 (J0 = IV || 0^31 || 1 for 96-bit
+	 * IVs). The HW omits this initial increment of the counter field.
+	 */
+	memset(req_ctx->hw_ctx.J0 + 12, 0, 4);
+	crypto_inc(req_ctx->hw_ctx.J0 + 12, 4);
+
+ ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
+ sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
+ if (ret)
+ return ret;
+
+ {
+ struct artpec6_crypto_walk walk;
+
+ artpec6_crypto_walk_init(&walk, areq->src);
+
+ /* Associated data */
+ count = areq->assoclen;
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(areq->assoclen, 16)) {
+ size_t assoc_pad = 16 - (areq->assoclen % 16);
+ /* The HW mandates zero padding here */
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->zero_buffer,
+ assoc_pad, false,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ /* Data to crypto */
+ count = input_length;
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(input_length, 16)) {
+ size_t crypto_pad = 16 - (input_length % 16);
+ /* The HW mandates zero padding here */
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->zero_buffer,
+ crypto_pad,
+ false,
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Data from crypto */
+ {
+ struct artpec6_crypto_walk walk;
+ size_t output_len = areq->cryptlen;
+
+ if (req_ctx->decrypt)
+ output_len -= AES_BLOCK_SIZE;
+
+ artpec6_crypto_walk_init(&walk, areq->dst);
+
+ /* skip associated data in the output */
+ count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
+ if (count)
+ return -EINVAL;
+
+ count = output_len;
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
+ if (ret)
+ return ret;
+
+ /* Put padding between the cryptotext and the auth tag */
+ if (!IS_ALIGNED(output_len, 16)) {
+ size_t crypto_pad = 16 - (output_len % 16);
+
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer,
+ crypto_pad, false);
+ if (ret)
+ return ret;
+ }
+
+ /* The authentication tag shall follow immediately after
+		 * the output ciphertext. For decryption it is put in a context
+		 * buffer for later comparison with the input tag.
+ */
+ count = AES_BLOCK_SIZE;
+
+ if (req_ctx->decrypt) {
+ ret = artpec6_crypto_setup_in_descr(common,
+ req_ctx->decryption_tag, count, false);
+ if (ret)
+ return ret;
+
+ } else {
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
+ count);
+ if (ret)
+ return ret;
+ }
+
+ }
+
+ ret = artpec6_crypto_terminate_in_descrs(common);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_terminate_out_descrs(common);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_dma_map_descs(common);
+}
+
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+{
+ struct artpec6_crypto_req_common *req;
+
+ while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
+ req = list_first_entry(&ac->queue,
+ struct artpec6_crypto_req_common,
+ list);
+ list_move_tail(&req->list, &ac->pending);
+ artpec6_crypto_start_dma(req);
+
+ req->req->complete(req->req, -EINPROGRESS);
+ }
+
+ /*
+ * In some cases, the hardware can raise an in_eop_flush interrupt
+	 * before actually updating the status, so we have a timer which will
+	 * recheck the status on timeout. Since the cases are expected to be
+	 * very rare, we use a relatively large timeout value. There should be
+	 * no noticeable negative effect if we time out spuriously.
+ */
+ if (ac->pending_count)
+ mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
+ else
+ del_timer(&ac->timer);
+}
+
+static void artpec6_crypto_timeout(unsigned long data)
+{
+ struct artpec6_crypto *ac = (struct artpec6_crypto *) data;
+
+ dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
+
+ tasklet_schedule(&ac->task);
+}
+
+static void artpec6_crypto_task(unsigned long data)
+{
+ struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
+ struct artpec6_crypto_req_common *req;
+ struct artpec6_crypto_req_common *n;
+
+ if (list_empty(&ac->pending)) {
+ pr_debug("Spurious IRQ\n");
+ return;
+ }
+
+ spin_lock_bh(&ac->queue_lock);
+
+ list_for_each_entry_safe(req, n, &ac->pending, list) {
+ struct artpec6_crypto_dma_descriptors *dma = req->dma;
+ u32 stat;
+
+ dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
+ sizeof(dma->stat[0]),
+ DMA_BIDIRECTIONAL);
+
+ stat = req->dma->stat[req->dma->in_cnt-1];
+
+ /* A non-zero final status descriptor indicates
+ * this job has finished.
+ */
+ pr_debug("Request %p status is %X\n", req, stat);
+ if (!stat)
+ break;
+
+ /* Allow testing of timeout handling with fault injection */
+#ifdef CONFIG_FAULT_INJECTION
+ if (should_fail(&artpec6_crypto_fail_status_read, 1))
+ continue;
+#endif
+
+ pr_debug("Completing request %p\n", req);
+
+ list_del(&req->list);
+
+ artpec6_crypto_dma_unmap_all(req);
+ artpec6_crypto_copy_bounce_buffers(req);
+
+ ac->pending_count--;
+ artpec6_crypto_common_destroy(req);
+ req->complete(req->req);
+ }
+
+ artpec6_crypto_process_queue(ac);
+
+ spin_unlock_bh(&ac->queue_lock);
+}
+
+static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
+{
+ req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
+{
+ struct skcipher_request *cipher_req = container_of(req,
+ struct skcipher_request, base);
+
+ scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
+ cipher_req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
+{
+ struct skcipher_request *cipher_req = container_of(req,
+ struct skcipher_request, base);
+
+ scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
+ cipher_req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ req->complete(req, 0);
+}
+
+static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
+{
+ int result = 0;
+
+ /* Verify GCM hashtag. */
+ struct aead_request *areq = container_of(req,
+ struct aead_request, base);
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+
+ if (req_ctx->decrypt) {
+ u8 input_tag[AES_BLOCK_SIZE];
+
+ sg_pcopy_to_buffer(areq->src,
+ sg_nents(areq->src),
+ input_tag,
+ AES_BLOCK_SIZE,
+ areq->assoclen + areq->cryptlen -
+ AES_BLOCK_SIZE);
+
+ if (memcmp(req_ctx->decryption_tag,
+ input_tag,
+ AES_BLOCK_SIZE)) {
+ pr_debug("***EBADMSG:\n");
+ print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
+ input_tag, AES_BLOCK_SIZE, true);
+ print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
+ req_ctx->decryption_tag,
+ AES_BLOCK_SIZE, true);
+
+ result = -EBADMSG;
+ }
+ }
+
+ req->complete(req, result);
+}
+
+static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
+{
+ req->complete(req, 0);
+}
+
+/*------------------- Hash functions -----------------------------------------*/
+static int
+artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
+ size_t blocksize;
+ int ret;
+
+ if (!keylen) {
+ pr_err("Invalid length (%d) of HMAC key\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+ if (keylen > blocksize) {
+ SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
+
+ hdesc->tfm = tfm_ctx->child_hash;
+ hdesc->flags = crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ tfm_ctx->hmac_key_length = blocksize;
+ ret = crypto_shash_digest(hdesc, key, keylen,
+ tfm_ctx->hmac_key);
+ if (ret)
+ return ret;
+
+ } else {
+ memcpy(tfm_ctx->hmac_key, key, keylen);
+ tfm_ctx->hmac_key_length = keylen;
+ }
+
+ return 0;
+}
+
+static int
+artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+ u32 oper;
+
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
+ if (hmac)
+ req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
+
+ switch (type) {
+ case ARTPEC6_CRYPTO_HASH_SHA1:
+ oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA256:
+ oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA384:
+ oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA512:
+ oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
+ break;
+
+ default:
+ pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO)
+ req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
+ else
+ req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
+
+ return 0;
+}
+
+static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+ int ret;
+
+ if (!req_ctx->common.dma) {
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ artpec6_crypto_complete_hash,
+ NULL, 0);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = artpec6_crypto_prepare_hash(req);
+ switch (ret) {
+ case ARTPEC6_CRYPTO_PREPARE_HASH_START:
+ ret = artpec6_crypto_submit(&req_ctx->common);
+ break;
+
+ case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
+ ret = 0;
+ /* Fallthrough */
+
+ default:
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ break;
+ }
+
+ return ret;
+}
+
+static int artpec6_crypto_hash_final(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hash_update(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha1_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+}
+
+static int artpec6_crypto_sha1_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha256_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+}
+
+static int artpec6_crypto_sha256_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+}
+
+static int __maybe_unused
+artpec6_crypto_sha384_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha512_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+}
+
+static int artpec6_crypto_sha512_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+}
+
+static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+}
+
+static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
+ const char *base_hash_name)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct artpec6_hash_request_context));
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
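+ /*
+ * For the hmac(...) variants a software shash of the base hash is
+ * allocated here; presumably it is used to condense HMAC keys longer
+ * than the block size, as RFC 2104 requires.
+ */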
+ if (base_hash_name) {
+ struct crypto_shash *child;
+
+ child = crypto_alloc_shash(base_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ tfm_ctx->child_hash = child;
+ }
+
+ return 0;
+}
+
+static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, NULL);
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha256");
+}
+
+static int __maybe_unused
+artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha384");
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha512");
+}
+
+static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+ if (tfm_ctx->child_hash)
+ crypto_free_shash(tfm_ctx->child_hash);
+
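+ /* Scrub any HMAC key material before the context is freed. */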
+ memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+ tfm_ctx->hmac_key_length = 0;
+}
+
+static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
+{
+ const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+ struct artpec6_hash_export_state *state = out;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+
+ BUILD_BUG_ON(sizeof(state->partial_buffer) !=
+ sizeof(ctx->partial_buffer));
+ BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
+
+ state->digcnt = ctx->digcnt;
+ state->partial_bytes = ctx->partial_bytes;
+ state->hash_flags = ctx->hash_flags;
+
+ if (variant == ARTPEC6_CRYPTO)
+ state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
+ else
+ state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
+
+ memcpy(state->partial_buffer, ctx->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(state->digeststate, ctx->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
+
+static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
+{
+ struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+ const struct artpec6_hash_export_state *state = in;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->digcnt = state->digcnt;
+ ctx->partial_bytes = state->partial_bytes;
+ ctx->hash_flags = state->hash_flags;
+
+ if (variant == ARTPEC6_CRYPTO)
+ ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
+ else
+ ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
+
+ memcpy(ctx->partial_buffer, state->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(ctx->digeststate, state->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
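+
+/*
+ * Minimal usage sketch for the export/import pair above (hypothetical
+ * caller going through the generic ahash API):
+ *
+ *	char state[sizeof(struct artpec6_hash_export_state)];
+ *
+ *	crypto_ahash_export(req, state);
+ *	...resume later, possibly on another request...
+ *	crypto_ahash_import(req, state);
+ */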
+
+static int init_crypto_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ u32 out_descr_buf_size;
+ u32 out_data_buf_size;
+ u32 in_data_buf_size;
+ u32 in_descr_buf_size;
+ u32 in_stat_buf_size;
+ u32 in, out;
+
+ /*
+ * The PDMA unit contains 1984 bytes of internal memory for the OUT
+ * channels and 1024 bytes for the IN channel. This is an elastic
+ * memory used to internally store the descriptors and data. The values
+ * are specified in 64 byte increments. TrustZone buffers are not
+ * used at this stage.
+ */
+ out_data_buf_size = 16; /* 1024 bytes for data */
+ out_descr_buf_size = 15; /* 960 bytes for descriptors */
+ in_data_buf_size = 8; /* 512 bytes for data */
+ in_descr_buf_size = 4; /* 256 bytes for descriptors */
+ in_stat_buf_size = 4; /* 256 bytes for stat descrs */
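+
+ /*
+ * Sanity of the above: (16 + 15) * 64 = 1984 bytes for OUT and
+ * (8 + 4 + 4) * 64 = 1024 bytes for IN, filling both elastic
+ * memories exactly; the BUILD_BUG_ONs below enforce the limits.
+ */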
+
+ BUILD_BUG_ON_MSG((out_data_buf_size
+ + out_descr_buf_size) * 64 > 1984,
+ "Invalid OUT configuration");
+
+ BUILD_BUG_ON_MSG((in_data_buf_size
+ + in_descr_buf_size
+ + in_stat_buf_size) * 64 > 1024,
+ "Invalid IN configuration");
+
+ in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
+
+ out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
+ FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
+
+ writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
+ writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
+ A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A6_PDMA_INTR_MASK);
+ } else {
+ writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
+ writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
+ A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A7_PDMA_INTR_MASK);
+ }
+
+ return 0;
+}
+
+static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
+ writel_relaxed(0, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+ } else {
+ writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
+ writel_relaxed(0, base + A7_PDMA_IN_CFG);
+ writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+ }
+
+ writel_relaxed(0, base + PDMA_OUT_CFG);
+}
+
+static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
+{
+ struct artpec6_crypto *ac = dev_id;
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ u32 mask_in_data, mask_in_eop_flush;
+ u32 in_cmd_flush_stat, in_cmd_reg;
+ u32 ack_intr_reg;
+ u32 ack = 0;
+ u32 intr;
+
+ if (variant == ARTPEC6_CRYPTO) {
+ intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
+ mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
+ mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
+ in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
+ in_cmd_reg = A6_PDMA_IN_CMD;
+ ack_intr_reg = A6_PDMA_ACK_INTR;
+ } else {
+ intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
+ mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
+ mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
+ in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
+ in_cmd_reg = A7_PDMA_IN_CMD;
+ ack_intr_reg = A7_PDMA_ACK_INTR;
+ }
+
+ /* We get two interrupt notifications from each job.
+ * The in_data interrupt means all data was sent to memory, and we
+ * then request a status flush command to write the per-job status
+ * to its status vector. This ensures that the tasklet can detect
+ * exactly how many of the submitted jobs have finished.
+ */
+ if (intr & mask_in_data)
+ ack |= mask_in_data;
+
+ if (intr & mask_in_eop_flush)
+ ack |= mask_in_eop_flush;
+ else
+ writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
+
+ writel_relaxed(ack, base + ack_intr_reg);
+
+ if (intr & mask_in_eop_flush)
+ tasklet_schedule(&ac->task);
+
+ return IRQ_HANDLED;
+}
+
+/*------------------- Algorithm definitions ----------------------------------*/
+
+/* Hashes */
+static struct ahash_alg hash_algos[] = {
+ /* SHA-1 */
+ {
+ .init = artpec6_crypto_sha1_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha1_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "artpec-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* SHA-256 */
+ {
+ .init = artpec6_crypto_sha256_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha256_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "artpec-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-256 */
+ {
+ .init = artpec6_crypto_hmac_sha256_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha256_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "artpec-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+};
+
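+/*
+ * SHA-384/512 and their HMAC variants are only registered when the probe
+ * detects an ARTPEC-7 (see artpec6_crypto_probe()).
+ */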
+static struct ahash_alg artpec7_hash_algos[] = {
+ /* SHA-384 */
+ {
+ .init = artpec6_crypto_sha384_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha384_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "artpec-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-384 */
+ {
+ .init = artpec6_crypto_hmac_sha384_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha384_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "artpec-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* SHA-512 */
+ {
+ .init = artpec6_crypto_sha512_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha512_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "artpec-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-512 */
+ {
+ .init = artpec6_crypto_hmac_sha512_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha512_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "artpec-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+};
+
+/* Crypto */
+static struct skcipher_alg crypto_algos[] = {
+ /* AES - ECB */
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "artpec6-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_ecb_init,
+ .exit = artpec6_crypto_aes_exit,
+ },
+ /* AES - CTR */
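+ /*
+ * Note that CTR sets CRYPTO_ALG_NEED_FALLBACK and has dedicated
+ * encrypt/decrypt handlers; presumably requests the hardware cannot
+ * process (e.g. a wrapping counter) are diverted to a software
+ * fallback there.
+ */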
+ {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "artpec6-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_ctr_encrypt,
+ .decrypt = artpec6_crypto_ctr_decrypt,
+ .init = artpec6_crypto_aes_ctr_init,
+ .exit = artpec6_crypto_aes_ctr_exit,
+ },
+ /* AES - CBC */
+ {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "artpec6-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_cbc_init,
+ .exit = artpec6_crypto_aes_exit
+ },
+ /* AES - XTS */
+ {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "artpec6-xts-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = 2*AES_MIN_KEY_SIZE,
+ .max_keysize = 2*AES_MAX_KEY_SIZE,
+ .ivsize = 16,
+ .setkey = artpec6_crypto_xts_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_xts_init,
+ .exit = artpec6_crypto_aes_exit,
+ },
+};
+
+static struct aead_alg aead_algos[] = {
+ {
+ .init = artpec6_crypto_aead_init,
+ .setkey = artpec6_crypto_aead_set_key,
+ .encrypt = artpec6_crypto_aead_encrypt,
+ .decrypt = artpec6_crypto_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "artpec-gcm-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ }
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+struct dbgfs_u32 {
+ char *name;
+ mode_t mode;
+ u32 *flag;
+ char *desc;
+};
+
+static void artpec6_crypto_init_debugfs(void)
+{
+ dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
+
+ if (IS_ERR_OR_NULL(dbgfs_root)) {
+ dbgfs_root = NULL;
+ pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
+ return;
+ }
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_status_read", dbgfs_root,
+ &artpec6_crypto_fail_status_read);
+
+ fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
+ &artpec6_crypto_fail_dma_array_full);
+#endif
+}
+
+static void artpec6_crypto_free_debugfs(void)
+{
+ if (!dbgfs_root)
+ return;
+
+ debugfs_remove_recursive(dbgfs_root);
+ dbgfs_root = NULL;
+}
+#endif
+
+static const struct of_device_id artpec6_crypto_of_match[] = {
+ { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
+ { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
+ {}
+};
+MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
+
+static int artpec6_crypto_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ enum artpec6_crypto_variant variant;
+ struct artpec6_crypto *ac;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct resource *res;
+ int irq;
+ int err;
+
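+ /*
+ * Only a single device instance is supported; the export/import
+ * helpers rely on the global artpec6_crypto_dev pointer.
+ */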
+ if (artpec6_crypto_dev)
+ return -ENODEV;
+
+ match = of_match_node(artpec6_crypto_of_match, dev->of_node);
+ if (!match)
+ return -EINVAL;
+
+ variant = (enum artpec6_crypto_variant)match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENODEV;
+
+ ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
+ GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ac);
+ ac->variant = variant;
+
+ spin_lock_init(&ac->queue_lock);
+ INIT_LIST_HEAD(&ac->queue);
+ INIT_LIST_HEAD(&ac->pending);
+ setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac);
+
+ ac->base = base;
+
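+ /*
+ * DMA descriptor lists come from a dedicated slab cache, aligned to
+ * 64 bytes (assumed here to match the descriptor alignment the PDMA
+ * engine requires).
+ */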
+ ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
+ sizeof(struct artpec6_crypto_dma_descriptors),
+ 64,
+ 0,
+ NULL);
+ if (!ac->dma_cache)
+ return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ artpec6_crypto_init_debugfs();
+#endif
+
+ tasklet_init(&ac->task, artpec6_crypto_task,
+ (unsigned long)ac);
+
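+ /*
+ * Over-allocate the static pad/zero buffers by one maximum cache
+ * line so PTR_ALIGN can round them up to an ARTPEC_CACHE_LINE_MAX
+ * boundary.
+ */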
+ ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ GFP_KERNEL);
+ if (!ac->pad_buffer)
+ return -ENOMEM;
+ ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
+
+ ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ GFP_KERNEL);
+ if (!ac->zero_buffer)
+ return -ENOMEM;
+ ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
+
+ err = init_crypto_hw(ac);
+ if (err)
+ goto free_cache;
+
+ err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
+ "artpec6-crypto", ac);
+ if (err)
+ goto disable_hw;
+
+ artpec6_crypto_dev = &pdev->dev;
+
+ err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ahashes\n");
+ goto disable_hw;
+ }
+
+ if (variant != ARTPEC6_CRYPTO) {
+ err = crypto_register_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ahashes\n");
+ goto unregister_ahashes;
+ }
+ }
+
+ err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ciphers\n");
+ goto unregister_a7_ahashes;
+ }
+
+ err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+ if (err) {
+ dev_err(dev, "Failed to register aeads\n");
+ goto unregister_algs;
+ }
+
+ return 0;
+
+unregister_algs:
+ crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+unregister_a7_ahashes:
+ if (variant != ARTPEC6_CRYPTO)
+ crypto_unregister_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+unregister_ahashes:
+ crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+disable_hw:
+ artpec6_crypto_disable_hw(ac);
+free_cache:
+ kmem_cache_destroy(ac->dma_cache);
+ return err;
+}
+
+static int artpec6_crypto_remove(struct platform_device *pdev)
+{
+ struct artpec6_crypto *ac = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+ if (ac->variant != ARTPEC6_CRYPTO)
+ crypto_unregister_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+ crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+ crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+
+ tasklet_disable(&ac->task);
+ devm_free_irq(&pdev->dev, irq, ac);
+ tasklet_kill(&ac->task);
+ del_timer_sync(&ac->timer);
+
+ artpec6_crypto_disable_hw(ac);
+
+ kmem_cache_destroy(ac->dma_cache);
+#ifdef CONFIG_DEBUG_FS
+ artpec6_crypto_free_debugfs();
+#endif
+ return 0;
+}
+
+static struct platform_driver artpec6_crypto_driver = {
+ .probe = artpec6_crypto_probe,
+ .remove = artpec6_crypto_remove,
+ .driver = {
+ .name = "artpec6-crypto",
+ .of_match_table = artpec6_crypto_of_match,
+ },
+};
+
+module_platform_driver(artpec6_crypto_driver);
+
+MODULE_AUTHOR("Axis Communications AB");
+MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 9cfd36c1bcb6..8685c7e4debd 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -90,8 +90,6 @@ static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
-#define MAX_SPUS 16
-
/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
* Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
* 0x60 - ring 0
@@ -120,7 +118,7 @@ static u8 select_channel(void)
{
u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
- return chan_idx % iproc_priv.spu.num_spu;
+ return chan_idx % iproc_priv.spu.num_chan;
}
/**
@@ -4528,8 +4526,13 @@ static void spu_functions_register(struct device *dev,
*/
static int spu_mb_init(struct device *dev)
{
- struct mbox_client *mcl = &iproc_priv.mcl[iproc_priv.spu.num_spu];
- int err;
+ struct mbox_client *mcl = &iproc_priv.mcl;
+ int err, i;
+
+ iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
+ sizeof(struct mbox_chan *), GFP_KERNEL);
+ if (!iproc_priv.mbox)
+ return -ENOMEM;
mcl->dev = dev;
mcl->tx_block = false;
@@ -4538,25 +4541,33 @@ static int spu_mb_init(struct device *dev)
mcl->rx_callback = spu_rx_callback;
mcl->tx_done = NULL;
- iproc_priv.mbox[iproc_priv.spu.num_spu] =
- mbox_request_channel(mcl, 0);
- if (IS_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu])) {
- err = (int)PTR_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu]);
- dev_err(dev,
- "Mbox channel %d request failed with err %d",
- iproc_priv.spu.num_spu, err);
- iproc_priv.mbox[iproc_priv.spu.num_spu] = NULL;
- return err;
+ for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+ iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
+ if (IS_ERR(iproc_priv.mbox[i])) {
+ err = (int)PTR_ERR(iproc_priv.mbox[i]);
+ dev_err(dev,
+ "Mbox channel %d request failed with err %d",
+ i, err);
+ iproc_priv.mbox[i] = NULL;
+ goto free_channels;
+ }
}
return 0;
+free_channels:
+ for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+ if (iproc_priv.mbox[i])
+ mbox_free_channel(iproc_priv.mbox[i]);
+ }
+
+ return err;
}
static void spu_mb_release(struct platform_device *pdev)
{
int i;
- for (i = 0; i < iproc_priv.spu.num_spu; i++)
+ for (i = 0; i < iproc_priv.spu.num_chan; i++)
mbox_free_channel(iproc_priv.mbox[i]);
}
@@ -4567,7 +4578,7 @@ static void spu_counters_init(void)
atomic_set(&iproc_priv.session_count, 0);
atomic_set(&iproc_priv.stream_count, 0);
- atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_spu);
+ atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
atomic64_set(&iproc_priv.bytes_in, 0);
atomic64_set(&iproc_priv.bytes_out, 0);
for (i = 0; i < SPU_OP_NUM; i++) {
@@ -4809,47 +4820,38 @@ static int spu_dt_read(struct platform_device *pdev)
struct resource *spu_ctrl_regs;
const struct of_device_id *match;
const struct spu_type_subtype *matched_spu_type;
- void __iomem *spu_reg_vbase[MAX_SPUS];
- int err;
+ struct device_node *dn = pdev->dev.of_node;
+ int err, i;
- match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
- matched_spu_type = match->data;
+ /* Count number of mailbox channels */
+ spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
- if (iproc_priv.spu.num_spu > 1) {
- /* If this is 2nd or later SPU, make sure it's same type */
- if ((spu->spu_type != matched_spu_type->type) ||
- (spu->spu_subtype != matched_spu_type->subtype)) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Multiple SPU types not allowed");
- return err;
- }
- } else {
- /* Record type of first SPU */
- spu->spu_type = matched_spu_type->type;
- spu->spu_subtype = matched_spu_type->subtype;
+ match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
}
- /* Get and map SPU registers */
- spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!spu_ctrl_regs) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Invalid/missing registers for SPU\n");
- return err;
- }
+ matched_spu_type = match->data;
- spu_reg_vbase[iproc_priv.spu.num_spu] =
- devm_ioremap_resource(dev, spu_ctrl_regs);
- if (IS_ERR(spu_reg_vbase[iproc_priv.spu.num_spu])) {
- err = PTR_ERR(spu_reg_vbase[iproc_priv.spu.num_spu]);
- dev_err(&pdev->dev, "Failed to map registers: %d\n",
- err);
- spu_reg_vbase[iproc_priv.spu.num_spu] = NULL;
- return err;
- }
+ spu->spu_type = matched_spu_type->type;
+ spu->spu_subtype = matched_spu_type->subtype;
- dev_dbg(dev, "SPU %d detected.", iproc_priv.spu.num_spu);
+ for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
+ platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
- spu->reg_vbase[iproc_priv.spu.num_spu] = spu_reg_vbase;
+ spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
+ if (IS_ERR(spu->reg_vbase[i])) {
+ err = PTR_ERR(spu->reg_vbase[i]);
+ dev_err(&pdev->dev, "Failed to map registers: %d\n",
+ err);
+ spu->reg_vbase[i] = NULL;
+ return err;
+ }
+ }
+ spu->num_spu = i;
+ dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
return 0;
}
@@ -4860,8 +4862,8 @@ int bcm_spu_probe(struct platform_device *pdev)
struct spu_hw *spu = &iproc_priv.spu;
int err = 0;
- iproc_priv.pdev[iproc_priv.spu.num_spu] = pdev;
- platform_set_drvdata(iproc_priv.pdev[iproc_priv.spu.num_spu],
+ iproc_priv.pdev = pdev;
+ platform_set_drvdata(iproc_priv.pdev,
&iproc_priv);
err = spu_dt_read(pdev);
@@ -4872,12 +4874,6 @@ int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto failure;
- iproc_priv.spu.num_spu++;
-
- /* If already initialized, we've just added another SPU and are done */
- if (iproc_priv.inited)
- return 0;
-
if (spu->spu_type == SPU_TYPE_SPUM)
iproc_priv.bcm_hdr_len = 8;
else if (spu->spu_type == SPU_TYPE_SPU2)
@@ -4893,8 +4889,6 @@ int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto fail_reg;
- iproc_priv.inited = true;
-
return 0;
fail_reg:
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 51dca529ce8f..57a55eb2a255 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -427,10 +427,13 @@ struct spu_hw {
/* The number of SPUs on this platform */
u32 num_spu;
+
+ /* The number of SPU channels on this platform */
+ u32 num_chan;
};
struct device_private {
- struct platform_device *pdev[MAX_SPUS];
+ struct platform_device *pdev;
struct spu_hw spu;
@@ -470,12 +473,10 @@ struct device_private {
/* Number of ICV check failures for AEAD messages */
atomic_t bad_icv;
- struct mbox_client mcl[MAX_SPUS];
- /* Array of mailbox channel pointers, one for each channel */
- struct mbox_chan *mbox[MAX_SPUS];
+ struct mbox_client mcl;
- /* Driver initialized */
- bool inited;
+ /* Array of mailbox channel pointers, one for each channel */
+ struct mbox_chan **mbox;
};
extern struct device_private iproc_priv;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0488b7f81dcf..54f3b375a453 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -81,40 +81,6 @@
#define debug(format, arg...)
#endif
-#ifdef DEBUG
-#include <linux/highmem.h>
-
-static void dbg_dump_sg(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii)
-{
- struct scatterlist *it;
- void *it_page;
- size_t len;
- void *buf;
-
- for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
- /*
- * make sure the scatterlist's page
- * has a valid virtual memory mapping
- */
- it_page = kmap_atomic(sg_page(it));
- if (unlikely(!it_page)) {
- printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
- return;
- }
-
- buf = it_page + it->offset;
- len = min_t(size_t, tlen, it->length);
- print_hex_dump(level, prefix_str, prefix_type, rowsize,
- groupsize, buf, len, ascii);
- tlen -= len;
-
- kunmap_atomic(it_page);
- }
-}
-#endif
-
static struct list_head alg_list;
struct caam_alg_entry {
@@ -898,10 +864,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -937,10 +903,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -1107,10 +1073,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
ivsize, 1);
pr_err("asked=%d, nbytes%d\n",
(int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
- dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -1164,10 +1130,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -1449,11 +1415,9 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
-#ifdef DEBUG
- dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1);
-#endif
+ caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ req->assoclen + req->cryptlen, 1);
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 6f9c7ec0e339..530c14ee32de 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -599,7 +599,7 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
/* skip key loading if they are loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
+ JUMP_COND_SHRD);
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
@@ -688,8 +688,7 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
/* skip key loading if they are loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 78c4c0485c58..2eefc4a26bc2 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -12,7 +12,6 @@
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
-#include "sg_sw_sec4.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
@@ -399,6 +398,7 @@ badkey:
* @iv_dma: dma address of iv for checking continuity and link table
* @qm_sg_bytes: length of dma mapped h/w link table
* @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
* @assoclen_dma: bus physical mapped address of req->assoclen
* @drv_req: driver-specific request structure
* @sgt: the h/w link table
@@ -409,8 +409,12 @@ struct aead_edesc {
dma_addr_t iv_dma;
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_AEAD_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -431,6 +435,9 @@ struct ablkcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_ABLKCIPHER_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -660,6 +667,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
*/
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
@@ -670,7 +685,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->drv_req.cbk = aead_done;
edesc->drv_req.drv_ctx = drv_ctx;
- edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
DMA_TO_DEVICE);
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
@@ -776,9 +792,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *qidev = caam_ctx->qidev;
-#ifdef DEBUG
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+#ifdef DEBUG
dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif
@@ -791,14 +807,21 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(qidev, edesc, req);
qi_cache_free(edesc);
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
ablkcipher_request_complete(req, status);
}
@@ -880,6 +903,15 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
dst_sg_idx = qm_sg_ents;
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
/* allocate space for base edesc and link tables */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (unlikely(!edesc)) {
@@ -892,7 +924,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sg_table = &edesc->sgt[0];
edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
edesc->drv_req.app_ctx = req;
@@ -1026,6 +1057,14 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
qm_sg_ents += 1 + mapped_dst_nents;
}
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
/* allocate space for base edesc and link tables */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (!edesc) {
@@ -1968,7 +2007,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi",
.cra_blocksize = DES_BLOCK_SIZE,
},
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 910ec61cae09..698580b60b2f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -791,8 +791,8 @@ static int ahash_update_ctx(struct ahash_request *req)
to_hash - *buflen,
*next_buflen, 0);
} else {
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
+ 1);
}
desc = edesc->hw_desc;
@@ -882,8 +882,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 41398da3edf4..fde07d4ff019 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -285,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
if (err)
return err;
- err = caam_init_buf(ctx, 1);
- if (err)
- return err;
-
- return 0;
+ return caam_init_buf(ctx, 1);
}
static struct hwrng caam_rng = {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index dd353e342c12..dacb53fb690e 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -17,6 +17,8 @@
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
+bool caam_dpaa2;
+EXPORT_SYMBOL(caam_dpaa2);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
@@ -319,8 +321,11 @@ static int caam_remove(struct platform_device *pdev)
caam_qi_shutdown(ctrlpriv->qidev);
#endif
- /* De-initialize RNG state handles initialized by this driver. */
- if (ctrlpriv->rng4_sh_init)
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
+ */
+ if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
@@ -444,7 +449,6 @@ static int caam_probe(struct platform_device *pdev)
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
- ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
/* Enable clocking */
@@ -553,12 +557,17 @@ static int caam_probe(struct platform_device *pdev)
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
- * long pointers in master configuration register
+ * long pointers in master configuration register.
+ * In case of DPAA 2.x, Management Complex firmware performs
+ * the configuration.
*/
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
- MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+ if (!caam_dpaa2)
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ?
+ MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time paramters and SCFGR to determine
@@ -587,7 +596,9 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR3_START);
if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ if (caam_dpaa2)
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
else
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
@@ -630,11 +641,9 @@ static int caam_probe(struct platform_device *pdev)
ring++;
}
- /* Check to see if QI present. If so, enable */
- ctrlpriv->qi_present =
- !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
- CTPR_MS_QI_MASK);
- if (ctrlpriv->qi_present) {
+ /* Check to see if (DPAA 1.x) QI present. If so, enable */
+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
@@ -662,8 +671,10 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
*/
- if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!caam_dpaa2 &&
+ (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
@@ -731,63 +742,43 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
caam_get_era());
- dev_info(dev, "job rings = %d, qi = %d\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+ caam_dpaa2 ? "yes" : "no");
#ifdef CONFIG_DEBUG_FS
-
- ctrlpriv->ctl_rq_dequeued =
- debugfs_create_file("rq_dequeued",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_req =
- debugfs_create_file("ob_rq_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_req =
- debugfs_create_file("ib_rq_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_bytes =
- debugfs_create_file("ob_bytes_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_prot_bytes =
- debugfs_create_file("ob_bytes_protected",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_bytes =
- debugfs_create_file("ib_bytes_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_valid_bytes =
- debugfs_create_file("ib_bytes_validated",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
- &caam_fops_u64_ro);
+ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->req_dequeued,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+ &caam_fops_u64_ro);
/* Controller level - global status values */
- ctrlpriv->ctl_faultaddr =
- debugfs_create_file("fault_addr",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultdetail =
- debugfs_create_file("fault_detail",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultstatus =
- debugfs_create_file("fault_status",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status,
- &caam_fops_u32_ro);
+ debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultaddr,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultdetail,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->status,
+ &caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index cac5402a46eb..7e7bf68c9ef5 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -10,4 +10,6 @@
/* Prototypes for backend-level services exposed to APIs */
int caam_get_era(void);
+extern bool caam_dpaa2;
+
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 6f44ccb55c63..3d639f3b45aa 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -9,6 +9,46 @@
#include "desc.h"
#include "error.h"
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{
+ struct scatterlist *it;
+ void *it_page;
+ size_t len;
+ void *buf;
+
+ for (it = sg; it && tlen > 0; it = sg_next(it)) {
+ /*
+ * make sure the scatterlist's page
+ * has a valid virtual memory mapping
+ */
+ it_page = kmap_atomic(sg_page(it));
+ if (unlikely(!it_page)) {
+ pr_err("caam_dump_sg: kmap failed\n");
+ return;
+ }
+
+ buf = it_page + it->offset;
+ len = min_t(size_t, tlen, it->length);
+ print_hex_dump(level, prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
+ tlen -= len;
+
+ kunmap_atomic(it_page);
+ }
+}
+#else
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{}
+#endif /* DEBUG */
+EXPORT_SYMBOL(caam_dump_sg);
+
static const struct {
u8 value;
const char *error_text;
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index b6350b0d9153..250e1a21c473 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,4 +8,8 @@
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii);
#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 85b6c5835b8f..a52361258d3a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -64,12 +64,9 @@ struct caam_drv_private_jr {
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
-
- struct device *dev;
#ifdef CONFIG_CAAM_QI
struct device *qidev;
#endif
- struct platform_device *pdev;
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
@@ -105,16 +102,8 @@ struct caam_drv_private {
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
- struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
- struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
- struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
- struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
-
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
-#ifdef CONFIG_CAAM_QI
- struct dentry *qi_congested;
-#endif
#endif
};
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1ccfb317d468..d258953ff488 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -9,6 +9,7 @@
#include <linux/of_address.h>
#include "compat.h"
+#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
@@ -499,7 +500,11 @@ static int caam_jr_probe(struct platform_device *pdev)
jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
+ if (caam_dpaa2)
+ error = dma_set_mask_and_coherent(jrdev,
+ DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop,
+ "fsl,sec-v5.0-job-ring"))
error = dma_set_mask_and_coherent(jrdev,
DMA_BIT_MASK(40));
else
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 1990ed460c46..e4cf00014233 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -24,9 +24,6 @@
*/
#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
-/* Length of a single buffer in the QI driver memory cache */
-#define CAAM_QI_MEMCACHE_SIZE 512
-
#define CAAM_QI_ENQUEUE_RETRIES 10000
#define CAAM_NAPI_WEIGHT 63
@@ -55,6 +52,7 @@ struct caam_qi_pcpu_priv {
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
+static DEFINE_PER_CPU(int, last_cpu);
/*
* caam_qi_priv - CAAM QI backend private params
@@ -203,8 +201,8 @@ static struct qman_fq *create_caam_req_fq(struct device *qidev,
goto init_req_fq_fail;
}
- dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
- smp_processor_id());
+ dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
+ smp_processor_id());
return req_fq;
init_req_fq_fail:
@@ -277,6 +275,7 @@ empty_fq:
dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
qman_destroy_fq(fq);
+ kfree(fq);
return ret;
}
@@ -342,8 +341,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
- dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
- new_fq->fqid);
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
return ret;
}
@@ -373,10 +371,9 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
- dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
- new_fq->fqid);
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
} else if (kill_fq(qidev, old_fq)) {
- dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
+ dev_warn(qidev, "Old CAAM FQ kill failed\n");
}
return 0;
@@ -392,7 +389,6 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
dma_addr_t hwdesc;
struct caam_drv_ctx *drv_ctx;
const cpumask_t *cpus = qman_affine_cpus();
- static DEFINE_PER_CPU(int, last_cpu);
num_words = desc_len(sh_desc);
if (num_words > MAX_SDLEN) {
@@ -511,7 +507,6 @@ int caam_qi_shutdown(struct device *qidev)
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
- kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
/*
@@ -646,7 +641,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
- dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
+ dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
return 0;
}
@@ -679,7 +674,7 @@ static int init_cgr(struct device *qidev)
return ret;
}
- dev_info(qidev, "Congestion threshold set to %llu\n", val);
+ dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
return 0;
}
@@ -737,6 +732,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
qi_pdev = platform_device_register_full(&qi_pdev_info);
if (IS_ERR(qi_pdev))
return PTR_ERR(qi_pdev);
+ set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = &qi_pdev->dev;
@@ -795,10 +791,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
/* Done with the CGRs; restore the cpus allowed mask */
set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
- ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444,
- ctrlpriv->ctl,
- &times_congested,
- &caam_fops_u64_ro);
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ &times_congested, &caam_fops_u64_ro);
#endif
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 33b0433f5f22..ecb21f207637 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -39,6 +39,9 @@
*/
#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 768
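+/*
+ * 768 bytes must be large enough for the biggest QI extended descriptor
+ * plus its h/w link table; the CAAM_QI_MAX_*_SG bounds in caamalg_qi.c
+ * are derived from this value.
+ */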
+
extern bool caam_congested __read_mostly;
/*
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 84d2f838a063..2b5efff9ec3c 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -293,6 +293,7 @@ struct caam_perfmon {
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
#define CTPR_MS_QI_SHIFT 25
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_DPAA2 BIT(13)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
#define CTPR_MS_PG_SZ_MASK 0x10
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
new file mode 100644
index 000000000000..31b440757146
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SG_SW_QM2_H_
+#define _SG_SW_QM2_H_
+
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ dpaa2_sg_set_addr(qm_sg_ptr, dma);
+ dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
+ dpaa2_sg_set_final(qm_sg_ptr, false);
+ dpaa2_sg_set_len(qm_sg_ptr, len);
+ dpaa2_sg_set_bpid(qm_sg_ptr, 0);
+ dpaa2_sg_set_offset(qm_sg_ptr, offset);
+}
+
+/*
+ * convert scatterlist to h/w link table format,
+ * but does not set the final bit; instead, returns the last entry
+ */
+static inline struct dpaa2_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
+{
+ while (sg_count && sg) {
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ qm_sg_ptr++;
+ sg = sg_next(sg);
+ sg_count--;
+ }
+ return qm_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr,
+ u16 offset)
+{
+ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ dpaa2_sg_set_final(qm_sg_ptr, true);
+}
+
+#endif /* _SG_SW_QM2_H_ */
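These helpers assume the scatterlist has already been DMA-mapped, since they only read sg_dma_address()/sg_dma_len(). A minimal caller sketch (hypothetical, not part of this patch):

	static int build_qm_sg_table(struct device *dev, struct scatterlist *sg,
				     int nents, struct dpaa2_sg_entry *tbl)
	{
		/* map first; the helpers consume the mapped addresses */
		int count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

		if (!count)
			return -ENOMEM;

		/* fill the table and flag the last entry as final */
		sg_to_qm_sg_last(sg, count, tbl, 0);
		return 0;
	}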
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index c6adad09c972..936b1b630058 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,7 +5,13 @@
*
*/
+#ifndef _SG_SW_SEC4_H_
+#define _SG_SW_SEC4_H_
+
+#include "ctrl.h"
#include "regs.h"
+#include "sg_sw_qm2.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
struct sec4_sg_entry {
u64 ptr;
@@ -19,9 +25,15 @@ struct sec4_sg_entry {
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
- sec4_sg_ptr->len = cpu_to_caam32(len);
- sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
+ if (caam_dpaa2) {
+ dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
+ offset);
+ } else {
+ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
+ sec4_sg_ptr->len = cpu_to_caam32(len);
+ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
+ SEC4_SG_OFFSET_MASK);
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
@@ -47,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
return sec4_sg_ptr - 1;
}
+static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
+{
+ if (caam_dpaa2)
+ dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
+ else
+ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+}
+
/*
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
@@ -56,20 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
u16 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
- sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(sec4_sg_ptr);
}
-static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
- struct scatterlist *sg, unsigned int total,
- struct sec4_sg_entry *sec4_sg_ptr)
-{
- do {
- unsigned int len = min(sg_dma_len(sg), total);
-
- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
- sec4_sg_ptr++;
- sg = sg_next(sg);
- total -= len;
- } while (total);
- return sec4_sg_ptr - 1;
-}
+#endif /* _SG_SW_SEC4_H_ */
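The caam_dpaa2 branch above casts a sec4_sg_entry pointer straight to a dpaa2_sg_entry pointer, which is only safe because both entry formats should occupy the same 16 bytes. An illustrative compile-time assertion (not part of the patch, placed inside any function using this header) would pin that assumption down:

	BUILD_BUG_ON(sizeof(struct sec4_sg_entry) !=
		     sizeof(struct dpaa2_sg_entry));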
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 4119c40e7c4b..34a6d8bf229e 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -268,8 +268,10 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size)
- return -EINVAL;
+ if (!mcode->code_size) {
+ ret = -EINVAL;
+ goto fw_release;
+ }
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
@@ -280,7 +282,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
&mcode->phys_base, GFP_KERNEL);
if (!mcode->code) {
dev_err(dev, "Unable to allocate space for microcode");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fw_release;
}
memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
@@ -302,12 +305,14 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode);
if (ret) {
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
- return ret;
+ goto fw_release;
}
dev_info(dev, "Microcode Loaded %s\n", mcode->version);
mcode->is_mc_valid = 1;
cpt->next_mc_idx++;
+
+fw_release:
release_firmware(fw_entry);
return ret;
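The reworked error handling is the standard single-exit cleanup idiom: once request_firmware() has succeeded, every path out of the function, success or failure, must pass release_firmware(). Distilled to a sketch, with validate_and_load() standing in for the driver-specific steps:

	static int load_fw_example(struct device *dev, const char *name)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, name, dev);
		if (ret)
			return ret;		/* nothing to free yet */

		ret = validate_and_load(fw);	/* hypothetical consumer */

		/* success and all error paths share one exit */
		release_firmware(fw);
		return ret;
	}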
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 9ccefb9b7232..fee7cb2ce747 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -513,8 +513,10 @@ static int nitrox_probe(struct pci_dev *pdev,
pci_set_master(pdev);
ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
- if (!ndev)
+ if (!ndev) {
+ err = -ENOMEM;
goto ndev_fail;
+ }
pci_set_drvdata(pdev, ndev);
ndev->pdev = pdev;
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 2238f77aa248..6d626606b9c5 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,25 +1,33 @@
config CRYPTO_DEV_CCP_DD
- tristate "Cryptographic Coprocessor device driver"
- depends on CRYPTO_DEV_CCP
+ tristate "Secure Processor device driver"
default m
+ help
+ Provides the AMD Secure Processor device driver.
+ If you choose 'M' here, this module will be called ccp.
+
+config CRYPTO_DEV_SP_CCP
+ bool "Cryptographic Coprocessor device"
+ default y
+ depends on CRYPTO_DEV_CCP_DD
select HW_RANDOM
select DMA_ENGINE
select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
- Provides the interface to use the AMD Cryptographic Coprocessor
- which can be used to offload encryption operations such as SHA,
- AES and more. If you choose 'M' here, this module will be called
- ccp.
+ Provides support for the AMD Cryptographic Coprocessor (CCP) device
+ which can be used to offload encryption operations such as SHA, AES
+ and more.
config CRYPTO_DEV_CCP_CRYPTO
tristate "Encryption and hashing offload support"
- depends on CRYPTO_DEV_CCP_DD
default m
+ depends on CRYPTO_DEV_CCP_DD
+ depends on CRYPTO_DEV_SP_CCP
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
+ select CRYPTO_RSA
help
Support for using the cryptographic API with the AMD Cryptographic
Coprocessor. This module supports offload of SHA and AES algorithms.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 59493fd3a751..57f8debfcfb3 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,12 +1,12 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o \
+ccp-objs := sp-dev.o sp-platform.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-ops.o \
ccp-dev-v3.o \
ccp-dev-v5.o \
- ccp-platform.o \
ccp-dmaengine.o \
ccp-debugfs.o
-ccp-$(CONFIG_PCI) += ccp-pci.o
+ccp-$(CONFIG_PCI) += sp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
@@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes-xts.o \
ccp-crypto-aes-galois.o \
ccp-crypto-des3.o \
+ ccp-crypto-rsa.o \
ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 38ee6f348ea9..52313524a4dd 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 58a4244b4752..94b5bcf5b628 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -1,8 +1,9 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
+ * Author: Gary R Hook <gary.hook@amd.com>
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -15,6 +16,7 @@
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -37,46 +39,26 @@ struct ccp_unit_size_map {
u32 value;
};
-static struct ccp_unit_size_map unit_size_map[] = {
+static struct ccp_unit_size_map xts_unit_sizes[] = {
{
- .size = 4096,
- .value = CCP_XTS_AES_UNIT_SIZE_4096,
- },
- {
- .size = 2048,
- .value = CCP_XTS_AES_UNIT_SIZE_2048,
- },
- {
- .size = 1024,
- .value = CCP_XTS_AES_UNIT_SIZE_1024,
+ .size = 16,
+ .value = CCP_XTS_AES_UNIT_SIZE_16,
},
{
- .size = 512,
+ .size = 512,
.value = CCP_XTS_AES_UNIT_SIZE_512,
},
{
- .size = 256,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 128,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 64,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 32,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ .size = 1024,
+ .value = CCP_XTS_AES_UNIT_SIZE_1024,
},
{
- .size = 16,
- .value = CCP_XTS_AES_UNIT_SIZE_16,
+ .size = 2048,
+ .value = CCP_XTS_AES_UNIT_SIZE_2048,
},
{
- .size = 1,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ .size = 4096,
+ .value = CCP_XTS_AES_UNIT_SIZE_4096,
},
};
@@ -96,15 +78,26 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
+ struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+ unsigned int ccpversion = ccp_version();
+ int ret;
- /* Only support 128-bit AES key with a 128-bit Tweak key,
- * otherwise use the fallback
+ ret = xts_check_key(xfm, key, key_len);
+ if (ret)
+ return ret;
+
+ /* Version 3 devices support 128-bit keys; version 5 devices can
+ * accommodate 128- and 256-bit keys.
*/
switch (key_len) {
case AES_KEYSIZE_128 * 2:
memcpy(ctx->u.aes.key, key, key_len);
break;
+ case AES_KEYSIZE_256 * 2:
+ if (ccpversion > CCP_VERSION(3, 0))
+ memcpy(ctx->u.aes.key, key, key_len);
+ break;
}
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
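In XTS the caller supplies the cipher key and the tweak key back to back, so key_len is twice the AES key size and key_len / 2 recovers the per-key width used in the checks above. A hypothetical caller programming XTS-AES-256 therefore passes 64 bytes:

	u8 xts_key[AES_KEYSIZE_256 * 2];	/* cipher key || tweak key */

	get_random_bytes(xts_key, sizeof(xts_key));
	ret = crypto_ablkcipher_setkey(tfm, xts_key, sizeof(xts_key));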
@@ -117,6 +110,8 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ unsigned int ccpversion = ccp_version();
+ unsigned int fallback = 0;
unsigned int unit;
u32 unit_size;
int ret;
@@ -130,18 +125,32 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;
+ /* Check conditions under which the CCP can fulfill a request. The
+ * device can handle input plaintext of a length that is a multiple
+ * of the unit_size, but the crypto implementation only supports
+ * the unit_size being equal to the input length. This limits the
+ * number of scenarios we can handle.
+ */
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
- if (req->nbytes <= unit_size_map[0].size) {
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
- if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
- unit_size = unit_size_map[unit].value;
- break;
- }
+ for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
+ if (req->nbytes == xts_unit_sizes[unit].size) {
+ unit_size = unit;
+ break;
}
}
-
- if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
- (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ /* The CCP has restrictions on block sizes. Also, a version 3 device
+ * only supports AES-128 operations; version 5 CCPs support both
+ * AES-128 and -256 operations.
+ */
+ if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
+ fallback = 1;
+ if ((ccpversion < CCP_VERSION(5, 0)) &&
+ (ctx->u.aes.key_len != AES_KEYSIZE_128))
+ fallback = 1;
+ if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
+ (ctx->u.aes.key_len != AES_KEYSIZE_256))
+ fallback = 1;
+ if (fallback) {
SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
/* Use the fallback to process the request for any
@@ -164,6 +173,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
rctx->cmd.u.xts.unit_size = unit_size;
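The three fallback conditions reduce to a single predicate: the request stays on the CCP only when its length matches a supported unit size exactly and the key width is legal for this silicon revision. A condensed sketch of the same logic (not verbatim driver code):

	static bool ccp_xts_needs_fallback(unsigned int unit_size,
					   unsigned int key_len,
					   unsigned int version)
	{
		if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
			return true;		/* length not supported */
		if (version < CCP_VERSION(5, 0) &&
		    key_len != AES_KEYSIZE_128)
			return true;		/* v3 handles AES-128 only */
		return key_len != AES_KEYSIZE_128 &&
		       key_len != AES_KEYSIZE_256;	/* v5: 128 or 256 */
	}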
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index 5af7347ae03c..ae87b741f9d5 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <ghook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 8dccbddabef1..35a9de7fd475 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -17,6 +17,7 @@
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/akcipher.h>
#include "ccp-crypto.h"
@@ -37,10 +38,15 @@ static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
+static unsigned int rsa_disable;
+module_param(rsa_disable, uint, 0444);
+MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
+
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
+static LIST_HEAD(akcipher_algs);
/* For any tfm, requests for that tfm must be returned on the order
* received. With multiple queues available, the CCP can process more
@@ -358,6 +364,12 @@ static int ccp_register_algs(void)
return ret;
}
+ if (!rsa_disable) {
+ ret = ccp_register_rsa_algs(&akcipher_algs);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -366,6 +378,7 @@ static void ccp_unregister_algs(void)
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
+ struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
crypto_unregister_ahash(&ahash_alg->alg);
@@ -384,6 +397,12 @@ static void ccp_unregister_algs(void)
list_del(&aead_alg->entry);
kfree(aead_alg);
}
+
+ list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
+ crypto_unregister_akcipher(&akc_alg->alg);
+ list_del(&akc_alg->entry);
+ kfree(akc_alg);
+ }
}
static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
new file mode 100644
index 000000000000..e6db8672d89c
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -0,0 +1,299 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static inline struct akcipher_request *akcipher_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct akcipher_request, base);
+}
+
+static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
+ const u8 *buf, size_t sz)
+{
+ int nskip;
+
+ for (nskip = 0; nskip < sz; nskip++)
+ if (buf[nskip])
+ break;
+ *kplen = sz - nskip;
+ *kpbuf = kzalloc(*kplen, GFP_KERNEL);
+ if (!*kpbuf)
+ return -ENOMEM;
+ memcpy(*kpbuf, buf + nskip, *kplen);
+
+ return 0;
+}
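The skip loop strips the leading zero bytes that ASN.1 integer encoding adds so the value is not read as negative. Worked example: a 2048-bit modulus parsed by rsa_parse_pub_key() typically arrives as 257 bytes with buf[0] == 0x00; the loop stops at nskip == 1 and the helper saves exactly the 256 significant bytes:

	/* e.g. raw_key.n_sz == 257 with a single 0x00 pad byte */
	ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len,
					raw_key.n, raw_key.n_sz);
	/* on success: ctx->u.rsa.n_len == 256 */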
+
+static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct akcipher_request *req = akcipher_request_cast(async_req);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
+
+ return 0;
+}
+
+static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+{
+ if (ccp_version() > CCP_VERSION(3, 0))
+ return CCP5_RSA_MAXMOD;
+ else
+ return CCP_RSA_MAXMOD;
+}
+
+static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+ int ret = 0;
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_RSA;
+
+ rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
+ if (encrypt) {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
+ } else {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
+ }
+ rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
+ rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
+ rctx->cmd.u.rsa.src = req->src;
+ rctx->cmd.u.rsa.src_len = req->src_len;
+ rctx->cmd.u.rsa.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_rsa_encrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, true);
+}
+
+static int ccp_rsa_decrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, false);
+}
+
+static int ccp_check_key_length(unsigned int len)
+{
+ /* In bits */
+ if (len < 8 || len > 4096)
+ return -EINVAL;
+ return 0;
+}
+
+static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
+{
+ /* Clean up old key data */
+ kzfree(ctx->u.rsa.e_buf);
+ ctx->u.rsa.e_buf = NULL;
+ ctx->u.rsa.e_len = 0;
+ kzfree(ctx->u.rsa.n_buf);
+ ctx->u.rsa.n_buf = NULL;
+ ctx->u.rsa.n_len = 0;
+ kzfree(ctx->u.rsa.d_buf);
+ ctx->u.rsa.d_buf = NULL;
+ ctx->u.rsa.d_len = 0;
+}
+
+static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key;
+ int ret;
+
+ ccp_rsa_free_key_bufs(ctx);
+ memset(&raw_key, 0, sizeof(raw_key));
+
+ /* Code borrowed from crypto/rsa.c */
+ if (private)
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret)
+ goto n_key;
+
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len,
+ raw_key.n, raw_key.n_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
+
+ ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */
+ if (ccp_check_key_length(ctx->u.rsa.key_len)) {
+ ret = -EINVAL;
+ goto key_err;
+ }
+
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len,
+ raw_key.e, raw_key.e_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);
+
+ if (private) {
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf,
+ &ctx->u.rsa.d_len,
+ raw_key.d, raw_key.d_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.d_sg,
+ ctx->u.rsa.d_buf, ctx->u.rsa.d_len);
+ }
+
+ return 0;
+
+key_err:
+ ccp_rsa_free_key_bufs(ctx);
+
+n_key:
+ return ret;
+}
+
+static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
+ ctx->complete = ccp_rsa_complete;
+
+ return 0;
+}
+
+static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+ ccp_rsa_free_key_bufs(ctx);
+}
+
+static struct akcipher_alg ccp_rsa_defaults = {
+ .encrypt = ccp_rsa_encrypt,
+ .decrypt = ccp_rsa_decrypt,
+ .sign = ccp_rsa_decrypt,
+ .verify = ccp_rsa_encrypt,
+ .set_pub_key = ccp_rsa_setpubkey,
+ .set_priv_key = ccp_rsa_setprivkey,
+ .max_size = ccp_rsa_maxsize,
+ .init = ccp_rsa_init_tfm,
+ .exit = ccp_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-ccp",
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = 2 * sizeof(struct ccp_ctx),
+ },
+};
+
+struct ccp_rsa_def {
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ unsigned int reqsize;
+ struct akcipher_alg *alg_defaults;
+};
+
+static struct ccp_rsa_def rsa_algs[] = {
+ {
+ .version = CCP_VERSION(3, 0),
+ .name = "rsa",
+ .driver_name = "rsa-ccp",
+ .reqsize = sizeof(struct ccp_rsa_req_ctx),
+ .alg_defaults = &ccp_rsa_defaults,
+ }
+};
+
+int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+{
+ struct ccp_crypto_akcipher_alg *ccp_alg;
+ struct akcipher_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ ret = crypto_register_akcipher(alg);
+ if (ret) {
+ pr_err("%s akcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_rsa_algs(struct list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ /* Register the RSA algorithm in standard mode
+ * This works for CCP v3 and later
+ */
+ for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
+ if (rsa_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
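Once registered, the driver is reached through the generic akcipher API rather than called directly. A minimal kernel-side consumer sketch, assuming a DER-encoded public key in pub_key[] of pub_key_len bytes:

	struct crypto_akcipher *tfm;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, pub_key, pub_key_len);
	if (!ret) {
		/* build an akcipher_request, then crypto_akcipher_encrypt() */
	}
	crypto_free_akcipher(tfm);
	return ret;

Because the algorithm is registered at CCP_CRA_PRIORITY, it should be preferred over lower-priority software rsa implementations whenever the device is present.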
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index ce97b3868f4a..8b9b16d433f7 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index dd5bf15f06e5..b9fd090c46c2 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -24,6 +24,8 @@
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/rsa.h>
#define CCP_LOG_LEVEL KERN_INFO
@@ -58,6 +60,12 @@ struct ccp_crypto_ahash_alg {
struct ahash_alg alg;
};
+struct ccp_crypto_akcipher_alg {
+ struct list_head entry;
+
+ struct akcipher_alg alg;
+};
+
static inline struct ccp_crypto_ablkcipher_alg *
ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
{
@@ -91,7 +99,7 @@ struct ccp_aes_ctx {
struct scatterlist key_sg;
unsigned int key_len;
- u8 key[AES_MAX_KEY_SIZE];
+ u8 key[AES_MAX_KEY_SIZE * 2];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
@@ -227,12 +235,35 @@ struct ccp_sha_exp_ctx {
u8 buf[MAX_SHA_BLOCK_SIZE];
};
+/***** RSA related defines *****/
+
+struct ccp_rsa_ctx {
+ unsigned int key_len; /* in bits */
+ struct scatterlist e_sg;
+ u8 *e_buf;
+ unsigned int e_len;
+ struct scatterlist n_sg;
+ u8 *n_buf;
+ unsigned int n_len;
+ struct scatterlist d_sg;
+ u8 *d_buf;
+ unsigned int d_len;
+};
+
+struct ccp_rsa_req_ctx {
+ struct ccp_cmd cmd;
+};
+
+#define CCP_RSA_MAXMOD (4 * 1024 / 8)
+#define CCP5_RSA_MAXMOD (16 * 1024 / 8)
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
union {
struct ccp_aes_ctx aes;
+ struct ccp_rsa_ctx rsa;
struct ccp_sha_ctx sha;
struct ccp_des3_ctx des3;
} u;
@@ -249,5 +280,6 @@ int ccp_register_aes_xts_algs(struct list_head *head);
int ccp_register_aes_aeads(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);
int ccp_register_des3_algs(struct list_head *head);
+int ccp_register_rsa_algs(struct list_head *head);
#endif
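The two MAXMOD constants are modulus limits expressed in bytes: 4 * 1024 / 8 = 512 for the 4096-bit ceiling of a version 3 device and 16 * 1024 / 8 = 2048 for the 16384-bit ceiling of version 5 hardware, which is what ccp_rsa_maxsize() reports to the crypto API. Illustrative compile-time checks of that arithmetic:

	BUILD_BUG_ON(CCP_RSA_MAXMOD != 512);	/* 4096 bits, in bytes */
	BUILD_BUG_ON(CCP5_RSA_MAXMOD != 2048);	/* 16384 bits, in bytes */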
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 3cd6c83754e0..59d4ca4e72d8 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -305,19 +305,19 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);
if (!ccp->debugfs_instance)
- return;
+ goto err;
debugfs_info = debugfs_create_file("info", 0400,
ccp->debugfs_instance, ccp,
&ccp_debugfs_info_ops);
if (!debugfs_info)
- return;
+ goto err;
debugfs_stats = debugfs_create_file("stats", 0600,
ccp->debugfs_instance, ccp,
&ccp_debugfs_stats_ops);
if (!debugfs_stats)
- return;
+ goto err;
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -327,15 +327,20 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
debugfs_q_instance =
debugfs_create_dir(name, ccp->debugfs_instance);
if (!debugfs_q_instance)
- return;
+ goto err;
debugfs_q_stats =
debugfs_create_file("stats", 0600,
debugfs_q_instance, cmd_q,
&ccp_debugfs_queue_ops);
if (!debugfs_q_stats)
- return;
+ goto err;
}
+
+ return;
+
+err:
+ debugfs_remove_recursive(ccp->debugfs_instance);
}
void ccp5_debugfs_destroy(void)
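The new err label replaces four silent early returns with one teardown: debugfs_remove_recursive() deletes the instance directory and everything created beneath it (and tolerates a NULL dentry), so a partially built tree can no longer leak. The same idiom in miniature, with priv and example_fops as placeholders:

	struct dentry *root = debugfs_create_dir("example", NULL);

	if (!debugfs_create_file("stats", 0600, root, priv, &example_fops))
		debugfs_remove_recursive(root);	/* root and all children */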
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 367c2e30656f..240bebbcb8ac 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -359,8 +359,7 @@ static void ccp_irq_bh(unsigned long data)
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_device *ccp = (struct ccp_device *)data;
ccp_disable_queue_interrupts(ccp);
if (ccp->use_tasklet)
@@ -454,7 +453,7 @@ static int ccp_init(struct ccp_device *ccp)
iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
/* Request an irq */
- ret = ccp->get_irq(ccp);
+ ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
@@ -511,7 +510,7 @@ e_kthread:
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
e_pool:
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -550,7 +549,7 @@ static void ccp_destroy(struct ccp_device *ccp)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
for (i = 0; i < ccp->cmd_q_count; i++)
dma_pool_destroy(ccp->cmd_q[i].dma_pool);
@@ -586,10 +585,17 @@ static const struct ccp_actions ccp3_actions = {
.irqhandler = ccp_irq_handler,
};
+const struct ccp_vdata ccpv3_platform = {
+ .version = CCP_VERSION(3, 0),
+ .setup = NULL,
+ .perform = &ccp3_actions,
+ .offset = 0,
+};
+
const struct ccp_vdata ccpv3 = {
.version = CCP_VERSION(3, 0),
.setup = NULL,
.perform = &ccp3_actions,
- .bar = 2,
.offset = 0x20000,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index b10d2d2075cb..65604fc65e8f 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
@@ -145,6 +145,7 @@ union ccp_function {
#define CCP_AES_MODE(p) ((p)->aes.mode)
#define CCP_AES_TYPE(p) ((p)->aes.type)
#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
+#define CCP_XTS_TYPE(p) ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p) ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
@@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
+ CCP_XTS_TYPE(&function) = op->u.xts.type;
CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
CCP5_CMD_FUNCTION(&desc) = function.raw;
@@ -469,7 +471,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
+ CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -484,10 +486,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Exponent is in LSB memory */
- CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
- CCP5_CMD_KEY_HI(&desc) = 0;
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+ /* Key (Exponent) is in external memory */
+ CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+ CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -769,8 +771,7 @@ static void ccp5_irq_bh(unsigned long data)
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_device *ccp = (struct ccp_device *)data;
ccp5_disable_queue_interrupts(ccp);
ccp->total_interrupts++;
@@ -881,7 +882,7 @@ static int ccp5_init(struct ccp_device *ccp)
dev_dbg(dev, "Requesting an IRQ...\n");
/* Request an irq */
- ret = ccp->get_irq(ccp);
+ ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
@@ -987,7 +988,7 @@ e_kthread:
kthread_stop(ccp->cmd_q[i].kthread);
e_irq:
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
e_pool:
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -1037,7 +1038,7 @@ static void ccp5_destroy(struct ccp_device *ccp)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -1106,15 +1107,14 @@ static const struct ccp_actions ccp5_actions = {
.init = ccp5_init,
.destroy = ccp5_destroy,
.get_free_slots = ccp5_get_free_slots,
- .irqhandler = ccp5_irq_handler,
};
const struct ccp_vdata ccpv5a = {
.version = CCP_VERSION(5, 0),
.setup = ccp5_config,
.perform = &ccp5_actions,
- .bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv5b = {
@@ -1122,6 +1122,6 @@ const struct ccp_vdata ccpv5b = {
.dma_chan_attr = DMA_PRIVATE,
.setup = ccp5other_config,
.perform = &ccp5_actions,
- .bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};
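Two details of the v5 RSA changes are easy to miss. The size field now rounds bit counts up to whole bytes, e.g.:

	/* mod_size = 2049 bits:  (2049 + 7) >> 3 = 257 bytes, whereas the
	 * old 2049 >> 3 truncated to 256 and silently dropped the top bit
	 */

And the exponent is no longer staged in a local storage block but DMA'd from system memory, which is what permits moduli larger than an LSB entry, up to the new 16384-bit rsamax.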
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2506b5025700..4e029b176641 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -11,7 +11,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
@@ -30,12 +29,6 @@
#include "ccp-dev.h"
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.1.0");
-MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
-
struct ccp_tasklet_data {
struct completion completion;
struct ccp_cmd *cmd;
@@ -111,13 +104,6 @@ static LIST_HEAD(ccp_units);
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
-/* Ever-increasing value to produce unique unit numbers */
-static atomic_t ccp_unit_ordinal;
-static unsigned int ccp_increment_unit_ordinal(void)
-{
- return atomic_inc_return(&ccp_unit_ordinal);
-}
-
/**
* ccp_add_device - add a CCP device to the list
*
@@ -415,6 +401,7 @@ static void ccp_do_cmd_complete(unsigned long data)
struct ccp_cmd *cmd = tdata->cmd;
cmd->callback(cmd->data, cmd->ret);
+
complete(&tdata->completion);
}
@@ -464,14 +451,17 @@ int ccp_cmd_queue_thread(void *data)
*
* @dev: device struct of the CCP
*/
-struct ccp_device *ccp_alloc_struct(struct device *dev)
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
+ struct device *dev = sp->dev;
struct ccp_device *ccp;
ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
if (!ccp)
return NULL;
ccp->dev = dev;
+ ccp->sp = sp;
+ ccp->axcache = sp->axcache;
INIT_LIST_HEAD(&ccp->cmd);
INIT_LIST_HEAD(&ccp->backlog);
@@ -486,9 +476,8 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
init_waitqueue_head(&ccp->sb_queue);
init_waitqueue_head(&ccp->suspend_queue);
- ccp->ord = ccp_increment_unit_ordinal();
- snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
- snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
+ snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
+ snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);
return ccp;
}
@@ -538,55 +527,100 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-#endif
-static int __init ccp_mod_init(void)
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
-#ifdef CONFIG_X86
- int ret;
+ struct ccp_device *ccp = sp->ccp_data;
+ unsigned long flags;
+ unsigned int i;
- ret = ccp_pci_init();
- if (ret)
- return ret;
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
- /* Don't leave the driver loaded if init failed */
- if (ccp_present() != 0) {
- ccp_pci_exit();
- return -ENODEV;
+ ccp->suspending = 1;
+
+ /* Wake all the queue kthreads to prepare for suspend */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* Wait for all queue kthreads to say they're done */
+ while (!ccp_queues_suspended(ccp))
+ wait_event_interruptible(ccp->suspend_queue,
+ ccp_queues_suspended(ccp));
+
+ return 0;
+}
+
+int ccp_dev_resume(struct sp_device *sp)
+{
+ struct ccp_device *ccp = sp->ccp_data;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 0;
+
+ /* Wake up all the kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ ccp->cmd_q[i].suspended = 0;
+ wake_up_process(ccp->cmd_q[i].kthread);
}
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
return 0;
+}
#endif
-#ifdef CONFIG_ARM64
+int ccp_dev_init(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct ccp_device *ccp;
int ret;
- ret = ccp_platform_init();
+ ret = -ENOMEM;
+ ccp = ccp_alloc_struct(sp);
+ if (!ccp)
+ goto e_err;
+ sp->ccp_data = ccp;
+
+ ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ccp->use_tasklet = sp->use_tasklet;
+
+ ccp->io_regs = sp->io_map + ccp->vdata->offset;
+ if (ccp->vdata->setup)
+ ccp->vdata->setup(ccp);
+
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
- return ret;
+ goto e_err;
- /* Don't leave the driver loaded if init failed */
- if (ccp_present() != 0) {
- ccp_platform_exit();
- return -ENODEV;
- }
+ dev_notice(dev, "ccp enabled\n");
return 0;
-#endif
- return -ENODEV;
+e_err:
+ sp->ccp_data = NULL;
+
+ dev_notice(dev, "ccp initialization failed\n");
+
+ return ret;
}
-static void __exit ccp_mod_exit(void)
+void ccp_dev_destroy(struct sp_device *sp)
{
-#ifdef CONFIG_X86
- ccp_pci_exit();
-#endif
+ struct ccp_device *ccp = sp->ccp_data;
-#ifdef CONFIG_ARM64
- ccp_platform_exit();
-#endif
-}
+ if (!ccp)
+ return;
-module_init(ccp_mod_init);
-module_exit(ccp_mod_exit);
+ ccp->vdata->perform->destroy(ccp);
+}
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a70154ac7405..6810b65c1939 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -27,6 +27,8 @@
#include <linux/irqreturn.h>
#include <linux/dmaengine.h>
+#include "sp-dev.h"
+
#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -192,6 +194,7 @@
#define CCP_AES_CTX_SB_COUNT 1
#define CCP_XTS_AES_KEY_SB_COUNT 1
+#define CCP5_XTS_AES_KEY_SB_COUNT 2
#define CCP_XTS_AES_CTX_SB_COUNT 1
#define CCP_DES3_KEY_SB_COUNT 1
@@ -200,6 +203,7 @@
#define CCP_SHA_SB_COUNT 1
#define CCP_RSA_MAX_WIDTH 4096
+#define CCP5_RSA_MAX_WIDTH 16384
#define CCP_PASSTHRU_BLOCKSIZE 256
#define CCP_PASSTHRU_MASKSIZE 32
@@ -344,12 +348,11 @@ struct ccp_device {
char rngname[MAX_CCP_NAME_LEN];
struct device *dev;
+ struct sp_device *sp;
/* Bus specific device information
*/
void *dev_specific;
- int (*get_irq)(struct ccp_device *ccp);
- void (*free_irq)(struct ccp_device *ccp);
unsigned int qim;
unsigned int irq;
bool use_tasklet;
@@ -362,7 +365,6 @@ struct ccp_device {
* them.
*/
struct mutex req_mutex ____cacheline_aligned;
- void __iomem *io_map;
void __iomem *io_regs;
/* Master lists that all cmds are queued on. Because there can be
@@ -497,6 +499,7 @@ struct ccp_aes_op {
};
struct ccp_xts_aes_op {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
};
@@ -626,18 +629,12 @@ struct ccp5_desc {
struct dword7 dw7;
};
-int ccp_pci_init(void);
-void ccp_pci_exit(void);
-
-int ccp_platform_init(void);
-void ccp_platform_exit(void);
-
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
extern void ccp_log_error(struct ccp_device *, int);
-struct ccp_device *ccp_alloc_struct(struct device *dev);
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
bool ccp_queues_suspended(struct ccp_device *ccp);
int ccp_cmd_queue_thread(void *data);
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
@@ -669,16 +666,7 @@ struct ccp_actions {
irqreturn_t (*irqhandler)(int, void *);
};
-/* Structure to hold CCP version-specific values */
-struct ccp_vdata {
- const unsigned int version;
- const unsigned int dma_chan_attr;
- void (*setup)(struct ccp_device *);
- const struct ccp_actions *perform;
- const unsigned int bar;
- const unsigned int offset;
-};
-
+extern const struct ccp_vdata ccpv3_platform;
extern const struct ccp_vdata ccpv3;
extern const struct ccp_vdata ccpv5a;
extern const struct ccp_vdata ccpv5b;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e00be01fbf5a..901343dd513e 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c0dfdacbdff5..406b95329b3d 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -168,7 +168,7 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
- if (!wa->dma.address)
+ if (dma_mapping_error(wa->dev, wa->dma.address))
return -ENOMEM;
wa->dma.length = len;
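The old !wa->dma.address test was wrong on two counts: dma_map_single() does not return 0 on failure on every platform (IOMMU and swiotlb configurations return a platform-specific error cookie), and 0 can itself be a valid DMA address. The API-correct pattern now used:

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* never compare the cookie against 0 */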
@@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_op op;
unsigned int unit_size, dm_offset;
bool in_place = false;
+ unsigned int sb_count;
+ enum ccp_aes_type aestype;
int ret;
switch (xts->unit_size) {
@@ -1061,7 +1063,11 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
- if (xts->key_len != AES_KEYSIZE_128)
+ if (xts->key_len == AES_KEYSIZE_128)
+ aestype = CCP_AES_TYPE_128;
+ else if (xts->key_len == AES_KEYSIZE_256)
+ aestype = CCP_AES_TYPE_256;
+ else
return -EINVAL;
if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
@@ -1083,23 +1089,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
op.sb_key = cmd_q->sb_key;
op.sb_ctx = cmd_q->sb_ctx;
op.init = 1;
+ op.u.xts.type = aestype;
op.u.xts.action = xts->action;
op.u.xts.unit_size = xts->unit_size;
- /* All supported key sizes fit in a single (32-byte) SB entry
- * and must be in little endian format. Use the 256-bit byte
- * swap passthru option to convert from big endian to little
- * endian.
+ /* A version 3 device only supports 128-bit keys, which fit into a
+ * single SB entry. A version 5 device uses a 512-bit vector, so two
+ * SB entries.
*/
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ sb_count = CCP_XTS_AES_KEY_SB_COUNT;
+ else
+ sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
ret = ccp_init_dm_workarea(&key, cmd_q,
- CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+ sb_count * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
- dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+ /* All supported key sizes must be in little endian format.
+ * Use the 256-bit byte swap passthru option to convert from
+ * big endian to little endian.
+ */
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+ ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ } else {
+ /* Version 5 CCPs use a 512-bit space for the key: each portion
+ * occupies 256 bits, or one entire slot, and is zero-padded.
+ */
+ unsigned int pad;
+
+ dm_offset = CCP_SB_BYTES;
+ pad = dm_offset - xts->key_len;
+ ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+ xts->key_len);
+ }
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
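Reading the copy offsets back, the two generations lay the key material out quite differently (a sketch derived from the dm_offset/pad arithmetic above; the v3 byte ranges assume AES-128):

	/* v3: one 32-byte SB entry, halves swapped for the byte-swap passthru:
	 *	bytes  0..15	tweak key (second half of xts->key)
	 *	bytes 16..31	cipher key (first half of xts->key)
	 * v5: two 32-byte slots, each half left-padded with zeroes to 256 bits:
	 *	slot 0:  pad || cipher key
	 *	slot 1:  pad || tweak key
	 */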
@@ -1731,42 +1758,53 @@ e_ctx:
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_rsa_engine *rsa = &cmd->u.rsa;
- struct ccp_dm_workarea exp, src;
- struct ccp_data dst;
+ struct ccp_dm_workarea exp, src, dst;
struct ccp_op op;
unsigned int sb_count, i_len, o_len;
int ret;
- if (rsa->key_size > CCP_RSA_MAX_WIDTH)
+ /* Check against the maximum allowable size, in bits */
+ if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
return -EINVAL;
if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
return -EINVAL;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
/* The RSA modulus must precede the message being acted upon, so
* it must be copied to a DMA area where the message and the
* modulus can be concatenated. Therefore the input buffer
* length required is twice the output buffer length (which
- * must be a multiple of 256-bits).
+ * must be a multiple of 256 bits). Compute o_len and i_len in bytes.
+ * Buffer sizes must be a multiple of 32 bytes; rounding up may be
+ * required.
*/
- o_len = ((rsa->key_size + 255) / 256) * 32;
+ o_len = 32 * ((rsa->key_size + 255) / 256);
i_len = o_len * 2;
- sb_count = o_len / CCP_SB_BYTES;
-
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
-
- if (!op.sb_key)
- return -EIO;
+ sb_count = 0;
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+ /* sb_count is the number of storage block slots required
+ * for the modulus.
+ */
+ sb_count = o_len / CCP_SB_BYTES;
+ op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
+ sb_count);
+ if (!op.sb_key)
+ return -EIO;
+ } else {
+ /* A version 5 device allows a modulus size that will not fit
+ * in the LSB, so the command will transfer it from memory.
+ * Set the sb key to the default, even though it's not used.
+ */
+ op.sb_key = cmd_q->sb_key;
+ }
- /* The RSA exponent may span multiple (32-byte) SB entries and must
- * be in little endian format. Reverse copy each 32-byte chunk
- * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
- * and each byte within that chunk and do not perform any byte swap
- * operations on the passthru operation.
+ /* The RSA exponent must be in little endian format. Reverse its
+ * byte order.
*/
ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
if (ret)
@@ -1775,11 +1813,22 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
if (ret)
goto e_exp;
- ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
- CCP_PASSTHRU_BYTESWAP_NOOP);
- if (ret) {
- cmd->engine_error = cmd_q->cmd_error;
- goto e_exp;
+
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+ /* Copy the exponent to the local storage block, using
+ * as many 32-byte blocks as were allocated above. It's
+ * already little endian, so no further change is required.
+ */
+ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_exp;
+ }
+ } else {
+ /* The exponent can be retrieved from memory via DMA. */
+ op.exp.u.dma.address = exp.dma.address;
+ op.exp.u.dma.offset = 0;
}
/* Concatenate the modulus and the message. Both the modulus and
@@ -1798,8 +1847,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_src;
/* Prepare the output area for the operation */
- ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
- o_len, DMA_FROM_DEVICE);
+ ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
if (ret)
goto e_src;
@@ -1807,7 +1855,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.src.u.dma.address = src.dma.address;
op.src.u.dma.offset = 0;
op.src.u.dma.length = i_len;
- op.dst.u.dma.address = dst.dm_wa.dma.address;
+ op.dst.u.dma.address = dst.dma.address;
op.dst.u.dma.offset = 0;
op.dst.u.dma.length = o_len;
@@ -1820,10 +1868,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}
- ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
+ ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);
e_dst:
- ccp_free_data(&dst, cmd_q);
+ ccp_dm_free(&dst);
e_src:
ccp_dm_free(&src);
@@ -1832,7 +1880,8 @@ e_exp:
ccp_dm_free(&exp);
e_sb:
- cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
+ if (sb_count)
+ cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
return ret;
}
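A worked example makes the sizing concrete. For a 2048-bit key, o_len = 32 * ((2048 + 255) / 256) = 32 * 8 = 256 bytes; i_len = 2 * o_len = 512 bytes for the concatenated modulus-plus-message input; and on a version 3 device sb_count = o_len / CCP_SB_BYTES = 256 / 32 = 8 storage-block slots, while a version 5 device skips the allocation and streams both key and data from system memory.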
@@ -1992,7 +2041,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
/* Load the mask */
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
deleted file mode 100644
index e880d4cf4ada..000000000000
--- a/drivers/crypto/ccp/ccp-pci.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * AMD Cryptographic Coprocessor (CCP) driver
- *
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
- *
- * Author: Tom Lendacky <thomas.lendacky@amd.com>
- * Author: Gary R Hook <gary.hook@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/dma-mapping.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-
-#include "ccp-dev.h"
-
-#define MSIX_VECTORS 2
-
-struct ccp_msix {
- u32 vector;
- char name[16];
-};
-
-struct ccp_pci {
- int msix_count;
- struct ccp_msix msix[MSIX_VECTORS];
-};
-
-static int ccp_get_msix_irqs(struct ccp_device *ccp)
-{
- struct ccp_pci *ccp_pci = ccp->dev_specific;
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct msix_entry msix_entry[MSIX_VECTORS];
- unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
- int v, ret;
-
- for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
- msix_entry[v].entry = v;
-
- ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
- if (ret < 0)
- return ret;
-
- ccp_pci->msix_count = ret;
- for (v = 0; v < ccp_pci->msix_count; v++) {
- /* Set the interrupt names and request the irqs */
- snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
- ccp->name, v);
- ccp_pci->msix[v].vector = msix_entry[v].vector;
- ret = request_irq(ccp_pci->msix[v].vector,
- ccp->vdata->perform->irqhandler,
- 0, ccp_pci->msix[v].name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
- ret);
- goto e_irq;
- }
- }
- ccp->use_tasklet = true;
-
- return 0;
-
-e_irq:
- while (v--)
- free_irq(ccp_pci->msix[v].vector, dev);
-
- pci_disable_msix(pdev);
-
- ccp_pci->msix_count = 0;
-
- return ret;
-}
-
-static int ccp_get_msi_irq(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = pci_enable_msi(pdev);
- if (ret)
- return ret;
-
- ccp->irq = pdev->irq;
- ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
- ccp->name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
- goto e_msi;
- }
- ccp->use_tasklet = true;
-
- return 0;
-
-e_msi:
- pci_disable_msi(pdev);
-
- return ret;
-}
-
-static int ccp_get_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- int ret;
-
- ret = ccp_get_msix_irqs(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get MSI-X vectors, try MSI */
- dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
- ret = ccp_get_msi_irq(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get MSI interrupt */
- dev_notice(dev, "could not enable MSI (%d)\n", ret);
-
- return ret;
-}
-
-static void ccp_free_irqs(struct ccp_device *ccp)
-{
- struct ccp_pci *ccp_pci = ccp->dev_specific;
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (ccp_pci->msix_count) {
- while (ccp_pci->msix_count--)
- free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
- dev);
- pci_disable_msix(pdev);
- } else if (ccp->irq) {
- free_irq(ccp->irq, dev);
- pci_disable_msi(pdev);
- }
- ccp->irq = 0;
-}
-
-static int ccp_find_mmio_area(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- resource_size_t io_len;
- unsigned long io_flags;
-
- io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
- io_len = pci_resource_len(pdev, ccp->vdata->bar);
- if ((io_flags & IORESOURCE_MEM) &&
- (io_len >= (ccp->vdata->offset + 0x800)))
- return ccp->vdata->bar;
-
- return -EIO;
-}
-
-static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ccp_device *ccp;
- struct ccp_pci *ccp_pci;
- struct device *dev = &pdev->dev;
- unsigned int bar;
- int ret;
-
- ret = -ENOMEM;
- ccp = ccp_alloc_struct(dev);
- if (!ccp)
- goto e_err;
-
- ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
- if (!ccp_pci)
- goto e_err;
-
- ccp->dev_specific = ccp_pci;
- ccp->vdata = (struct ccp_vdata *)id->driver_data;
- if (!ccp->vdata || !ccp->vdata->version) {
- ret = -ENODEV;
- dev_err(dev, "missing driver data\n");
- goto e_err;
- }
- ccp->get_irq = ccp_get_irqs;
- ccp->free_irq = ccp_free_irqs;
-
- ret = pci_request_regions(pdev, "ccp");
- if (ret) {
- dev_err(dev, "pci_request_regions failed (%d)\n", ret);
- goto e_err;
- }
-
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(dev, "pci_enable_device failed (%d)\n", ret);
- goto e_regions;
- }
-
- pci_set_master(pdev);
-
- ret = ccp_find_mmio_area(ccp);
- if (ret < 0)
- goto e_device;
- bar = ret;
-
- ret = -EIO;
- ccp->io_map = pci_iomap(pdev, bar, 0);
- if (!ccp->io_map) {
- dev_err(dev, "pci_iomap failed\n");
- goto e_device;
- }
- ccp->io_regs = ccp->io_map + ccp->vdata->offset;
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
- ret);
- goto e_iomap;
- }
- }
-
- dev_set_drvdata(dev, ccp);
-
- if (ccp->vdata->setup)
- ccp->vdata->setup(ccp);
-
- ret = ccp->vdata->perform->init(ccp);
- if (ret)
- goto e_iomap;
-
- dev_notice(dev, "enabled\n");
-
- return 0;
-
-e_iomap:
- pci_iounmap(pdev, ccp->io_map);
-
-e_device:
- pci_disable_device(pdev);
-
-e_regions:
- pci_release_regions(pdev);
-
-e_err:
- dev_notice(dev, "initialization failed\n");
- return ret;
-}
-
-static void ccp_pci_remove(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
-
- if (!ccp)
- return;
-
- ccp->vdata->perform->destroy(ccp);
-
- pci_iounmap(pdev, ccp->io_map);
-
- pci_disable_device(pdev);
-
- pci_release_regions(pdev);
-
- dev_notice(dev, "disabled\n");
-}
-
-#ifdef CONFIG_PM
-static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 1;
-
- /* Wake all the queue kthreads to prepare for suspend */
- for (i = 0; i < ccp->cmd_q_count; i++)
- wake_up_process(ccp->cmd_q[i].kthread);
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- /* Wait for all queue kthreads to say they're done */
- while (!ccp_queues_suspended(ccp))
- wait_event_interruptible(ccp->suspend_queue,
- ccp_queues_suspended(ccp));
-
- return 0;
-}
-
-static int ccp_pci_resume(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 0;
-
- /* Wake up all the kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- ccp->cmd_q[i].suspended = 0;
- wake_up_process(ccp->cmd_q[i].kthread);
- }
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
-}
-#endif
-
-static const struct pci_device_id ccp_pci_table[] = {
- { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
- { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
- { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
- /* Last entry must be zero */
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, ccp_pci_table);
-
-static struct pci_driver ccp_pci_driver = {
- .name = "ccp",
- .id_table = ccp_pci_table,
- .probe = ccp_pci_probe,
- .remove = ccp_pci_remove,
-#ifdef CONFIG_PM
- .suspend = ccp_pci_suspend,
- .resume = ccp_pci_resume,
-#endif
-};
-
-int ccp_pci_init(void)
-{
- return pci_register_driver(&ccp_pci_driver);
-}
-
-void ccp_pci_exit(void)
-{
- pci_unregister_driver(&ccp_pci_driver);
-}
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
deleted file mode 100644
index e26969e601ad..000000000000
--- a/drivers/crypto/ccp/ccp-platform.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * AMD Cryptographic Coprocessor (CCP) driver
- *
- * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
- *
- * Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/dma-mapping.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/acpi.h>
-
-#include "ccp-dev.h"
-
-struct ccp_platform {
- int coherent;
-};
-
-static const struct acpi_device_id ccp_acpi_match[];
-static const struct of_device_id ccp_of_match[];
-
-static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev)
-{
-#ifdef CONFIG_OF
- const struct of_device_id *match;
-
- match = of_match_node(ccp_of_match, pdev->dev.of_node);
- if (match && match->data)
- return (struct ccp_vdata *)match->data;
-#endif
- return NULL;
-}
-
-static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev)
-{
-#ifdef CONFIG_ACPI
- const struct acpi_device_id *match;
-
- match = acpi_match_device(ccp_acpi_match, &pdev->dev);
- if (match && match->driver_data)
- return (struct ccp_vdata *)match->driver_data;
-#endif
- return NULL;
-}
-
-static int ccp_get_irq(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct platform_device *pdev = to_platform_device(dev);
- int ret;
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- ccp->irq = ret;
- ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
- ccp->name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int ccp_get_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- int ret;
-
- ret = ccp_get_irq(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get an interrupt */
- dev_notice(dev, "could not enable interrupts (%d)\n", ret);
-
- return ret;
-}
-
-static void ccp_free_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
-
- free_irq(ccp->irq, dev);
-}
-
-static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *ior;
-
- ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (ior && (resource_size(ior) >= 0x800))
- return ior;
-
- return NULL;
-}
-
-static int ccp_platform_probe(struct platform_device *pdev)
-{
- struct ccp_device *ccp;
- struct ccp_platform *ccp_platform;
- struct device *dev = &pdev->dev;
- enum dev_dma_attr attr;
- struct resource *ior;
- int ret;
-
- ret = -ENOMEM;
- ccp = ccp_alloc_struct(dev);
- if (!ccp)
- goto e_err;
-
- ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
- if (!ccp_platform)
- goto e_err;
-
- ccp->dev_specific = ccp_platform;
- ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
- : ccp_get_acpi_version(pdev);
- if (!ccp->vdata || !ccp->vdata->version) {
- ret = -ENODEV;
- dev_err(dev, "missing driver data\n");
- goto e_err;
- }
- ccp->get_irq = ccp_get_irqs;
- ccp->free_irq = ccp_free_irqs;
-
- ior = ccp_find_mmio_area(ccp);
- ccp->io_map = devm_ioremap_resource(dev, ior);
- if (IS_ERR(ccp->io_map)) {
- ret = PTR_ERR(ccp->io_map);
- goto e_err;
- }
- ccp->io_regs = ccp->io_map;
-
- attr = device_get_dma_attr(dev);
- if (attr == DEV_DMA_NOT_SUPPORTED) {
- dev_err(dev, "DMA is not supported");
- goto e_err;
- }
-
- ccp_platform->coherent = (attr == DEV_DMA_COHERENT);
- if (ccp_platform->coherent)
- ccp->axcache = CACHE_WB_NO_ALLOC;
- else
- ccp->axcache = CACHE_NONE;
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
- goto e_err;
- }
-
- dev_set_drvdata(dev, ccp);
-
- ret = ccp->vdata->perform->init(ccp);
- if (ret)
- goto e_err;
-
- dev_notice(dev, "enabled\n");
-
- return 0;
-
-e_err:
- dev_notice(dev, "initialization failed\n");
- return ret;
-}
-
-static int ccp_platform_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
-
- ccp->vdata->perform->destroy(ccp);
-
- dev_notice(dev, "disabled\n");
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ccp_platform_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 1;
-
- /* Wake all the queue kthreads to prepare for suspend */
- for (i = 0; i < ccp->cmd_q_count; i++)
- wake_up_process(ccp->cmd_q[i].kthread);
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- /* Wait for all queue kthreads to say they're done */
- while (!ccp_queues_suspended(ccp))
- wait_event_interruptible(ccp->suspend_queue,
- ccp_queues_suspended(ccp));
-
- return 0;
-}
-
-static int ccp_platform_resume(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 0;
-
- /* Wake up all the kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- ccp->cmd_q[i].suspended = 0;
- wake_up_process(ccp->cmd_q[i].kthread);
- }
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ccp_acpi_match[] = {
- { "AMDI0C00", (kernel_ulong_t)&ccpv3 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id ccp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a",
- .data = (const void *)&ccpv3 },
- { },
-};
-MODULE_DEVICE_TABLE(of, ccp_of_match);
-#endif
-
-static struct platform_driver ccp_platform_driver = {
- .driver = {
- .name = "ccp",
-#ifdef CONFIG_ACPI
- .acpi_match_table = ccp_acpi_match,
-#endif
-#ifdef CONFIG_OF
- .of_match_table = ccp_of_match,
-#endif
- },
- .probe = ccp_platform_probe,
- .remove = ccp_platform_remove,
-#ifdef CONFIG_PM
- .suspend = ccp_platform_suspend,
- .resume = ccp_platform_resume,
-#endif
-};
-
-int ccp_platform_init(void)
-{
- return platform_driver_register(&ccp_platform_driver);
-}
-
-void ccp_platform_exit(void)
-{
- platform_driver_unregister(&ccp_platform_driver);
-}
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
new file mode 100644
index 000000000000..bef387c8abfd
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -0,0 +1,277 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "sp-dev.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1.0");
+MODULE_DESCRIPTION("AMD Secure Processor driver");
+
+/* List of SPs, SP count, read-write access lock, and access functions
+ *
+ * Lock structure: get sp_unit_lock for reading whenever we need to
+ * examine the SP list.
+ */
+static DEFINE_RWLOCK(sp_unit_lock);
+static LIST_HEAD(sp_units);
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t sp_ordinal;
+
+static void sp_add_device(struct sp_device *sp)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+
+ list_add_tail(&sp->entry, &sp_units);
+
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static void sp_del_device(struct sp_device *sp)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+
+ list_del(&sp->entry);
+
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static irqreturn_t sp_irq_handler(int irq, void *data)
+{
+ struct sp_device *sp = data;
+
+ if (sp->ccp_irq_handler)
+ sp->ccp_irq_handler(irq, sp->ccp_irq_data);
+
+ if (sp->psp_irq_handler)
+ sp->psp_irq_handler(irq, sp->psp_irq_data);
+
+ return IRQ_HANDLED;
+}
+
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data)
+{
+ int ret;
+
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+ /* Need a common routine to manage all interrupts */
+ sp->ccp_irq_data = data;
+ sp->ccp_irq_handler = handler;
+
+ if (!sp->irq_registered) {
+ ret = request_irq(sp->ccp_irq, sp_irq_handler, 0,
+ sp->name, sp);
+ if (ret)
+ return ret;
+
+ sp->irq_registered = true;
+ }
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ ret = request_irq(sp->ccp_irq, handler, 0, name, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data)
+{
+ int ret;
+
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+ /* Need a common routine to manage all interrupts */
+ sp->psp_irq_data = data;
+ sp->psp_irq_handler = handler;
+
+ if (!sp->irq_registered) {
+ ret = request_irq(sp->psp_irq, sp_irq_handler, 0,
+ sp->name, sp);
+ if (ret)
+ return ret;
+
+ sp->irq_registered = true;
+ }
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ ret = request_irq(sp->psp_irq, handler, 0, name, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void sp_free_ccp_irq(struct sp_device *sp, void *data)
+{
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+ /* Using common routine to manage all interrupts */
+ if (!sp->psp_irq_handler) {
+ /* Nothing else using it, so free it */
+ free_irq(sp->ccp_irq, sp);
+
+ sp->irq_registered = false;
+ }
+
+ sp->ccp_irq_handler = NULL;
+ sp->ccp_irq_data = NULL;
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ free_irq(sp->ccp_irq, data);
+ }
+}
+
+void sp_free_psp_irq(struct sp_device *sp, void *data)
+{
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+ /* Using common routine to manage all interrupts */
+ if (!sp->ccp_irq_handler) {
+ /* Nothing else using it, so free it */
+ free_irq(sp->psp_irq, sp);
+
+ sp->irq_registered = false;
+ }
+
+ sp->psp_irq_handler = NULL;
+ sp->psp_irq_data = NULL;
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ free_irq(sp->psp_irq, data);
+ }
+}
+
+/**
+ * sp_alloc_struct - allocate and initialize the sp_device struct
+ *
+ * @dev: device struct of the SP
+ */
+struct sp_device *sp_alloc_struct(struct device *dev)
+{
+ struct sp_device *sp;
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return NULL;
+
+ sp->dev = dev;
+ sp->ord = atomic_inc_return(&sp_ordinal);
+ snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord);
+
+ return sp;
+}
+
+int sp_init(struct sp_device *sp)
+{
+ sp_add_device(sp);
+
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_init(sp);
+
+ return 0;
+}
+
+void sp_destroy(struct sp_device *sp)
+{
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_destroy(sp);
+
+ sp_del_device(sp);
+}
+
+#ifdef CONFIG_PM
+int sp_suspend(struct sp_device *sp, pm_message_t state)
+{
+ int ret;
+
+ if (sp->dev_vdata->ccp_vdata) {
+ ret = ccp_dev_suspend(sp, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sp_resume(struct sp_device *sp)
+{
+ int ret;
+
+ if (sp->dev_vdata->ccp_vdata) {
+ ret = ccp_dev_resume(sp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static int __init sp_mod_init(void)
+{
+#ifdef CONFIG_X86
+ int ret;
+
+ ret = sp_pci_init();
+ if (ret)
+ return ret;
+
+ return 0;
+#endif
+
+#ifdef CONFIG_ARM64
+ int ret;
+
+ ret = sp_platform_init();
+ if (ret)
+ return ret;
+
+ return 0;
+#endif
+
+ return -ENODEV;
+}
+
+static void __exit sp_mod_exit(void)
+{
+#ifdef CONFIG_X86
+ sp_pci_exit();
+#endif
+
+#ifdef CONFIG_ARM64
+ sp_platform_exit();
+#endif
+}
+
+module_init(sp_mod_init);
+module_exit(sp_mod_exit);
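
A note on the interrupt plumbing above: the CCP and PSP sub-devices may share a single interrupt line, so sp_request_ccp_irq()/sp_request_psp_irq() install per-sub-device callbacks and register the common sp_irq_handler() only once. A minimal sketch of that demux idiom follows; the demux_dev struct and the foo/bar names are hypothetical, not part of this driver:

#include <linux/interrupt.h>

/* Hypothetical two-client demux, mirroring sp_irq_handler() above. */
struct demux_dev {
	irq_handler_t foo_handler;	/* installed by sub-device "foo" */
	void *foo_data;
	irq_handler_t bar_handler;	/* installed by sub-device "bar" */
	void *bar_data;
};

static irqreturn_t demux_irq_handler(int irq, void *data)
{
	struct demux_dev *dd = data;

	/* Fan the one hardware interrupt out to every installed client. */
	if (dd->foo_handler)
		dd->foo_handler(irq, dd->foo_data);
	if (dd->bar_handler)
		dd->bar_handler(irq, dd->bar_data);

	return IRQ_HANDLED;
}
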
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
new file mode 100644
index 000000000000..5ab486ade1ad
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -0,0 +1,133 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SP_DEV_H__
+#define __SP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#define SP_MAX_NAME_LEN 32
+
+#define CACHE_NONE 0x00
+#define CACHE_WB_NO_ALLOC 0xb7
+
+/* Structure to hold CCP device data */
+struct ccp_device;
+struct ccp_vdata {
+ const unsigned int version;
+ const unsigned int dma_chan_attr;
+ void (*setup)(struct ccp_device *);
+ const struct ccp_actions *perform;
+ const unsigned int offset;
+ const unsigned int rsamax;
+};
+/* Structure to hold SP device data */
+struct sp_dev_vdata {
+ const unsigned int bar;
+
+ const struct ccp_vdata *ccp_vdata;
+ void *psp_vdata;
+};
+
+struct sp_device {
+ struct list_head entry;
+
+ struct device *dev;
+
+ struct sp_dev_vdata *dev_vdata;
+ unsigned int ord;
+ char name[SP_MAX_NAME_LEN];
+
+ /* Bus specific device information */
+ void *dev_specific;
+
+ /* I/O area used for device communication. */
+ void __iomem *io_map;
+
+ /* DMA caching attribute support */
+ unsigned int axcache;
+
+ bool irq_registered;
+ bool use_tasklet;
+
+ unsigned int ccp_irq;
+ irq_handler_t ccp_irq_handler;
+ void *ccp_irq_data;
+
+ unsigned int psp_irq;
+ irq_handler_t psp_irq_handler;
+ void *psp_irq_data;
+
+ void *ccp_data;
+ void *psp_data;
+};
+
+int sp_pci_init(void);
+void sp_pci_exit(void);
+
+int sp_platform_init(void);
+void sp_platform_exit(void);
+
+struct sp_device *sp_alloc_struct(struct device *dev);
+
+int sp_init(struct sp_device *sp);
+void sp_destroy(struct sp_device *sp);
+struct sp_device *sp_get_master(void);
+
+int sp_suspend(struct sp_device *sp, pm_message_t state);
+int sp_resume(struct sp_device *sp);
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data);
+void sp_free_ccp_irq(struct sp_device *sp, void *data);
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data);
+void sp_free_psp_irq(struct sp_device *sp, void *data);
+
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+
+int ccp_dev_init(struct sp_device *sp);
+void ccp_dev_destroy(struct sp_device *sp);
+
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state);
+int ccp_dev_resume(struct sp_device *sp);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
+
+static inline int ccp_dev_init(struct sp_device *sp)
+{
+ return 0;
+}
+static inline void ccp_dev_destroy(struct sp_device *sp) { }
+
+static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+{
+ return 0;
+}
+static inline int ccp_dev_resume(struct sp_device *sp)
+{
+ return 0;
+}
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
+
+#endif
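
The trailer of sp-dev.h uses the stub-inline idiom: when CONFIG_CRYPTO_DEV_SP_CCP is off, the ccp_dev_* entry points become empty static inlines, so sp_init()/sp_destroy() above can call them unconditionally. A sketch of the same idiom for a hypothetical optional sub-module (CONFIG_BAZ and the baz_* names are illustrative):

/* Hypothetical optional sub-module "baz". */
#ifdef CONFIG_BAZ
int baz_init(struct sp_device *sp);
void baz_destroy(struct sp_device *sp);
#else
static inline int baz_init(struct sp_device *sp) { return 0; }
static inline void baz_destroy(struct sp_device *sp) { }
#endif
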
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
new file mode 100644
index 000000000000..9859aa683a28
--- /dev/null
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -0,0 +1,276 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+#define MSIX_VECTORS 2
+
+struct sp_pci {
+ int msix_count;
+ struct msix_entry msix_entry[MSIX_VECTORS];
+};
+
+static int sp_get_msix_irqs(struct sp_device *sp)
+{
+ struct sp_pci *sp_pci = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int v, ret;
+
+ for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++)
+ sp_pci->msix_entry[v].entry = v;
+
+ ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v);
+ if (ret < 0)
+ return ret;
+
+ sp_pci->msix_count = ret;
+ sp->use_tasklet = true;
+
+ sp->psp_irq = sp_pci->msix_entry[0].vector;
+ sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector
+ : sp_pci->msix_entry[0].vector;
+ return 0;
+}
+
+static int sp_get_msi_irq(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ sp->ccp_irq = pdev->irq;
+ sp->psp_irq = pdev->irq;
+
+ return 0;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ int ret;
+
+ ret = sp_get_msix_irqs(sp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = sp_get_msi_irq(sp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_notice(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void sp_free_irqs(struct sp_device *sp)
+{
+ struct sp_pci *sp_pci = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (sp_pci->msix_count)
+ pci_disable_msix(pdev);
+ else if (sp->psp_irq)
+ pci_disable_msi(pdev);
+
+ sp->ccp_irq = 0;
+ sp->psp_irq = 0;
+}
+
+static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sp_device *sp;
+ struct sp_pci *sp_pci;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ int bar_mask;
+ int ret;
+
+ ret = -ENOMEM;
+ sp = sp_alloc_struct(dev);
+ if (!sp)
+ goto e_err;
+
+ sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL);
+ if (!sp_pci)
+ goto e_err;
+
+ sp->dev_specific = sp_pci;
+ sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data;
+ if (!sp->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ sp->io_map = iomap_table[sp->dev_vdata->bar];
+ if (!sp->io_map) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ ret = sp_get_irqs(sp);
+ if (ret)
+ goto e_err;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ ret);
+ goto e_err;
+ }
+ }
+
+ dev_set_drvdata(dev, sp);
+
+ ret = sp_init(sp);
+ if (ret)
+ goto e_err;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static void sp_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ if (!sp)
+ return;
+
+ sp_destroy(sp);
+
+ sp_free_irqs(sp);
+
+ dev_notice(dev, "disabled\n");
+}
+
+#ifdef CONFIG_PM
+static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_suspend(sp, state);
+}
+
+static int sp_pci_resume(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_resume(sp);
+}
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3,
+#endif
+ },
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+ },
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5b,
+#endif
+ },
+};
+static const struct pci_device_id sp_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
+ { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
+ { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sp_pci_table);
+
+static struct pci_driver sp_pci_driver = {
+ .name = "ccp",
+ .id_table = sp_pci_table,
+ .probe = sp_pci_probe,
+ .remove = sp_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = sp_pci_suspend,
+ .resume = sp_pci_resume,
+#endif
+};
+
+int sp_pci_init(void)
+{
+ return pci_register_driver(&sp_pci_driver);
+}
+
+void sp_pci_exit(void)
+{
+ pci_unregister_driver(&sp_pci_driver);
+}
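
Unlike the deleted ccp-pci.c with its e_iomap/e_device/e_regions unwind chain, sp_pci_probe() leans on managed (pcim_*) PCI helpers, so every error path can simply return. A reduced sketch of that managed pattern, assuming a single memory BAR; sketch_pci_probe is a hypothetical name:

static int sketch_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int ret;

	ret = pcim_enable_device(pdev);		/* undone automatically */
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), "sketch");
	if (ret)
		return ret;			/* no goto chain needed */

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	pci_set_master(pdev);
	return 0;
}
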
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
new file mode 100644
index 000000000000..71734f254fd1
--- /dev/null
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -0,0 +1,256 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/acpi.h>
+
+#include "ccp-dev.h"
+
+struct sp_platform {
+ int coherent;
+ unsigned int irq_count;
+};
+
+static const struct acpi_device_id sp_acpi_match[];
+static const struct of_device_id sp_of_match[];
+
+static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
+
+ match = of_match_node(sp_of_match, pdev->dev.of_node);
+ if (match && match->data)
+ return (struct sp_dev_vdata *)match->data;
+#endif
+ return NULL;
+}
+
+static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *match;
+
+ match = acpi_match_device(sp_acpi_match, &pdev->dev);
+ if (match && match->driver_data)
+ return (struct sp_dev_vdata *)match->driver_data;
+#endif
+ return NULL;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+ struct sp_platform *sp_platform = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned int i, count;
+ int ret;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (resource_type(res) == IORESOURCE_IRQ)
+ count++;
+ }
+
+ sp_platform->irq_count = count;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+ return ret;
+ }
+
+ sp->psp_irq = ret;
+ if (count == 1) {
+ sp->ccp_irq = ret;
+ } else {
+ ret = platform_get_irq(pdev, 1);
+ if (ret < 0) {
+ dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+ return ret;
+ }
+
+ sp->ccp_irq = ret;
+ }
+
+ return 0;
+}
+
+static int sp_platform_probe(struct platform_device *pdev)
+{
+ struct sp_device *sp;
+ struct sp_platform *sp_platform;
+ struct device *dev = &pdev->dev;
+ enum dev_dma_attr attr;
+ struct resource *ior;
+ int ret;
+
+ ret = -ENOMEM;
+ sp = sp_alloc_struct(dev);
+ if (!sp)
+ goto e_err;
+
+ sp_platform = devm_kzalloc(dev, sizeof(*sp_platform), GFP_KERNEL);
+ if (!sp_platform)
+ goto e_err;
+
+ sp->dev_specific = sp_platform;
+ sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+ : sp_get_acpi_version(pdev);
+ if (!sp->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sp->io_map = devm_ioremap_resource(dev, ior);
+ if (IS_ERR(sp->io_map)) {
+ ret = PTR_ERR(sp->io_map);
+ goto e_err;
+ }
+
+ attr = device_get_dma_attr(dev);
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+ dev_err(dev, "DMA is not supported");
+ goto e_err;
+ }
+
+ sp_platform->coherent = (attr == DEV_DMA_COHERENT);
+ if (sp_platform->coherent)
+ sp->axcache = CACHE_WB_NO_ALLOC;
+ else
+ sp->axcache = CACHE_NONE;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ ret = sp_get_irqs(sp);
+ if (ret)
+ goto e_err;
+
+ dev_set_drvdata(dev, sp);
+
+ ret = sp_init(sp);
+ if (ret)
+ goto e_err;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static int sp_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ sp_destroy(sp);
+
+ dev_notice(dev, "disabled\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sp_platform_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_suspend(sp, state);
+}
+
+static int sp_platform_resume(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_resume(sp);
+}
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3_platform,
+#endif
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
+
+static struct platform_driver sp_platform_driver = {
+ .driver = {
+ .name = "ccp",
+#ifdef CONFIG_ACPI
+ .acpi_match_table = sp_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = sp_of_match,
+#endif
+ },
+ .probe = sp_platform_probe,
+ .remove = sp_platform_remove,
+#ifdef CONFIG_PM
+ .suspend = sp_platform_suspend,
+ .resume = sp_platform_resume,
+#endif
+};
+
+int sp_platform_init(void)
+{
+ return platform_driver_register(&sp_platform_driver);
+}
+
+void sp_platform_exit(void)
+{
+ platform_driver_unregister(&sp_platform_driver);
+}
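
As in the removed ccp-platform.c, the platform probe derives the AXI cache attribute from the firmware-described DMA coherence. A minimal sketch of that decision, reusing the CACHE_* constants from sp-dev.h; sketch_pick_axcache is a hypothetical helper:

/* Choose the axcache value from the DT/ACPI-reported DMA attribute. */
static int sketch_pick_axcache(struct device *dev, unsigned int *axcache)
{
	enum dev_dma_attr attr = device_get_dma_attr(dev);

	if (attr == DEV_DMA_NOT_SUPPORTED)
		return -ENODEV;

	*axcache = (attr == DEV_DMA_COHERENT) ? CACHE_WB_NO_ALLOC
					      : CACHE_NONE;
	return 0;
}
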
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index fe538e5287a5..eb2a0a73cbed 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -1,10 +1,10 @@
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -30,6 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
+
for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
@@ -39,6 +40,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
+
for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
@@ -515,6 +517,7 @@ static void geode_aes_remove(struct pci_dev *dev)
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
+
ret = pci_enable_device(dev);
if (ret)
return ret;
@@ -570,7 +573,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
static struct pci_device_id geode_aes_tbl[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
{ 0, }
};
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 0c6a917a9ab8..b87000a0a01c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -1054,7 +1054,7 @@ res_err:
static int img_hash_remove(struct platform_device *pdev)
{
- static struct img_hash_dev *hdev;
+ struct img_hash_dev *hdev;
hdev = platform_get_drvdata(pdev);
spin_lock(&img_hash.lock);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 1fabd4aee81b..89ba9e85c0f3 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -839,9 +839,10 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
ring_irq);
-
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto err_clk;
+ }
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
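
The safexcel fix is one instance of a pattern that recurs through the rest of this pull (mediatek, mxc-scc, mxs-dcp, omap-aes, omap-des): when an IRQ lookup fails, propagate its negative errno rather than returning a stale or hard-coded error. The idiom, sketched in a hypothetical probe:

static int sketch_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ (%d)\n", irq);
		return irq;	/* the caller sees the real reason */
	}

	/* ... request_irq(irq, ...) and the rest of probe ... */
	return 0;
}
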
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index 000b6500a22d..b182e941b0cd 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -500,7 +500,7 @@ static int mtk_crypto_probe(struct platform_device *pdev)
cryp->irq[i] = platform_get_irq(pdev, i);
if (cryp->irq[i] < 0) {
dev_err(cryp->dev, "no IRQ:%d resource info\n", i);
- return -ENXIO;
+ return cryp->irq[i];
}
}
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index ee4be1b0d30b..e01c46387df8 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -708,8 +708,8 @@ static int mxc_scc_probe(struct platform_device *pdev)
for (i = 0; i < 2; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
- dev_err(dev, "failed to get irq resource\n");
- ret = -EINVAL;
+ dev_err(dev, "failed to get irq resource: %d\n", irq);
+ ret = irq;
goto err_out;
}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 625ee50fd78b..764be3e6933c 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -908,12 +908,16 @@ static int mxs_dcp_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
- if (dcp_vmi_irq < 0)
+ if (dcp_vmi_irq < 0) {
+ dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
return dcp_vmi_irq;
+ }
dcp_irq = platform_get_irq(pdev, 1);
- if (dcp_irq < 0)
+ if (dcp_irq < 0) {
+ dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
return dcp_irq;
+ }
sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
if (!sdcp)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 269451375b63..a9fd8b9e86cd 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1730,8 +1730,8 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
continue;
id = mdesc_get_property(mdesc, tgt, "id", NULL);
if (table[*id] != NULL) {
- dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
+ dev->dev.of_node);
return -EINVAL;
}
cpumask_set_cpu(*id, &p->sharing);
@@ -1751,8 +1751,8 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
if (!p) {
- dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
@@ -1981,41 +1981,39 @@ static void n2_spu_driver_version(void)
static int n2_crypto_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
- const char *full_name;
struct n2_crypto *np;
int err;
n2_spu_driver_version();
- full_name = dev->dev.of_node->full_name;
- pr_info("Found N2CP at %s\n", full_name);
+ pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
np = alloc_n2cp();
if (!np) {
- dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
err = grab_global_resources();
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab "
- "global resources.\n", full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+ dev->dev.of_node);
goto out_free_n2cp;
}
mdesc = mdesc_grab();
if (!mdesc) {
- dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+ dev->dev.of_node);
err = -ENODEV;
goto out_free_global;
}
err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+ dev->dev.of_node);
mdesc_release(mdesc);
goto out_free_global;
}
@@ -2026,15 +2024,15 @@ static int n2_crypto_probe(struct platform_device *dev)
mdesc_release(mdesc);
if (err) {
- dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
+ dev->dev.of_node);
goto out_free_global;
}
err = n2_register_algs();
if (err) {
- dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
+ dev->dev.of_node);
goto out_free_spu_list;
}
@@ -2092,42 +2090,40 @@ static void free_ncp(struct n2_mau *mp)
static int n2_mau_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
- const char *full_name;
struct n2_mau *mp;
int err;
n2_spu_driver_version();
- full_name = dev->dev.of_node->full_name;
- pr_info("Found NCP at %s\n", full_name);
+ pr_info("Found NCP at %pOF\n", dev->dev.of_node);
mp = alloc_ncp();
if (!mp) {
- dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
err = grab_global_resources();
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab "
- "global resources.\n", full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+ dev->dev.of_node);
goto out_free_ncp;
}
mdesc = mdesc_grab();
if (!mdesc) {
- dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+ dev->dev.of_node);
err = -ENODEV;
goto out_free_global;
}
err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+ dev->dev.of_node);
mdesc_release(mdesc);
goto out_free_global;
}
@@ -2138,8 +2134,8 @@ static int n2_mau_probe(struct platform_device *dev)
mdesc_release(mdesc);
if (err) {
- dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
+ dev->dev.of_node);
goto out_free_global;
}
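
All of the n2_core churn above is one mechanical conversion: the %pOF printk specifier formats a struct device_node's full path directly, removing the need to cache of_node->full_name. Usage in one line (message text is illustrative):

/* %pOF expands to the full device-tree path of the node. */
dev_err(&pdev->dev, "%pOF: resource missing\n", pdev->dev.of_node);
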
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 5120a17731d0..c376a3ee7c2c 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1095,6 +1095,7 @@ static int omap_aes_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "can't get IRQ resource\n");
+ err = irq;
goto err_irq;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 0bcab00e0ff5..d37c9506c36c 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1023,7 +1023,8 @@ static int omap_des_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "can't get IRQ resource\n");
+ dev_err(dev, "can't get IRQ resource: %d\n", irq);
+ err = irq;
goto err_irq;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 9ad9d399daf1..c40ac30ec002 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2133,7 +2133,7 @@ data_err:
static int omap_sham_remove(struct platform_device *pdev)
{
- static struct omap_sham_dev *dd;
+ struct omap_sham_dev *dd;
int i, j;
dd = platform_get_drvdata(pdev);
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index d3e25c37dc33..da8a2d3b5e9a 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -208,7 +208,7 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
static void adf_resume(struct pci_dev *pdev)
{
dev_info(&pdev->dev, "Acceleration driver reset completed\n");
- dev_info(&pdev->dev, "Device is up and runnig\n");
+ dev_info(&pdev->dev, "Device is up and running\n");
}
static const struct pci_error_handlers adf_err_handler = {
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index d0f80c6241f9..c9d622abd90c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -169,50 +169,82 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
u32 interrupt_status;
- int err = 0;
spin_lock(&dev->lock);
interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
if (interrupt_status & 0x0a) {
dev_warn(dev->dev, "DMA Error\n");
- err = -EFAULT;
- } else if (interrupt_status & 0x05) {
- err = dev->update(dev);
+ dev->err = -EFAULT;
}
- if (err)
- dev->complete(dev, err);
+ tasklet_schedule(&dev->done_task);
+
spin_unlock(&dev->lock);
return IRQ_HANDLED;
}
-static void rk_crypto_tasklet_cb(unsigned long data)
+static int rk_crypto_enqueue(struct rk_crypto_info *dev,
+ struct crypto_async_request *async_req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = crypto_enqueue_request(&dev->queue, async_req);
+ if (dev->busy) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+ }
+ dev->busy = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ tasklet_schedule(&dev->queue_task);
+
+ return ret;
+}
+
+static void rk_crypto_queue_task_cb(unsigned long data)
{
struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
struct crypto_async_request *async_req, *backlog;
unsigned long flags;
int err = 0;
+ dev->err = 0;
spin_lock_irqsave(&dev->lock, flags);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- spin_unlock_irqrestore(&dev->lock, flags);
+
if (!async_req) {
- dev_err(dev->dev, "async_req is NULL !!\n");
+ dev->busy = false;
+ spin_unlock_irqrestore(&dev->lock, flags);
return;
}
+ spin_unlock_irqrestore(&dev->lock, flags);
+
if (backlog) {
backlog->complete(backlog, -EINPROGRESS);
backlog = NULL;
}
- if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
- dev->ablk_req = ablkcipher_request_cast(async_req);
- else
- dev->ahash_req = ahash_request_cast(async_req);
+ dev->async_req = async_req;
err = dev->start(dev);
if (err)
- dev->complete(dev, err);
+ dev->complete(dev->async_req, err);
+}
+
+static void rk_crypto_done_task_cb(unsigned long data)
+{
+ struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+
+ if (dev->err) {
+ dev->complete(dev->async_req, dev->err);
+ return;
+ }
+
+ dev->err = dev->update(dev);
+ if (dev->err)
+ dev->complete(dev->async_req, dev->err);
}
static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -361,14 +393,18 @@ static int rk_crypto_probe(struct platform_device *pdev)
crypto_info->dev = &pdev->dev;
platform_set_drvdata(pdev, crypto_info);
- tasklet_init(&crypto_info->crypto_tasklet,
- rk_crypto_tasklet_cb, (unsigned long)crypto_info);
+ tasklet_init(&crypto_info->queue_task,
+ rk_crypto_queue_task_cb, (unsigned long)crypto_info);
+ tasklet_init(&crypto_info->done_task,
+ rk_crypto_done_task_cb, (unsigned long)crypto_info);
crypto_init_queue(&crypto_info->queue, 50);
crypto_info->enable_clk = rk_crypto_enable_clk;
crypto_info->disable_clk = rk_crypto_disable_clk;
crypto_info->load_data = rk_load_data;
crypto_info->unload_data = rk_unload_data;
+ crypto_info->enqueue = rk_crypto_enqueue;
+ crypto_info->busy = false;
err = rk_crypto_register(crypto_info);
if (err) {
@@ -380,7 +416,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
return 0;
err_register_alg:
- tasklet_kill(&crypto_info->crypto_tasklet);
+ tasklet_kill(&crypto_info->queue_task);
+ tasklet_kill(&crypto_info->done_task);
err_crypto:
return err;
}
@@ -390,7 +427,8 @@ static int rk_crypto_remove(struct platform_device *pdev)
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
rk_crypto_unregister();
- tasklet_kill(&crypto_tmp->crypto_tasklet);
+ tasklet_kill(&crypto_tmp->done_task);
+ tasklet_kill(&crypto_tmp->queue_task);
return 0;
}
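
The shape of the rk3288 rework: the old single crypto_tasklet is split into a queue_task that dequeues and starts requests and a done_task that finishes transfers, with the hard-IRQ handler only latching status into dev->err before scheduling done_task; the shared dev->mode word also migrates into per-context/per-request state so concurrent requests cannot clobber each other (see the header diff below). A condensed sketch of the IRQ/tasklet split, with read_status() and finish_request() as hypothetical stand-ins:

#include <linux/interrupt.h>

static struct tasklet_struct queue_task, done_task;
static int pending_err;

static irqreturn_t sketch_irq(int irq, void *data)
{
	/* Hard-IRQ context: only latch status ... */
	pending_err = read_status(data) ? -EFAULT : 0;	/* hypothetical */
	/* ... and defer all real work to the tasklet. */
	tasklet_schedule(&done_task);
	return IRQ_HANDLED;
}

static void sketch_done_cb(unsigned long data)
{
	if (pending_err) {
		finish_request(pending_err);		/* hypothetical */
		return;
	}
	/* advance or complete the transfer, then restart the queue */
	tasklet_schedule(&queue_task);
}
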
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index d7b71fea320b..ab6a1b4c40f0 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -190,9 +190,10 @@ struct rk_crypto_info {
void __iomem *reg;
int irq;
struct crypto_queue queue;
- struct tasklet_struct crypto_tasklet;
- struct ablkcipher_request *ablk_req;
- struct ahash_request *ahash_req;
+ struct tasklet_struct queue_task;
+ struct tasklet_struct done_task;
+ struct crypto_async_request *async_req;
+ int err;
/* device lock */
spinlock_t lock;
@@ -208,18 +209,20 @@ struct rk_crypto_info {
size_t nents;
unsigned int total;
unsigned int count;
- u32 mode;
dma_addr_t addr_in;
dma_addr_t addr_out;
+ bool busy;
int (*start)(struct rk_crypto_info *dev);
int (*update)(struct rk_crypto_info *dev);
- void (*complete)(struct rk_crypto_info *dev, int err);
+ void (*complete)(struct crypto_async_request *base, int err);
int (*enable_clk)(struct rk_crypto_info *dev);
void (*disable_clk)(struct rk_crypto_info *dev);
int (*load_data)(struct rk_crypto_info *dev,
struct scatterlist *sg_src,
struct scatterlist *sg_dst);
void (*unload_data)(struct rk_crypto_info *dev);
+ int (*enqueue)(struct rk_crypto_info *dev,
+ struct crypto_async_request *async_req);
};
/* the private variable of hash */
@@ -232,12 +235,14 @@ struct rk_ahash_ctx {
/* the private variable of hash for fallback */
struct rk_ahash_rctx {
struct ahash_request fallback_req;
+ u32 mode;
};
/* the private variable of cipher */
struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
+ u32 mode;
};
enum alg_type {
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index b5a3afe222e4..639c15c5364b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -15,35 +15,19 @@
#define RK_CRYPTO_DEC BIT(0)
-static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
- if (dev->ablk_req->base.complete)
- dev->ablk_req->base.complete(&dev->ablk_req->base, err);
+ if (base->complete)
+ base->complete(base, err);
}
static int rk_handle_req(struct rk_crypto_info *dev,
struct ablkcipher_request *req)
{
- unsigned long flags;
- int err;
-
if (!IS_ALIGNED(req->nbytes, dev->align_size))
return -EINVAL;
-
- dev->left_bytes = req->nbytes;
- dev->total = req->nbytes;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->aligned = 1;
- dev->ablk_req = req;
-
- spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
- spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&dev->crypto_tasklet);
- return err;
+ else
+ return dev->enqueue(dev, &req->base);
}
static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
@@ -93,7 +77,7 @@ static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_ECB_MODE;
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE;
return rk_handle_req(dev, req);
}
@@ -103,7 +87,7 @@ static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -113,7 +97,7 @@ static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_CBC_MODE;
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE;
return rk_handle_req(dev, req);
}
@@ -123,7 +107,7 @@ static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -133,7 +117,7 @@ static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = 0;
+ ctx->mode = 0;
return rk_handle_req(dev, req);
}
@@ -143,7 +127,7 @@ static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -153,7 +137,7 @@ static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
@@ -163,7 +147,7 @@ static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -173,7 +157,7 @@ static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT;
+ ctx->mode = RK_CRYPTO_TDES_SELECT;
return rk_handle_req(dev, req);
}
@@ -183,7 +167,7 @@ static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -193,7 +177,7 @@ static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
@@ -203,15 +187,16 @@ static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
- struct crypto_ablkcipher *cipher =
- crypto_ablkcipher_reqtfm(dev->ablk_req);
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 ivsize, block, conf_reg = 0;
@@ -220,25 +205,23 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
ivsize = crypto_ablkcipher_ivsize(cipher);
if (block == DES_BLOCK_SIZE) {
- dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
RK_CRYPTO_TDES_BYTESWAP_KEY |
RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
- dev->ablk_req->info, ivsize);
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
conf_reg = RK_CRYPTO_DESSEL;
} else {
- dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
RK_CRYPTO_AES_KEY_CHANGE |
RK_CRYPTO_AES_BYTESWAP_KEY |
RK_CRYPTO_AES_BYTESWAP_IV;
if (ctx->keylen == AES_KEYSIZE_192)
- dev->mode |= RK_CRYPTO_AES_192BIT_key;
+ ctx->mode |= RK_CRYPTO_AES_192BIT_key;
else if (ctx->keylen == AES_KEYSIZE_256)
- dev->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
- dev->ablk_req->info, ivsize);
+ ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
}
conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
RK_CRYPTO_BYTESWAP_BRFIFO;
@@ -268,8 +251,18 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
static int rk_ablk_start(struct rk_crypto_info *dev)
{
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
unsigned long flags;
- int err;
+ int err = 0;
+
+ dev->left_bytes = req->nbytes;
+ dev->total = req->nbytes;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+ dev->sg_dst = req->dst;
+ dev->aligned = 1;
spin_lock_irqsave(&dev->lock, flags);
rk_ablk_hw_init(dev);
@@ -280,15 +273,16 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
if (ivsize == DES_BLOCK_SIZE)
- memcpy_fromio(dev->ablk_req->info,
- dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
+ memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
+ ivsize);
else if (ivsize == AES_BLOCK_SIZE)
- memcpy_fromio(dev->ablk_req->info,
- dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+ memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
}
/* return:
@@ -298,10 +292,12 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
int err = 0;
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
dev->unload_data(dev);
if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
+ if (!sg_pcopy_from_buffer(req->dst, dev->nents,
dev->addr_vir, dev->count,
dev->total - dev->left_bytes -
dev->count)) {
@@ -324,7 +320,8 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
} else {
rk_iv_copyback(dev);
		/* here the calculation is over without any error */
- dev->complete(dev, 0);
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
}
out_rx:
return err;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 718588219f75..821a506b9e17 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -40,14 +40,16 @@ static int zero_message_process(struct ahash_request *req)
return 0;
}
-static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
- if (dev->ahash_req->base.complete)
- dev->ahash_req->base.complete(&dev->ahash_req->base, err);
+ if (base->complete)
+ base->complete(base, err);
}
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
int reg_status = 0;
reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
@@ -67,7 +69,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
RK_CRYPTO_HRDMA_DONE_INT);
- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
RK_CRYPTO_HASH_SWAP_DO);
CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
@@ -164,64 +166,13 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
static int rk_ahash_digest(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct rk_crypto_info *dev = NULL;
- unsigned long flags;
- int ret;
+ struct rk_crypto_info *dev = tctx->dev;
if (!req->nbytes)
return zero_message_process(req);
-
- dev = tctx->dev;
- dev->total = req->nbytes;
- dev->left_bytes = req->nbytes;
- dev->aligned = 0;
- dev->mode = 0;
- dev->align_size = 4;
- dev->sg_dst = NULL;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->nents = sg_nents(req->src);
-
- switch (crypto_ahash_digestsize(tfm)) {
- case SHA1_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_SHA1;
- break;
- case SHA256_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_SHA256;
- break;
- case MD5_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_MD5;
- break;
- default:
- return -EINVAL;
- }
-
- rk_ahash_reg_init(dev);
-
- spin_lock_irqsave(&dev->lock, flags);
- ret = crypto_enqueue_request(&dev->queue, &req->base);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- tasklet_schedule(&dev->crypto_tasklet);
-
- /*
- * it will take some time to process date after last dma transmission.
- *
- * waiting time is relative with the last date len,
- * so cannot set a fixed time here.
- * 10-50 makes system not call here frequently wasting
- * efficiency, and make it response quickly when dma
- * complete.
- */
- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
- usleep_range(10, 50);
-
- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
- crypto_ahash_digestsize(tfm));
-
- return 0;
+ else
+ return dev->enqueue(dev, &req->base);
}
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
@@ -244,12 +195,45 @@ static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
static int rk_ahash_start(struct rk_crypto_info *dev)
{
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct crypto_ahash *tfm;
+ struct rk_ahash_rctx *rctx;
+
+ dev->total = req->nbytes;
+ dev->left_bytes = req->nbytes;
+ dev->aligned = 0;
+ dev->align_size = 4;
+ dev->sg_dst = NULL;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+ rctx = ahash_request_ctx(req);
+ rctx->mode = 0;
+
+ tfm = crypto_ahash_reqtfm(req);
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA256;
+ break;
+ case MD5_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rk_ahash_reg_init(dev);
return rk_ahash_set_data_start(dev);
}
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
int err = 0;
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct crypto_ahash *tfm;
dev->unload_data(dev);
if (dev->left_bytes) {
@@ -264,7 +248,24 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
}
err = rk_ahash_set_data_start(dev);
} else {
- dev->complete(dev, 0);
+ /*
+ * It will take some time to process the data after the last
+ * DMA transmission.
+ *
+ * The waiting time depends on the length of the last data, so
+ * a fixed delay cannot be used here. Polling in 10us steps
+ * keeps the busy-wait cheap while still responding quickly
+ * once the DMA completes.
+ */
+ while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+ udelay(10);
+
+ tfm = crypto_ahash_reqtfm(req);
+ memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+ crypto_ahash_digestsize(tfm));
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
}
out_rx:
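
The busy-wait that moved into rk_ahash_crypto_rx() above now runs in tasklet (atomic) context, which is why usleep_range() became udelay(). A bounded variant is sketched below, assuming dev->reg and RK_CRYPTO_HASH_STS keep their meaning from this driver; readl_poll_timeout_atomic() returns -ETIMEDOUT instead of spinning forever if the engine never raises the status bit. This is an illustration, not part of the patch:

#include <linux/iopoll.h>

/* Hypothetical bounded replacement for the open-coded poll above:
 * spin in 10us steps, give up after 1ms instead of hanging the CPU. */
static int rk_ahash_wait_status(struct rk_crypto_info *dev)
{
	u32 sts;

	return readl_poll_timeout_atomic(dev->reg + RK_CRYPTO_HASH_STS,
					 sts, sts, 10, 1000);
}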
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 1d9ecd368b5b..08e7bdcaa6e3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -202,7 +202,6 @@ struct sahara_dev {
struct completion dma_completion;
struct sahara_ctx *ctx;
- spinlock_t lock;
struct crypto_queue queue;
unsigned long flags;
@@ -543,10 +542,10 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
unmap_out:
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
+ DMA_FROM_DEVICE);
unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return -EINVAL;
}
@@ -594,9 +593,9 @@ static int sahara_aes_process(struct ablkcipher_request *req)
}
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_FROM_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
return 0;
}
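
The two hunks above fix swapped unmap directions: dma_unmap_sg() must be called with the same direction as the dma_map_sg() it undoes, i.e. input scatterlists mapped DMA_TO_DEVICE and output scatterlists DMA_FROM_DEVICE. A minimal sketch of that pairing, with placeholder names rather than sahara state:

#include <linux/dma-mapping.h>

/* Sketch of the map/unmap invariant the fix restores; 'dev', the
 * scatterlists and the nents counts are placeholders. */
static int map_unmap_sketch(struct device *dev,
			    struct scatterlist *in_sg, int nb_in,
			    struct scatterlist *out_sg, int nb_out)
{
	if (!dma_map_sg(dev, in_sg, nb_in, DMA_TO_DEVICE))
		return -EINVAL;

	if (!dma_map_sg(dev, out_sg, nb_out, DMA_FROM_DEVICE)) {
		/* unwind with the direction used at map time */
		dma_unmap_sg(dev, in_sg, nb_in, DMA_TO_DEVICE);
		return -EINVAL;
	}

	/* ... run the transfer ... */

	dma_unmap_sg(dev, out_sg, nb_out, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, in_sg, nb_in, DMA_TO_DEVICE);
	return 0;
}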
@@ -1376,13 +1375,13 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
crypto_unregister_ahash(&sha_v4_algs[i]);
}
-static struct platform_device_id sahara_platform_ids[] = {
+static const struct platform_device_id sahara_platform_ids[] = {
{ .name = "sahara-imx27" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
-static struct of_device_id sahara_dt_ids[] = {
+static const struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx53-sahara" },
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
@@ -1487,7 +1486,6 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- spin_lock_init(&dev->lock);
mutex_init(&dev->queue_mutex);
dev_ptr = dev;
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 09b4ec87c212..602332e02729 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,7 +1,20 @@
-config CRYPTO_DEV_STM32
- tristate "Support for STM32 crypto accelerators"
+config CRC_DEV_STM32
+ tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
help
This enables support for the CRC32 hw accelerator which can be found
- on STMicroelectronis STM32 SOC.
+ on STMicroelectronics STM32 SOC.
+
+config HASH_DEV_STM32
+ tristate "Support for STM32 hash accelerators"
+ depends on ARCH_STM32
+ depends on HAS_DMA
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_ENGINE
+ help
+ This enables support for the HASH hw accelerator which can be found
+ on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73b4c6e47f5f..73cd56cad0cc 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32_cryp.o
-stm32_cryp-objs := stm32_crc32.o
+obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
+obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
new file mode 100644
index 000000000000..b585ce54a802
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -0,0 +1,1575 @@
+/*
+ * This file is part of STM32 Crypto driver for Linux.
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define HASH_CR 0x00
+#define HASH_DIN 0x04
+#define HASH_STR 0x08
+#define HASH_IMR 0x20
+#define HASH_SR 0x24
+#define HASH_CSR(x) (0x0F8 + ((x) * 0x04))
+#define HASH_HREG(x) (0x310 + ((x) * 0x04))
+#define HASH_HWCFGR 0x3F0
+#define HASH_VER 0x3F4
+#define HASH_ID 0x3F8
+
+/* Control Register */
+#define HASH_CR_INIT BIT(2)
+#define HASH_CR_DMAE BIT(3)
+#define HASH_CR_DATATYPE_POS 4
+#define HASH_CR_MODE BIT(6)
+#define HASH_CR_MDMAT BIT(13)
+#define HASH_CR_DMAA BIT(14)
+#define HASH_CR_LKEY BIT(16)
+
+#define HASH_CR_ALGO_SHA1 0x0
+#define HASH_CR_ALGO_MD5 0x80
+#define HASH_CR_ALGO_SHA224 0x40000
+#define HASH_CR_ALGO_SHA256 0x40080
+
+/* Interrupt */
+#define HASH_DINIE BIT(0)
+#define HASH_DCIE BIT(1)
+
+/* Interrupt Mask */
+#define HASH_MASK_CALC_COMPLETION BIT(0)
+#define HASH_MASK_DATA_INPUT BIT(1)
+
+/* Context swap register */
+#define HASH_CSR_REGISTER_NUMBER 53
+
+/* Status Flags */
+#define HASH_SR_DATA_INPUT_READY BIT(0)
+#define HASH_SR_OUTPUT_READY BIT(1)
+#define HASH_SR_DMA_ACTIVE BIT(2)
+#define HASH_SR_BUSY BIT(3)
+
+/* STR Register */
+#define HASH_STR_NBLW_MASK GENMASK(4, 0)
+#define HASH_STR_DCAL BIT(8)
+
+#define HASH_FLAGS_INIT BIT(0)
+#define HASH_FLAGS_OUTPUT_READY BIT(1)
+#define HASH_FLAGS_CPU BIT(2)
+#define HASH_FLAGS_DMA_READY BIT(3)
+#define HASH_FLAGS_DMA_ACTIVE BIT(4)
+#define HASH_FLAGS_HMAC_INIT BIT(5)
+#define HASH_FLAGS_HMAC_FINAL BIT(6)
+#define HASH_FLAGS_HMAC_KEY BIT(7)
+
+#define HASH_FLAGS_FINAL BIT(15)
+#define HASH_FLAGS_FINUP BIT(16)
+#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18)
+#define HASH_FLAGS_MD5 BIT(18)
+#define HASH_FLAGS_SHA1 BIT(19)
+#define HASH_FLAGS_SHA224 BIT(20)
+#define HASH_FLAGS_SHA256 BIT(21)
+#define HASH_FLAGS_ERRORS BIT(22)
+#define HASH_FLAGS_HMAC BIT(23)
+
+#define HASH_OP_UPDATE 1
+#define HASH_OP_FINAL 2
+
+enum stm32_hash_data_format {
+ HASH_DATA_32_BITS = 0x0,
+ HASH_DATA_16_BITS = 0x1,
+ HASH_DATA_8_BITS = 0x2,
+ HASH_DATA_1_BIT = 0x3
+};
+
+#define HASH_BUFLEN 256
+#define HASH_LONG_KEY 64
+#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8)
+#define HASH_QUEUE_LENGTH 16
+#define HASH_DMA_THRESHOLD 50
+
+struct stm32_hash_ctx {
+ struct stm32_hash_dev *hdev;
+ unsigned long flags;
+
+ u8 key[HASH_MAX_KEY_SIZE];
+ int keylen;
+};
+
+struct stm32_hash_request_ctx {
+ struct stm32_hash_dev *hdev;
+ unsigned long flags;
+ unsigned long op;
+
+ u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+ size_t digcnt;
+ size_t bufcnt;
+ size_t buflen;
+
+ /* DMA */
+ struct scatterlist *sg;
+ unsigned int offset;
+ unsigned int total;
+ struct scatterlist sg_key;
+
+ dma_addr_t dma_addr;
+ size_t dma_ct;
+ int nents;
+
+ u8 data_type;
+
+ u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
+
+ /* Export Context */
+ u32 *hw_context;
+};
+
+struct stm32_hash_algs_info {
+ struct ahash_alg *algs_list;
+ size_t size;
+};
+
+struct stm32_hash_pdata {
+ struct stm32_hash_algs_info *algs_info;
+ size_t algs_info_size;
+};
+
+struct stm32_hash_dev {
+ struct list_head list;
+ struct device *dev;
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *io_base;
+ phys_addr_t phys_base;
+ u32 dma_mode;
+ u32 dma_maxburst;
+
+ spinlock_t lock; /* lock to protect queue */
+
+ struct ahash_request *req;
+ struct crypto_engine *engine;
+
+ int err;
+ unsigned long flags;
+
+ struct dma_chan *dma_lch;
+ struct completion dma_completion;
+
+ const struct stm32_hash_pdata *pdata;
+};
+
+struct stm32_hash_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* List protection access */
+};
+
+static struct stm32_hash_drv stm32_hash = {
+ .dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
+};
+
+static void stm32_hash_dma_callback(void *param);
+
+static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
+{
+ return readl_relaxed(hdev->io_base + offset);
+}
+
+static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, hdev->io_base + offset);
+}
+
+static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
+ !(status & HASH_SR_BUSY), 10, 10000);
+}
+
+static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
+{
+ u32 reg;
+
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg &= ~(HASH_STR_NBLW_MASK);
+ reg |= (8U * ((length) % 4U));
+ stm32_hash_write(hdev, HASH_STR, reg);
+}
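+
+The function above programs NBLW, the number of valid bits in the last 32-bit word written to HASH_DIN (0 meaning the whole word is valid). A standalone sanity check of that arithmetic, with illustrative values only:
+
+#include <assert.h>
+
+/* Mirrors the "8U * (length % 4U)" computation in stm32_hash_set_nblw(). */
+static unsigned int nblw(unsigned int length)
+{
+	return 8U * (length % 4U);
+}
+
+int main(void)
+{
+	assert(nblw(64) == 0);	/* multiple of 4: full last word valid */
+	assert(nblw(7) == 24);	/* 3 trailing bytes -> 24 valid bits */
+	assert(nblw(5) == 8);	/* 1 trailing byte -> 8 valid bits */
+	return 0;
+}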
+
+static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 reg;
+ int keylen = ctx->keylen;
+ void *key = ctx->key;
+
+ if (keylen) {
+ stm32_hash_set_nblw(hdev, keylen);
+
+ while (keylen > 0) {
+ stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
+ keylen -= 4;
+ key += 4;
+ }
+
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+
+ return -EINPROGRESS;
+ }
+
+ return 0;
+}
+
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ u32 reg = HASH_CR_INIT;
+
+ if (!(hdev->flags & HASH_FLAGS_INIT)) {
+ switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ case HASH_FLAGS_MD5:
+ reg |= HASH_CR_ALGO_MD5;
+ break;
+ case HASH_FLAGS_SHA1:
+ reg |= HASH_CR_ALGO_SHA1;
+ break;
+ case HASH_FLAGS_SHA224:
+ reg |= HASH_CR_ALGO_SHA224;
+ break;
+ case HASH_FLAGS_SHA256:
+ reg |= HASH_CR_ALGO_SHA256;
+ break;
+ default:
+ reg |= HASH_CR_ALGO_MD5;
+ }
+
+ reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
+
+ if (rctx->flags & HASH_FLAGS_HMAC) {
+ hdev->flags |= HASH_FLAGS_HMAC;
+ reg |= HASH_CR_MODE;
+ if (ctx->keylen > HASH_LONG_KEY)
+ reg |= HASH_CR_LKEY;
+ }
+
+ stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
+
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ hdev->flags |= HASH_FLAGS_INIT;
+
+ dev_dbg(hdev->dev, "Write Control %x\n", reg);
+ }
+}
+
+static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
+{
+ size_t count;
+
+ while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+ count = min(rctx->sg->length - rctx->offset, rctx->total);
+ count = min(count, rctx->buflen - rctx->bufcnt);
+
+ if (count <= 0) {
+ if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
+ rctx->sg = sg_next(rctx->sg);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
+ rctx->offset, count, 0);
+
+ rctx->bufcnt += count;
+ rctx->offset += count;
+ rctx->total -= count;
+
+ if (rctx->offset == rctx->sg->length) {
+ rctx->sg = sg_next(rctx->sg);
+ if (rctx->sg)
+ rctx->offset = 0;
+ else
+ rctx->total = 0;
+ }
+ }
+}
+
+static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
+ const u8 *buf, size_t length, int final)
+{
+ unsigned int count, len32;
+ const u32 *buffer = (const u32 *)buf;
+ u32 reg;
+
+ if (final)
+ hdev->flags |= HASH_FLAGS_FINAL;
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
+ __func__, length, final, len32);
+
+ hdev->flags |= HASH_FLAGS_CPU;
+
+ stm32_hash_write_ctrl(hdev);
+
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
+ if ((hdev->flags & HASH_FLAGS_HMAC) &&
+ !(hdev->flags & HASH_FLAGS_HMAC_KEY)) {
+ hdev->flags |= HASH_FLAGS_HMAC_KEY;
+ stm32_hash_write_key(hdev);
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ }
+
+ for (count = 0; count < len32; count++)
+ stm32_hash_write(hdev, HASH_DIN, buffer[count]);
+
+ if (final) {
+ stm32_hash_set_nblw(hdev, length);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ stm32_hash_write_key(hdev);
+ }
+ return -EINPROGRESS;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ int bufcnt, err = 0, final;
+
+ dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+
+ final = (rctx->flags & HASH_FLAGS_FINUP);
+
+ while ((rctx->total >= rctx->buflen) ||
+ (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+ stm32_hash_append_sg(rctx);
+ bufcnt = rctx->bufcnt;
+ rctx->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+ }
+
+ stm32_hash_append_sg(rctx);
+
+ if (final) {
+ bufcnt = rctx->bufcnt;
+ rctx->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
+ (rctx->flags & HASH_FLAGS_FINUP));
+ }
+
+ return err;
+}
+
+static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
+ struct scatterlist *sg, int length, int mdma)
+{
+ struct dma_async_tx_descriptor *in_desc;
+ dma_cookie_t cookie;
+ u32 reg;
+ int err;
+
+ in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!in_desc) {
+ dev_err(hdev->dev, "dmaengine_prep_slave error\n");
+ return -ENOMEM;
+ }
+
+ reinit_completion(&hdev->dma_completion);
+ in_desc->callback = stm32_hash_dma_callback;
+ in_desc->callback_param = hdev;
+
+ hdev->flags |= HASH_FLAGS_FINAL;
+ hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
+
+ reg = stm32_hash_read(hdev, HASH_CR);
+
+ if (mdma)
+ reg |= HASH_CR_MDMAT;
+ else
+ reg &= ~HASH_CR_MDMAT;
+
+ reg |= HASH_CR_DMAE;
+
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ stm32_hash_set_nblw(hdev, length);
+
+ cookie = dmaengine_submit(in_desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ return -ENOMEM;
+
+ dma_async_issue_pending(hdev->dma_lch);
+
+ if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
+ msecs_to_jiffies(100)))
+ err = -ETIMEDOUT;
+
+ if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
+ NULL, NULL) != DMA_COMPLETE)
+ err = -ETIMEDOUT;
+
+ if (err) {
+ dev_err(hdev->dev, "DMA Error %i\n", err);
+ dmaengine_terminate_all(hdev->dma_lch);
+ return err;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void stm32_hash_dma_callback(void *param)
+{
+ struct stm32_hash_dev *hdev = param;
+
+ complete(&hdev->dma_completion);
+
+ hdev->flags |= HASH_FLAGS_DMA_READY;
+}
+
+static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ int err;
+
+ if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+ err = stm32_hash_write_key(hdev);
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ } else {
+ if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
+ sg_init_one(&rctx->sg_key, ctx->key,
+ ALIGN(ctx->keylen, sizeof(u32)));
+
+ rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
+ DMA_TO_DEVICE);
+ if (rctx->dma_ct == 0) {
+ dev_err(hdev->dev, "dma_map_sg error\n");
+ return -ENOMEM;
+ }
+
+ err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
+
+ dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
+ }
+
+ return err;
+}
+
+static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
+{
+ struct dma_slave_config dma_conf;
+ int err;
+
+ memset(&dma_conf, 0, sizeof(dma_conf));
+
+ dma_conf.direction = DMA_MEM_TO_DEV;
+ dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
+ dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_conf.src_maxburst = hdev->dma_maxburst;
+ dma_conf.dst_maxburst = hdev->dma_maxburst;
+ dma_conf.device_fc = false;
+
+ hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
+ if (!hdev->dma_lch) {
+ dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
+ return -EBUSY;
+ }
+
+ err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
+ if (err) {
+ dma_release_channel(hdev->dma_lch);
+ hdev->dma_lch = NULL;
+ dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
+ return err;
+ }
+
+ init_completion(&hdev->dma_completion);
+
+ return 0;
+}
+
+static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct scatterlist sg[1], *tsg;
+ int err = 0, len = 0, reg, ncp = 0;
+ unsigned int i;
+ const u32 *buffer = (const u32 *)rctx->buffer;
+
+ rctx->sg = hdev->req->src;
+ rctx->total = hdev->req->nbytes;
+
+ rctx->nents = sg_nents(rctx->sg);
+
+ if (rctx->nents < 0)
+ return -EINVAL;
+
+ stm32_hash_write_ctrl(hdev);
+
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ err = stm32_hash_hmac_dma_send(hdev);
+ if (err != -EINPROGRESS)
+ return err;
+ }
+
+ for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+ sg[0] = *tsg;
+ len = sg->length;
+
+ if (sg_is_last(sg)) {
+ if (hdev->dma_mode == 1) {
+ len = (ALIGN(sg->length, 16) - 16);
+
+ ncp = sg_pcopy_to_buffer(
+ rctx->sg, rctx->nents,
+ rctx->buffer, sg->length - len,
+ rctx->total - sg->length + len);
+
+ sg->length = len;
+ } else {
+ if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
+ len = sg->length;
+ sg->length = ALIGN(sg->length,
+ sizeof(u32));
+ }
+ }
+ }
+
+ rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
+ DMA_TO_DEVICE);
+ if (rctx->dma_ct == 0) {
+ dev_err(hdev->dev, "dma_map_sg error\n");
+ return -ENOMEM;
+ }
+
+ err = stm32_hash_xmit_dma(hdev, sg, len,
+ !sg_is_last(sg));
+
+ dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
+
+ if (err == -ENOMEM)
+ return err;
+ }
+
+ if (hdev->dma_mode == 1) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ reg = stm32_hash_read(hdev, HASH_CR);
+ reg &= ~HASH_CR_DMAE;
+ reg |= HASH_CR_DMAA;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
+ stm32_hash_write(hdev, HASH_DIN, buffer[i]);
+
+ stm32_hash_set_nblw(hdev, ncp);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ err = -EINPROGRESS;
+ }
+
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ err = stm32_hash_hmac_dma_send(hdev);
+ }
+
+ return err;
+}
+
+static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
+{
+ struct stm32_hash_dev *hdev = NULL, *tmp;
+
+ spin_lock_bh(&stm32_hash.lock);
+ if (!ctx->hdev) {
+ list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
+ hdev = tmp;
+ break;
+ }
+ ctx->hdev = hdev;
+ } else {
+ hdev = ctx->hdev;
+ }
+
+ spin_unlock_bh(&stm32_hash.lock);
+
+ return hdev;
+}
+
+static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
+{
+ struct scatterlist *sg;
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ int i;
+
+ if (req->nbytes <= HASH_DMA_THRESHOLD)
+ return false;
+
+ if (sg_nents(req->src) > 1) {
+ if (hdev->dma_mode == 1)
+ return false;
+ for_each_sg(req->src, sg, sg_nents(req->src), i) {
+ if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
+ (!sg_is_last(sg)))
+ return false;
+ }
+ }
+
+ if (req->src->offset % 4)
+ return false;
+
+ return true;
+}
+
+static int stm32_hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+
+ rctx->hdev = hdev;
+
+ rctx->flags = HASH_FLAGS_CPU;
+
+ rctx->digcnt = crypto_ahash_digestsize(tfm);
+ switch (rctx->digcnt) {
+ case MD5_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_MD5;
+ break;
+ case SHA1_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA1;
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->buflen = HASH_BUFLEN;
+ rctx->total = 0;
+ rctx->offset = 0;
+ rctx->data_type = HASH_DATA_8_BITS;
+
+ memset(rctx->buffer, 0, HASH_BUFLEN);
+
+ if (ctx->flags & HASH_FLAGS_HMAC)
+ rctx->flags |= HASH_FLAGS_HMAC;
+
+ dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+
+ return 0;
+}
+
+static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
+{
+ return stm32_hash_update_cpu(hdev);
+}
+
+static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
+{
+ struct ahash_request *req = hdev->req;
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ int err;
+
+ if (!(rctx->flags & HASH_FLAGS_CPU))
+ err = stm32_hash_dma_send(hdev);
+ else
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);
+
+ rctx->bufcnt = 0;
+
+ return err;
+}
+
+static void stm32_hash_copy_hash(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)rctx->digest;
+ unsigned int i, hashsize;
+
+ switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ case HASH_FLAGS_MD5:
+ hashsize = MD5_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA1:
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA224:
+ hashsize = SHA224_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA256:
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < hashsize / sizeof(u32); i++)
+ hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
+ HASH_HREG(i)));
+}
+
+static int stm32_hash_finish(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return -EINVAL;
+
+ memcpy(req->result, rctx->digest, rctx->digcnt);
+
+ return 0;
+}
+
+static void stm32_hash_finish_req(struct ahash_request *req, int err)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = rctx->hdev;
+
+ if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
+ stm32_hash_copy_hash(req);
+ err = stm32_hash_finish(req);
+ hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
+ HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
+ HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
+ HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
+ HASH_FLAGS_HMAC_KEY);
+ } else {
+ rctx->flags |= HASH_FLAGS_ERRORS;
+ }
+
+ crypto_finalize_hash_request(hdev->engine, req, err);
+}
+
+static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
+ struct stm32_hash_request_ctx *rctx)
+{
+ if (!(HASH_FLAGS_INIT & hdev->flags)) {
+ stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
+ stm32_hash_write(hdev, HASH_STR, 0);
+ stm32_hash_write(hdev, HASH_DIN, 0);
+ stm32_hash_write(hdev, HASH_IMR, 0);
+ hdev->err = 0;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
+ struct ahash_request *req)
+{
+ return crypto_transfer_hash_request_to_engine(hdev->engine, req);
+}
+
+static int stm32_hash_prepare_req(struct crypto_engine *engine,
+ struct ahash_request *req)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_request_ctx *rctx;
+
+ if (!hdev)
+ return -ENODEV;
+
+ hdev->req = req;
+
+ rctx = ahash_request_ctx(req);
+
+ dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
+ rctx->op, req->nbytes);
+
+ return stm32_hash_hw_init(hdev, rctx);
+}
+
+static int stm32_hash_one_request(struct crypto_engine *engine,
+ struct ahash_request *req)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_request_ctx *rctx;
+ int err = 0;
+
+ if (!hdev)
+ return -ENODEV;
+
+ hdev->req = req;
+
+ rctx = ahash_request_ctx(req);
+
+ if (rctx->op == HASH_OP_UPDATE)
+ err = stm32_hash_update_req(hdev);
+ else if (rctx->op == HASH_OP_FINAL)
+ err = stm32_hash_final_req(hdev);
+
+ if (err != -EINPROGRESS)
+ /* done task will not finish it, so do it here */
+ stm32_hash_finish_req(req, err);
+
+ return 0;
+}
+
+static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct stm32_hash_dev *hdev = ctx->hdev;
+
+ rctx->op = op;
+
+ return stm32_hash_handle_queue(hdev, req);
+}
+
+static int stm32_hash_update(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ int ret;
+
+ if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
+ return 0;
+
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+
+ if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+ stm32_hash_append_sg(rctx);
+ return 0;
+ }
+
+ ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);
+
+ if (rctx->flags & HASH_FLAGS_FINUP)
+ return ret;
+
+ return 0;
+}
+
+static int stm32_hash_final(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ rctx->flags |= HASH_FLAGS_FINUP;
+
+ return stm32_hash_enqueue(req, HASH_OP_FINAL);
+}
+
+static int stm32_hash_finup(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ int err1, err2;
+
+ rctx->flags |= HASH_FLAGS_FINUP;
+
+ if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+ rctx->flags &= ~HASH_FLAGS_CPU;
+
+ err1 = stm32_hash_update(req);
+
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+ * final() must always be called to clean up resources, even if
+ * update() failed, unless update() returned -EINPROGRESS.
+ */
+ err2 = stm32_hash_final(req);
+
+ return err1 ?: err2;
+}
+
+static int stm32_hash_digest(struct ahash_request *req)
+{
+ return stm32_hash_init(req) ?: stm32_hash_finup(req);
+}
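+
+stm32_hash_digest() and stm32_hash_finup() above use the GNU "a ?: b" extension, which evaluates to a when a is non-zero and to b otherwise, so finup() only runs when init() returned 0. Longhand equivalent, for reference:
+
+/* Equivalent of "return stm32_hash_init(req) ?: stm32_hash_finup(req);" */
+static int stm32_hash_digest_longhand(struct ahash_request *req)
+{
+	int err = stm32_hash_init(req);
+
+	if (err)
+		return err;
+	return stm32_hash_finup(req);
+}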
+
+static int stm32_hash_export(struct ahash_request *req, void *out)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ u32 *preg;
+ unsigned int i;
+
+ while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+ cpu_relax();
+
+ rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER),
+ GFP_KERNEL);
+
+ preg = rctx->hw_context;
+
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int stm32_hash_import(struct ahash_request *req, const void *in)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ const u32 *preg = in;
+ u32 reg;
+ unsigned int i;
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ preg = rctx->hw_context;
+
+ stm32_hash_write(hdev, HASH_IMR, *preg++);
+ stm32_hash_write(hdev, HASH_STR, *preg++);
+ stm32_hash_write(hdev, HASH_CR, *preg);
+ reg = *preg++ | HASH_CR_INIT;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+
+ kfree(rctx->hw_context);
+
+ return 0;
+}
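+
+The export/import pair above round-trips the whole request context plus the 53 HASH_CSR context-swap registers saved in the kmalloc'd hw_context buffer (which import kfrees again). A consumer-side sketch of suspending and resuming a partial hash through the generic ahash state API; the request names are assumptions, not code from the patch:
+
+/* Illustrative round trip; 'req' and 'req2' must share statesize,
+ * i.e. sizeof(struct stm32_hash_request_ctx) for these algs. */
+static int hash_suspend_resume(struct ahash_request *req,
+			       struct ahash_request *req2)
+{
+	char state[sizeof(struct stm32_hash_request_ctx)];
+	int err;
+
+	err = crypto_ahash_export(req, state);	/* saves IMR/STR/CR + CSRs */
+	if (err)
+		return err;
+	return crypto_ahash_import(req2, state); /* rewrites them, CR with INIT */
+}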
+
+static int stm32_hash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (keylen <= HASH_MAX_KEY_SIZE) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
+ const char *algs_hmac_name)
+{
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct stm32_hash_request_ctx));
+
+ ctx->keylen = 0;
+
+ if (algs_hmac_name)
+ ctx->flags |= HASH_FLAGS_HMAC;
+
+ return 0;
+}
+
+static int stm32_hash_cra_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, NULL);
+}
+
+static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "md5");
+}
+
+static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha1");
+}
+
+static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha224");
+}
+
+static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha256");
+}
+
+static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
+{
+ struct stm32_hash_dev *hdev = dev_id;
+
+ if (HASH_FLAGS_CPU & hdev->flags) {
+ if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
+ goto finish;
+ }
+ } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
+ if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+ goto finish;
+ }
+ }
+
+ return IRQ_HANDLED;
+
+finish:
+ /* Finish current request */
+ stm32_hash_finish_req(hdev->req, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
+{
+ struct stm32_hash_dev *hdev = dev_id;
+ u32 reg;
+
+ reg = stm32_hash_read(hdev, HASH_SR);
+ if (reg & HASH_SR_OUTPUT_READY) {
+ reg &= ~HASH_SR_OUTPUT_READY;
+ stm32_hash_write(hdev, HASH_SR, reg);
+ hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct ahash_alg algs_md5_sha1[] = {
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "stm32-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "stm32-hmac-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_md5_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "stm32-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "stm32-hmac-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha1_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static struct ahash_alg algs_sha224_sha256[] = {
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "stm32-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .setkey = stm32_hash_setkey,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "stm32-hmac-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha224_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "stm32-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "stm32-hmac-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha256_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
+{
+ unsigned int i, j;
+ int err;
+
+ for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+ for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
+ err = crypto_register_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ if (err)
+ goto err_algs;
+ }
+ }
+
+ return 0;
+err_algs:
+ dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
+ for (; i--; ) {
+ for (; j--;)
+ crypto_unregister_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ }
+
+ return err;
+}
+
+static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+ for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
+ crypto_unregister_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ }
+
+ return 0;
+}
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
+ {
+ .algs_list = algs_md5_sha1,
+ .size = ARRAY_SIZE(algs_md5_sha1),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+ .algs_info = stm32_hash_algs_info_stm32f4,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
+};
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
+ {
+ .algs_list = algs_md5_sha1,
+ .size = ARRAY_SIZE(algs_md5_sha1),
+ },
+ {
+ .algs_list = algs_sha224_sha256,
+ .size = ARRAY_SIZE(algs_sha224_sha256),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+ .algs_info = stm32_hash_algs_info_stm32f7,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+ {
+ .compatible = "st,stm32f456-hash",
+ .data = &stm32_hash_pdata_stm32f4,
+ },
+ {
+ .compatible = "st,stm32f756-hash",
+ .data = &stm32_hash_pdata_stm32f7,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
+
+static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
+ struct device *dev)
+{
+ const struct of_device_id *match;
+ int err;
+
+ match = of_match_device(stm32_hash_of_match, dev);
+ if (!match) {
+ dev_err(dev, "no compatible OF match\n");
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(dev->of_node, "dma-maxburst",
+ &hdev->dma_maxburst);
+
+ hdev->pdata = match->data;
+
+ return err;
+}
+
+static int stm32_hash_probe(struct platform_device *pdev)
+{
+ struct stm32_hash_dev *hdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, irq;
+
+ hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdev->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdev->io_base))
+ return PTR_ERR(hdev->io_base);
+
+ hdev->phys_base = res->start;
+
+ ret = stm32_hash_get_of_match(hdev, dev);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
+ stm32_hash_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), hdev);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ hdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hdev->clk)) {
+ dev_err(dev, "failed to get clock for hash (%lu)\n",
+ PTR_ERR(hdev->clk));
+ return PTR_ERR(hdev->clk);
+ }
+
+ ret = clk_prepare_enable(hdev->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable hash clock (%d)\n", ret);
+ return ret;
+ }
+
+ hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(hdev->rst)) {
+ reset_control_assert(hdev->rst);
+ udelay(2);
+ reset_control_deassert(hdev->rst);
+ }
+
+ hdev->dev = dev;
+
+ platform_set_drvdata(pdev, hdev);
+
+ ret = stm32_hash_dma_init(hdev);
+ if (ret)
+ dev_dbg(dev, "DMA mode not available\n");
+
+ spin_lock(&stm32_hash.lock);
+ list_add_tail(&hdev->list, &stm32_hash.dev_list);
+ spin_unlock(&stm32_hash.lock);
+
+ /* Initialize crypto engine */
+ hdev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!hdev->engine) {
+ ret = -ENOMEM;
+ goto err_engine;
+ }
+
+ hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
+ hdev->engine->hash_one_request = stm32_hash_one_request;
+
+ ret = crypto_engine_start(hdev->engine);
+ if (ret)
+ goto err_engine_start;
+
+ hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+
+ /* Register algos */
+ ret = stm32_hash_register_algs(hdev);
+ if (ret)
+ goto err_algs;
+
+ dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
+ stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+
+ return 0;
+
+err_algs:
+err_engine_start:
+ crypto_engine_exit(hdev->engine);
+err_engine:
+ spin_lock(&stm32_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&stm32_hash.lock);
+
+ if (hdev->dma_lch)
+ dma_release_channel(hdev->dma_lch);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return ret;
+}
+
+static int stm32_hash_remove(struct platform_device *pdev)
+{
+ struct stm32_hash_dev *hdev;
+
+ hdev = platform_get_drvdata(pdev);
+ if (!hdev)
+ return -ENODEV;
+
+ stm32_hash_unregister_algs(hdev);
+
+ crypto_engine_exit(hdev->engine);
+
+ spin_lock(&stm32_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&stm32_hash.lock);
+
+ if (hdev->dma_lch)
+ dma_release_channel(hdev->dma_lch);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_hash_driver = {
+ .probe = stm32_hash_probe,
+ .remove = stm32_hash_remove,
+ .driver = {
+ .name = "stm32-hash",
+ .of_match_table = stm32_hash_of_match,
+ }
+};
+
+module_platform_driver(stm32_hash_driver);
+
+MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index ec83b1e6bfe8..090582baecfe 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -107,12 +107,12 @@ static int stm32_crc_init(struct shash_desc *desc)
spin_unlock_bh(&crc_list.lock);
/* Reset, set key, poly and configure in bit reverse mode */
- writel(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
- writel(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
- writel(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+ writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
/* Store partial result */
- ctx->partial = readl(ctx->crc->regs + CRC_DR);
+ ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
return 0;
@@ -135,7 +135,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
if (crc->nb_pending_bytes == sizeof(u32)) {
/* Process completed pending data */
- writel(*(u32 *)crc->pending_data, crc->regs + CRC_DR);
+ writel_relaxed(*(u32 *)crc->pending_data,
+ crc->regs + CRC_DR);
crc->nb_pending_bytes = 0;
}
}
@@ -143,10 +144,10 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
d32 = (u32 *)d8;
for (i = 0; i < length >> 2; i++)
/* Process 32 bits data */
- writel(*(d32++), crc->regs + CRC_DR);
+ writel_relaxed(*(d32++), crc->regs + CRC_DR);
/* Store partial result */
- ctx->partial = readl(crc->regs + CRC_DR);
+ ctx->partial = readl_relaxed(crc->regs + CRC_DR);
/* Check for pending data (non 32 bits) */
length &= 3;
@@ -295,7 +296,7 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
- crypto_unregister_shash(algs);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
clk_disable_unprepare(crc->clk);
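
The writel()/readl() to *_relaxed() swap above drops the barriers implied by the ordered accessors; that is safe here because accesses to the same peripheral stay ordered with respect to each other and no DMA buffers are involved. A sketch of the resulting hot path, reusing the driver's CRC_DR name as an assumption:

/* Relaxed MMIO hot path: no barrier needed between feeding CRC_DR
 * and reading the partial result back from the same unit. */
static inline u32 crc_feed_word(void __iomem *regs, u32 word)
{
	writel_relaxed(word, regs + CRC_DR);
	return readl_relaxed(regs + CRC_DR);
}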
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
index 8f4c7a273141..ccb893219079 100644
--- a/drivers/crypto/sunxi-ss/Makefile
+++ b/drivers/crypto/sunxi-ss/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
+sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 02ad8256e900..1547cbe13dc2 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -213,6 +213,23 @@ static struct sun4i_ss_alg_template ss_algs[] = {
}
}
},
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+{
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .alg.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "sun4i_ss_rng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .generate = sun4i_ss_prng_generate,
+ .seed = sun4i_ss_prng_seed,
+ .seedsize = SS_SEED_LEN / BITS_PER_BYTE,
+ }
+},
+#endif
};
static int sun4i_ss_probe(struct platform_device *pdev)
@@ -355,6 +372,13 @@ static int sun4i_ss_probe(struct platform_device *pdev)
goto error_alg;
}
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ err = crypto_register_rng(&ss_algs[i].alg.rng);
+ if (err) {
+ dev_err(ss->dev, "Failed to register %s\n",
+ ss_algs[i].alg.rng.base.cra_name);
+ }
+ break;
}
}
platform_set_drvdata(pdev, ss);
@@ -369,6 +393,9 @@ error_alg:
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&ss_algs[i].alg.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
}
}
if (ss->reset)
@@ -393,6 +420,9 @@ static int sun4i_ss_remove(struct platform_device *pdev)
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&ss_algs[i].alg.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
}
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
new file mode 100644
index 000000000000..0d01d1624252
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -0,0 +1,56 @@
+#include "sun4i-ss.h"
+
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct sun4i_ss_alg_template *algt;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+ memcpy(algt->ss->seed, seed, slen);
+
+ return 0;
+}
+
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen)
+{
+ struct sun4i_ss_alg_template *algt;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ int i;
+ u32 v;
+ u32 *data = (u32 *)dst;
+ const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
+ size_t len;
+ struct sun4i_ss_ctx *ss;
+ unsigned int todo = (dlen / 4) * 4;
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+ ss = algt->ss;
+
+ spin_lock(&ss->slock);
+
+ writel(mode, ss->base + SS_CTL);
+
+ while (todo > 0) {
+ /* write the seed */
+ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++)
+ writel(ss->seed[i], ss->base + SS_KEY0 + i * 4);
+
+ /* Read the random data */
+ len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo);
+ readsl(ss->base + SS_TXFIFO, data, len / 4);
+ data += len / 4;
+ todo -= len;
+
+ /* Update the seed */
+ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) {
+ v = readl(ss->base + SS_KEY0 + i * 4);
+ ss->seed[i] = v;
+ }
+ }
+
+ writel(0, ss->base + SS_CTL);
+ spin_unlock(&ss->slock);
+ return dlen;
+}
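+
+A sketch of how a kernel consumer would drive the PRNG registered above through the generic rng API; the seed must be seedsize bytes (SS_SEED_LEN / BITS_PER_BYTE = 24), and "stdrng" resolves to this driver only if it wins the priority selection, so treat this as an illustration:
+
+#include <crypto/rng.h>
+
+/* Hypothetical consumer; seed bytes would come from a real entropy
+ * source, not a zeroed stack buffer. */
+static int sun4i_prng_demo(u8 *out, unsigned int outlen)
+{
+	u8 seed[SS_SEED_LEN / BITS_PER_BYTE] = { 0 };
+	struct crypto_rng *rng;
+	int err;
+
+	rng = crypto_alloc_rng("stdrng", 0, 0);
+	if (IS_ERR(rng))
+		return PTR_ERR(rng);
+
+	err = crypto_rng_reset(rng, seed, sizeof(seed));	/* ->seed() */
+	if (!err)
+		err = crypto_rng_get_bytes(rng, out, outlen);	/* ->generate() */
+
+	crypto_free_rng(rng);
+	return err;
+}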
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index a0e1efc1cb2a..f3ac90692ac6 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -32,6 +32,7 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
#define SS_CTL 0x00
#define SS_KEY0 0x04
@@ -127,6 +128,9 @@
#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
+#define SS_SEED_LEN 192
+#define SS_DATA_LEN 160
+
struct sun4i_ss_ctx {
void __iomem *base;
int irq;
@@ -136,6 +140,9 @@ struct sun4i_ss_ctx {
struct device *dev;
struct resource *res;
spinlock_t slock; /* control the use of the device */
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+ u32 seed[SS_SEED_LEN / BITS_PER_LONG];
+#endif
};
struct sun4i_ss_alg_template {
@@ -144,6 +151,7 @@ struct sun4i_ss_alg_template {
union {
struct skcipher_alg crypto;
struct ahash_alg hash;
+ struct rng_alg rng;
} alg;
struct sun4i_ss_ctx *ss;
};
@@ -201,3 +209,6 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen);
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 49defda4e03d..5035b0dc1e40 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -27,12 +27,68 @@
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
+
+struct virtio_crypto_ablkcipher_ctx {
+ struct virtio_crypto *vcrypto;
+ struct crypto_tfm *tfm;
+
+ struct virtio_crypto_sym_session_info enc_sess_info;
+ struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_sym_request {
+ struct virtio_crypto_request base;
+
+ /* Cipher or aead */
+ uint32_t type;
+ struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+ struct ablkcipher_request *ablkcipher_req;
+ uint8_t *iv;
+ /* Encryption? */
+ bool encrypt;
+};
+
/*
* The algs_lock protects the below global virtio_crypto_active_devs
* and crypto algorithms registration.
*/
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
+static void virtio_crypto_ablkcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
+ struct ablkcipher_request *req,
+ int err);
+
+static void virtio_crypto_dataq_sym_callback
+ (struct virtio_crypto_request *vc_req, int len)
+{
+ struct virtio_crypto_sym_request *vc_sym_req =
+ container_of(vc_req, struct virtio_crypto_sym_request, base);
+ struct ablkcipher_request *ablk_req;
+ int error;
+
+ /* Finish the encrypt or decrypt process */
+ if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+ ablk_req = vc_sym_req->ablkcipher_req;
+ virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+ ablk_req, error);
+ }
+}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
@@ -286,13 +342,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
}
static int
-__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
struct data_queue *data_vq)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
@@ -326,9 +383,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
}
vc_req->req_data = req_data;
- vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
/* Head of operation */
- if (vc_req->encrypt) {
+ if (vc_sym_req->encrypt) {
req_data->header.session_id =
cpu_to_le64(ctx->enc_sess_info.session_id);
req_data->header.opcode =
@@ -383,7 +440,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
memcpy(iv, req->info, ivsize);
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
- vc_req->iv = iv;
+ vc_sym_req->iv = iv;
/* Source data */
for (i = 0; i < src_nents; i++)
@@ -421,15 +478,18 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
- vc_req->ablkcipher_ctx = ctx;
- vc_req->ablkcipher_req = req;
- vc_req->encrypt = true;
vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+ vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->encrypt = true;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
@@ -438,16 +498,18 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
- vc_req->ablkcipher_ctx = ctx;
- vc_req->ablkcipher_req = req;
-
- vc_req->encrypt = false;
vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+ vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->encrypt = false;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
@@ -456,7 +518,7 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
ctx->tfm = tfm;
return 0;
@@ -479,11 +541,13 @@ int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine,
struct ablkcipher_request *req)
{
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq;
int ret;
- ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq);
+ ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0)
return ret;
@@ -492,14 +556,15 @@ int virtio_crypto_ablkcipher_crypt_req(
return 0;
}
-void virtio_crypto_ablkcipher_finalize_req(
- struct virtio_crypto_request *vc_req,
+static void virtio_crypto_ablkcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
int err)
{
- crypto_finalize_cipher_request(vc_req->dataq->engine, req, err);
-
- virtcrypto_clear_request(vc_req);
+ crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
+ kzfree(vc_sym_req->iv);
+ virtcrypto_clear_request(&vc_sym_req->base);
}
static struct crypto_alg virtio_crypto_algs[] = { {
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index da6d8c0ea407..e976539a05d9 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -83,26 +83,16 @@ struct virtio_crypto_sym_session_info {
__u64 session_id;
};
-struct virtio_crypto_ablkcipher_ctx {
- struct virtio_crypto *vcrypto;
- struct crypto_tfm *tfm;
-
- struct virtio_crypto_sym_session_info enc_sess_info;
- struct virtio_crypto_sym_session_info dec_sess_info;
-};
+struct virtio_crypto_request;
+typedef void (*virtio_crypto_data_callback)
+ (struct virtio_crypto_request *vc_req, int len);
struct virtio_crypto_request {
- /* Cipher or aead */
- uint32_t type;
uint8_t status;
- struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
- struct ablkcipher_request *ablkcipher_req;
struct virtio_crypto_op_data_req *req_data;
struct scatterlist **sgs;
- uint8_t *iv;
- /* Encryption? */
- bool encrypt;
struct data_queue *dataq;
+ virtio_crypto_data_callback alg_cb;
};
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
@@ -119,10 +109,6 @@ void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine,
struct ablkcipher_request *req);
-void virtio_crypto_ablkcipher_finalize_req(
- struct virtio_crypto_request *vc_req,
- struct ablkcipher_request *req,
- int err);
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
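With the ablkcipher fields gone, struct virtio_crypto_request is reduced to transport state plus a completion hook, and the callback receives the same base pointer that was used as the virtqueue token. An algorithm implementation recovers its wrapper with container_of(); a hedged sketch of that step, assuming virtio_crypto_sym_request embeds the base under the member name 'base' as the algs.c hunks indicate (the real callback added by this commit lives in virtio_crypto_algs.c):

	static void virtio_crypto_dataq_sym_callback(
			struct virtio_crypto_request *vc_req, int len)
	{
		struct virtio_crypto_sym_request *vc_sym_req =
			container_of(vc_req,
				     struct virtio_crypto_sym_request, base);

		/* ...translate vc_req->status into an errno, then hand the
		 * ablkcipher request back to the crypto engine... */
	}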
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index a111cd72797b..ff1410a32c2b 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -29,7 +29,6 @@ void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
if (vc_req) {
- kzfree(vc_req->iv);
kzfree(vc_req->req_data);
kfree(vc_req->sgs);
}
@@ -41,40 +40,18 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq)
struct virtio_crypto_request *vc_req;
unsigned long flags;
unsigned int len;
- struct ablkcipher_request *ablk_req;
- int error;
unsigned int qid = vq->index;
spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
- if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
- switch (vc_req->status) {
- case VIRTIO_CRYPTO_OK:
- error = 0;
- break;
- case VIRTIO_CRYPTO_INVSESS:
- case VIRTIO_CRYPTO_ERR:
- error = -EINVAL;
- break;
- case VIRTIO_CRYPTO_BADMSG:
- error = -EBADMSG;
- break;
- default:
- error = -EIO;
- break;
- }
- ablk_req = vc_req->ablkcipher_req;
-
- spin_unlock_irqrestore(
- &vcrypto->data_vq[qid].lock, flags);
- /* Finish the encrypt or decrypt process */
- virtio_crypto_ablkcipher_finalize_req(vc_req,
- ablk_req, error);
- spin_lock_irqsave(
- &vcrypto->data_vq[qid].lock, flags);
- }
+ spin_unlock_irqrestore(
+ &vcrypto->data_vq[qid].lock, flags);
+ if (vc_req->alg_cb)
+ vc_req->alg_cb(vc_req, len);
+ spin_lock_irqsave(
+ &vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
@@ -270,7 +247,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
return -EPERM;
}
- dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
+ dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
} else {
virtcrypto_dev_stop(vcrypto);
dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
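The status switch deleted from virtcrypto_dataq_callback() above does not disappear: translating the device status into an errno becomes the job of whichever alg_cb was registered, which is what keeps the core loop algorithm-agnostic. A sketch of that mapping as a helper inside such a callback; the case values and return codes come straight from the deleted lines, while the helper name is hypothetical:

	static int virtio_crypto_status_to_errno(u8 status)
	{
		switch (status) {
		case VIRTIO_CRYPTO_OK:
			return 0;
		case VIRTIO_CRYPTO_INVSESS:	/* invalid session */
		case VIRTIO_CRYPTO_ERR:
			return -EINVAL;
		case VIRTIO_CRYPTO_BADMSG:
			return -EBADMSG;
		default:
			return -EIO;
		}
	}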
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 9c26d9e8dbea..17d84217dd76 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -104,8 +104,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
pagefault_enable();
preempt_enable();
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
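crypto_xor_cpy() is one of the new helpers from this series ("Add __crypto_xor that takes separte src and dst operands" in the pull text above): it XORs two source buffers into a distinct destination, collapsing the old XOR-into-a-temp-then-memcpy() pair into a single pass over the data. Its byte-wise reference semantics, sketched below; the in-kernel helper additionally works word-at-a-time where alignment allows:

	/* Reference behaviour only: dst[i] = src1[i] ^ src2[i]. */
	static inline void xor_cpy_ref(unsigned char *dst,
				       const unsigned char *src1,
				       const unsigned char *src2,
				       unsigned int size)
	{
		while (size--)
			*dst++ = *src1++ ^ *src2++;
	}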
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index cdf6b1e12460..fa17e5452796 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -758,9 +758,8 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
int i, r;
/* xor whitening with sector number */
- memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
- crypto_xor(buf, (u8 *)&sector, 8);
- crypto_xor(&buf[8], (u8 *)&sector, 8);
+ crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
+ crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
@@ -805,10 +804,10 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
}
/* Calculate IV */
- memcpy(iv, tcw->iv_seed, cc->iv_size);
- crypto_xor(iv, (u8 *)&sector, 8);
+ crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
if (cc->iv_size > 8)
- crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
+ crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
+ cc->iv_size - 8);
return r;
}
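The dm-crypt conversion shows the same win at a call site that previously needed a copy plus two in-place XORs: the whitening buffer used to be seeded with memcpy() and then XORed with the sector number in place, where the XOR at &buf[8] was really combining the sector with bytes 8..15 of the copied seed. The three-operand form states that directly, one output pass per half and no intermediate write of the seed into buf; side by side, as restated from the hunk above:

	/* Before: seed the buffer, then XOR in place (two data passes). */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* After: each half produced directly from seed and sector. */
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);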