Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig                          |  13
-rw-r--r--  drivers/char/hw_random/Makefile                         |   1
-rw-r--r--  drivers/char/hw_random/amd-rng.c                        |   4
-rw-r--r--  drivers/char/hw_random/geode-rng.c                      |   4
-rw-r--r--  drivers/char/hw_random/intel-rng.c                      |  13
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c                     |   2
-rw-r--r--  drivers/char/hw_random/pseries-rng.c                    |   2
-rw-r--r--  drivers/char/hw_random/via-rng.c                        |   8
-rw-r--r--  drivers/char/hw_random/xgene-rng.c                      | 423
-rw-r--r--  drivers/crypto/caam/caamhash.c                          |  28
-rw-r--r--  drivers/crypto/caam/ctrl.c                              | 138
-rw-r--r--  drivers/crypto/caam/intern.h                            |   9
-rw-r--r--  drivers/crypto/caam/regs.h                              |  51
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h                        |  54
-rw-r--r--  drivers/crypto/mv_cesa.h                                |   1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c             |   2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_internal.h  |   2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c                |  66
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c               |  14
19 files changed, 601 insertions(+), 234 deletions(-)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 836b061ced35..91a04ae8003c 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -333,6 +333,19 @@ config HW_RANDOM_MSM
If unsure, say Y.
+config HW_RANDOM_XGENE
+ tristate "APM X-Gene True Random Number Generator (TRNG) support"
+ depends on HW_RANDOM && ARCH_XGENE
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on APM X-Gene SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xgene_rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 199ed283e149..0b4cd57f4e24 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
+obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index c6af038682f1..48f6a83cdd61 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -142,10 +142,10 @@ found:
amd_rng.priv = (unsigned long)pmbase;
amd_pdev = pdev;
- printk(KERN_INFO "AMD768 RNG detected\n");
+ pr_info("AMD768 RNG detected\n");
err = hwrng_register(&amd_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
release_region(pmbase + 0xF0, 8);
goto out;
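The hw_random changes above and below are mostly mechanical conversions from printk(KERN_INFO/KERN_ERR/...) to the matching pr_info()/pr_err()/pr_warn()/pr_notice() helpers, which log at the corresponding level; most of these drivers keep their existing PFX strings where they had them. When a file defines pr_fmt() before its includes, the helpers prefix every message automatically. A minimal sketch of that idiom, with a hypothetical "myrng" prefix not taken from this patch:

    /* Sketch only: per-file message prefix picked up by the pr_*() helpers. */
    #define pr_fmt(fmt) "myrng: " fmt   /* must come before the includes below */

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static int myrng_report(int err)
    {
            pr_info("RNG detected\n");  /* logs "myrng: RNG detected" at KERN_INFO */
            if (err)
                    pr_err("RNG registering failed (%d)\n", err);
            return err;
    }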
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 4c4d4e140f98..0d0579fe465e 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -109,10 +109,10 @@ found:
goto out;
geode_rng.priv = (unsigned long)mem;
- printk(KERN_INFO "AMD Geode RNG detected\n");
+ pr_info("AMD Geode RNG detected\n");
err = hwrng_register(&geode_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
goto err_unmap;
}
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 86fe45c19968..290c880266bf 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -199,7 +199,7 @@ static int intel_rng_init(struct hwrng *rng)
if ((hw_status & INTEL_RNG_ENABLED) == 0)
hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
if ((hw_status & INTEL_RNG_ENABLED) == 0) {
- printk(KERN_ERR PFX "cannot enable RNG, aborting\n");
+ pr_err(PFX "cannot enable RNG, aborting\n");
goto out;
}
err = 0;
@@ -216,7 +216,7 @@ static void intel_rng_cleanup(struct hwrng *rng)
if (hw_status & INTEL_RNG_ENABLED)
hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
else
- printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
+ pr_warn(PFX "unusual: RNG already disabled\n");
}
@@ -274,7 +274,7 @@ static int __init intel_rng_hw_init(void *_intel_rng_hw)
if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
(dvc != INTEL_FWH_DEVICE_CODE_8M &&
dvc != INTEL_FWH_DEVICE_CODE_4M)) {
- printk(KERN_NOTICE PFX "FWH not detected\n");
+ pr_notice(PFX "FWH not detected\n");
return -ENODEV;
}
@@ -306,7 +306,6 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
(BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
== BIOS_CNTL_LOCK_ENABLE_MASK) {
static __initdata /*const*/ char warning[] =
- KERN_WARNING
PFX "Firmware space is locked read-only. If you can't or\n"
PFX "don't want to disable this in firmware setup, and if\n"
PFX "you are certain that your system has a functional\n"
@@ -314,7 +313,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
if (no_fwh_detect)
return -ENODEV;
- printk(warning);
+ pr_warn("%s", warning);
return -EBUSY;
}
@@ -392,10 +391,10 @@ fwh_done:
goto out;
}
- printk(KERN_INFO "Intel 82802 RNG detected\n");
+ pr_info("Intel 82802 RNG detected\n");
err = hwrng_register(&intel_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
iounmap(mem);
}
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c66279bb6ef3..c0347d1dded0 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -113,7 +113,7 @@ static int rng_probe(struct platform_device *ofdev)
pasemi_rng.priv = (unsigned long)rng_regs;
- printk(KERN_INFO "Registering PA Semi RNG\n");
+ pr_info("Registering PA Semi RNG\n");
err = hwrng_register(&pasemi_rng);
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index ab7ffdec0ec3..6226aa08c36a 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -86,7 +86,7 @@ static struct vio_driver pseries_rng_driver = {
static int __init rng_init(void)
{
- printk(KERN_INFO "Registering IBM pSeries RNG driver\n");
+ pr_info("Registering IBM pSeries RNG driver\n");
return vio_register_driver(&pseries_rng_driver);
}
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index de5a6dcfb3e2..a3bebef255ad 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -141,7 +141,7 @@ static int via_rng_init(struct hwrng *rng)
* register */
if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
if (!cpu_has_xstore_enabled) {
- printk(KERN_ERR PFX "can't enable hardware RNG "
+ pr_err(PFX "can't enable hardware RNG "
"if XSTORE is not enabled\n");
return -ENODEV;
}
@@ -180,7 +180,7 @@ static int via_rng_init(struct hwrng *rng)
unneeded */
rdmsr(MSR_VIA_RNG, lo, hi);
if ((lo & VIA_RNG_ENABLE) == 0) {
- printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
+ pr_err(PFX "cannot enable VIA C3 RNG, aborting\n");
return -ENODEV;
}
@@ -202,10 +202,10 @@ static int __init mod_init(void)
if (!cpu_has_xstore)
return -ENODEV;
- printk(KERN_INFO "VIA RNG detected\n");
+ pr_info("VIA RNG detected\n");
err = hwrng_register(&via_rng);
if (err) {
- printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ pr_err(PFX "RNG registering failed (%d)\n",
err);
goto out;
}
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
new file mode 100644
index 000000000000..23caa05380a8
--- /dev/null
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -0,0 +1,423 @@
+/*
+ * APM X-Gene SoC RNG Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Rameshwar Prasad Sahu <rsahu@apm.com>
+ * Shamal Winchurkar <swinchurkar@apm.com>
+ * Feng Kan <fkan@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/timer.h>
+
+#define RNG_MAX_DATUM 4
+#define MAX_TRY 100
+#define XGENE_RNG_RETRY_COUNT 20
+#define XGENE_RNG_RETRY_INTERVAL 10
+
+/* RNG Registers */
+#define RNG_INOUT_0 0x00
+#define RNG_INTR_STS_ACK 0x10
+#define RNG_CONTROL 0x14
+#define RNG_CONFIG 0x18
+#define RNG_ALARMCNT 0x1c
+#define RNG_FROENABLE 0x20
+#define RNG_FRODETUNE 0x24
+#define RNG_ALARMMASK 0x28
+#define RNG_ALARMSTOP 0x2c
+#define RNG_OPTIONS 0x78
+#define RNG_EIP_REV 0x7c
+
+#define MONOBIT_FAIL_MASK BIT(7)
+#define POKER_FAIL_MASK BIT(6)
+#define LONG_RUN_FAIL_MASK BIT(5)
+#define RUN_FAIL_MASK BIT(4)
+#define NOISE_FAIL_MASK BIT(3)
+#define STUCK_OUT_MASK BIT(2)
+#define SHUTDOWN_OFLO_MASK BIT(1)
+#define READY_MASK BIT(0)
+
+#define MAJOR_HW_REV_RD(src) (((src) & 0x0f000000) >> 24)
+#define MINOR_HW_REV_RD(src) (((src) & 0x00f00000) >> 20)
+#define HW_PATCH_LEVEL_RD(src) (((src) & 0x000f0000) >> 16)
+#define MAX_REFILL_CYCLES_SET(dst, src) \
+ ((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000))
+#define MIN_REFILL_CYCLES_SET(dst, src) \
+ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ALARM_THRESHOLD_SET(dst, src) \
+ ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ENABLE_RNG_SET(dst, src) \
+ ((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10)))
+#define REGSPEC_TEST_MODE_SET(dst, src) \
+ ((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8)))
+#define MONOBIT_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7)))
+#define POKER_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6)))
+#define LONG_RUN_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5)))
+#define RUN_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4)))
+#define NOISE_FAIL_MASK_SET(dst, src) \
+ ((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3)))
+#define STUCK_OUT_MASK_SET(dst, src) \
+ ((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2)))
+#define SHUTDOWN_OFLO_MASK_SET(dst, src) \
+ ((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1)))
+
+struct xgene_rng_dev {
+ u32 irq;
+ void __iomem *csr_base;
+ u32 revision;
+ u32 datum_size;
+ u32 failure_cnt; /* Failure count last minute */
+ unsigned long failure_ts;/* First failure timestamp */
+ struct timer_list failure_timer;
+ struct device *dev;
+ struct clk *clk;
+};
+
+static void xgene_rng_expired_timer(unsigned long arg)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg;
+
+ /* Clear failure counter as timer expired */
+ disable_irq(ctx->irq);
+ ctx->failure_cnt = 0;
+ del_timer(&ctx->failure_timer);
+ enable_irq(ctx->irq);
+}
+
+static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
+{
+ ctx->failure_timer.data = (unsigned long) ctx;
+ ctx->failure_timer.function = xgene_rng_expired_timer;
+ ctx->failure_timer.expires = jiffies + 120 * HZ;
+ add_timer(&ctx->failure_timer);
+}
+
+/*
+ * Initialize or reinit free running oscillators (FROs)
+ */
+static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
+{
+ writel(fro_val, ctx->csr_base + RNG_FRODETUNE);
+ writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);
+ writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);
+ writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);
+}
+
+static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
+{
+ u32 val;
+
+ val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+ if (val & MONOBIT_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds number of 1s after
+ * checking 20,000 bits (test T1 as specified in the
+ * AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val);
+ if (val & POKER_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds value in at least one
+ * of the 16 poker_count_X counters or an out of bounds sum
+ * of squares value after checking 20,000 bits (test T2 as
+ * specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test poker failure error 0x%08X\n", val);
+ if (val & LONG_RUN_FAIL_MASK)
+ /*
+ * LFSR detected a sequence of 34 identical bits
+ * (test T4 as specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test long run failure error 0x%08X\n", val);
+ if (val & RUN_FAIL_MASK)
+ /*
+ * LFSR detected an out-of-bounds value for at least one
+ * of the running counters after checking 20,000 bits
+ * (test T3 as specified in the AIS-31 standard)
+ */
+ dev_err(ctx->dev, "test run failure error 0x%08X\n", val);
+ if (val & NOISE_FAIL_MASK)
+ /* LFSR detected a sequence of 48 identical bits */
+ dev_err(ctx->dev, "noise failure error 0x%08X\n", val);
+ if (val & STUCK_OUT_MASK)
+ /*
+ * Detected output data registers generated same value twice
+ * in a row
+ */
+ dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val);
+
+ if (val & SHUTDOWN_OFLO_MASK) {
+ u32 frostopped;
+
+ /* FROs shut down after a second error event. Try to recover. */
+ if (++ctx->failure_cnt == 1) {
+ /* 1st time, just recover */
+ ctx->failure_ts = jiffies;
+ frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+ xgene_rng_init_fro(ctx, frostopped);
+
+ /*
+ * We must start a timer to clear out this error
+ * in case the system timer wraps around
+ */
+ xgene_rng_start_timer(ctx);
+ } else {
+ /* 2nd failure in less than 1 minute? */
+ if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
+ dev_err(ctx->dev,
+ "FRO shutdown failure error 0x%08X\n",
+ val);
+ } else {
+ /* 2nd failure after 1 minute, recover */
+ ctx->failure_ts = jiffies;
+ ctx->failure_cnt = 1;
+ /*
+ * We must start a timer to clear out this
+ * error in case the system timer wraps
+ * around
+ */
+ xgene_rng_start_timer(ctx);
+ }
+ frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+ xgene_rng_init_fro(ctx, frostopped);
+ }
+ }
+ /* Clear them all */
+ writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
+}
+
+static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+
+ /* RNG Alarm Counter overflow */
+ xgene_rng_chk_overflow(ctx);
+
+ return IRQ_HANDLED;
+}
+
+static int xgene_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+ u32 i, val = 0;
+
+ for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) {
+ val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+ if ((val & READY_MASK) || !wait)
+ break;
+ udelay(XGENE_RNG_RETRY_INTERVAL);
+ }
+
+ return (val & READY_MASK);
+}
+
+static int xgene_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+ int i;
+
+ for (i = 0; i < ctx->datum_size; i++)
+ data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);
+
+ /* Clear ready bit to start next transaction */
+ writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+ return ctx->datum_size << 2;
+}
+
+static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
+{
+ u32 val;
+
+ writel(0x00000000, ctx->csr_base + RNG_CONTROL);
+
+ val = MAX_REFILL_CYCLES_SET(0, 10);
+ val = MIN_REFILL_CYCLES_SET(val, 10);
+ writel(val, ctx->csr_base + RNG_CONFIG);
+
+ val = ALARM_THRESHOLD_SET(0, 0xFF);
+ writel(val, ctx->csr_base + RNG_ALARMCNT);
+
+ xgene_rng_init_fro(ctx, 0);
+
+ writel(MONOBIT_FAIL_MASK |
+ POKER_FAIL_MASK |
+ LONG_RUN_FAIL_MASK |
+ RUN_FAIL_MASK |
+ NOISE_FAIL_MASK |
+ STUCK_OUT_MASK |
+ SHUTDOWN_OFLO_MASK |
+ READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+ val = ENABLE_RNG_SET(0, 1);
+ val = MONOBIT_FAIL_MASK_SET(val, 1);
+ val = POKER_FAIL_MASK_SET(val, 1);
+ val = LONG_RUN_FAIL_MASK_SET(val, 1);
+ val = RUN_FAIL_MASK_SET(val, 1);
+ val = NOISE_FAIL_MASK_SET(val, 1);
+ val = STUCK_OUT_MASK_SET(val, 1);
+ val = SHUTDOWN_OFLO_MASK_SET(val, 1);
+ writel(val, ctx->csr_base + RNG_CONTROL);
+}
+
+static int xgene_rng_init(struct hwrng *rng)
+{
+ struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+
+ ctx->failure_cnt = 0;
+ init_timer(&ctx->failure_timer);
+
+ ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
+
+ dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
+ MAJOR_HW_REV_RD(ctx->revision),
+ MINOR_HW_REV_RD(ctx->revision),
+ HW_PATCH_LEVEL_RD(ctx->revision));
+
+ dev_dbg(ctx->dev, "Options 0x%08X",
+ readl(ctx->csr_base + RNG_OPTIONS));
+
+ xgene_rng_init_internal(ctx);
+
+ ctx->datum_size = RNG_MAX_DATUM;
+
+ return 0;
+}
+
+static struct hwrng xgene_rng_func = {
+ .name = "xgene-rng",
+ .init = xgene_rng_init,
+ .data_present = xgene_rng_data_present,
+ .data_read = xgene_rng_data_read,
+};
+
+static int xgene_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xgene_rng_dev *ctx;
+ int rc = 0;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctx);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->csr_base))
+ return PTR_ERR(ctx->csr_base);
+
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return ctx->irq;
+ }
+
+ dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
+ ctx->csr_base, ctx->irq);
+
+ rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
+ dev_name(&pdev->dev), ctx);
+ if (rc) {
+ dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
+ return rc;
+ }
+
+ /* Enable IP clock */
+ ctx->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ctx->clk)) {
+ dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
+ } else {
+ rc = clk_prepare_enable(ctx->clk);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "clock prepare enable failed for RNG");
+ return rc;
+ }
+ }
+
+ xgene_rng_func.priv = (unsigned long) ctx;
+
+ rc = hwrng_register(&xgene_rng_func);
+ if (rc) {
+ dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
+ if (!IS_ERR(ctx->clk))
+ clk_disable_unprepare(ctx->clk);
+ return rc;
+ }
+
+ rc = device_init_wakeup(&pdev->dev, 1);
+ if (rc) {
+ dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
+ rc);
+ if (!IS_ERR(ctx->clk))
+ clk_disable_unprepare(ctx->clk);
+ hwrng_unregister(&xgene_rng_func);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int xgene_rng_remove(struct platform_device *pdev)
+{
+ struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
+ int rc;
+
+ rc = device_init_wakeup(&pdev->dev, 0);
+ if (rc)
+ dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
+ if (!IS_ERR(ctx->clk))
+ clk_disable_unprepare(ctx->clk);
+ hwrng_unregister(&xgene_rng_func);
+
+ return rc;
+}
+
+static const struct of_device_id xgene_rng_of_match[] = {
+ { .compatible = "apm,xgene-rng" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
+
+static struct platform_driver xgene_rng_driver = {
+ .probe = xgene_rng_probe,
+ .remove = xgene_rng_remove,
+ .driver = {
+ .name = "xgene-rng",
+ .of_match_table = xgene_rng_of_match,
+ },
+};
+
+module_platform_driver(xgene_rng_driver);
+MODULE_DESCRIPTION("APM X-Gene RNG driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index b464d03ebf40..f347ab7eea95 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -836,8 +836,9 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->sec4_sg + sec4_sg_src_index,
chained);
if (*next_buflen) {
- sg_copy_part(next_buf, req->src, to_hash -
- *buflen, req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
state->current_buf = !state->current_buf;
}
} else {
@@ -878,7 +879,8 @@ static int ahash_update_ctx(struct ahash_request *req)
kfree(edesc);
}
} else if (*next_buflen) {
- sg_copy(buf + *buflen, req->src, req->nbytes);
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
*buflen = *next_buflen;
*next_buflen = last_buflen;
}
@@ -1262,8 +1264,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + 1, chained);
if (*next_buflen) {
- sg_copy_part(next_buf, req->src, to_hash - *buflen,
- req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
state->current_buf = !state->current_buf;
}
@@ -1304,7 +1307,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
kfree(edesc);
}
} else if (*next_buflen) {
- sg_copy(buf + *buflen, req->src, req->nbytes);
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
*buflen = *next_buflen;
*next_buflen = 0;
}
@@ -1413,9 +1417,9 @@ static int ahash_update_first(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = state->buf_0 + state->current_buf *
- CAAM_MAX_HASH_BLOCK_SIZE;
- int *next_buflen = &state->buflen_0 + state->current_buf;
+ u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *next_buflen = state->current_buf ?
+ &state->buflen_1 : &state->buflen_0;
int to_hash;
u32 *sh_desc = ctx->sh_desc_update_first, *desc;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
@@ -1476,7 +1480,8 @@ static int ahash_update_first(struct ahash_request *req)
}
if (*next_buflen)
- sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
@@ -1511,7 +1516,8 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- sg_copy(next_buf, req->src, req->nbytes);
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
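The caamhash changes replace the driver-private sg_copy()/sg_copy_part() helpers (deleted from sg_sw_sec4.h further down) with the generic scatterwalk_map_and_copy(), which takes an explicit starting offset and byte count and performs the per-page kmap/kunmap itself. A minimal sketch of the call; the wrapper name is illustrative:

    /* Sketch: copy 'len' bytes out of a scatterlist, starting at 'offset'. */
    #include <crypto/scatterwalk.h>
    #include <linux/scatterlist.h>

    static void example_copy_from_sg(u8 *dst, struct scatterlist *src,
                                     unsigned int offset, unsigned int len)
    {
            /* final argument: 0 = sg -> buffer, non-zero = buffer -> sg */
            scatterwalk_map_and_copy(dst, src, offset, len, 0);
    }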
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 3cade79ea41e..31000c8c4a90 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,5 +1,4 @@
-/*
- * CAAM control-plane driver backend
+/* * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
@@ -81,38 +80,37 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
u32 *status)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ struct caam_deco __iomem *deco = ctrlpriv->deco;
unsigned int timeout = 100000;
u32 deco_dbg_reg, flags;
int i;
- /* Set the bit to request direct access to DECO0 */
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
if (ctrlpriv->virt_en == 1) {
- setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+ setbits32(&ctrl->deco_rsr, DECORSR_JR0);
- while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+ while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
--timeout)
cpu_relax();
timeout = 100000;
}
- setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
- while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+ while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
--timeout)
cpu_relax();
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
- clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
- wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+ wr_reg32(&deco->descbuf[i], *(desc + i));
flags = DECO_JQCR_WHL;
/*
@@ -123,11 +121,11 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
flags |= DECO_JQCR_FOUR;
/* Instruct the DECO to execute it */
- wr_reg32(&topregs->deco.jr_ctl_hi, flags);
+ wr_reg32(&deco->jr_ctl_hi, flags);
timeout = 10000000;
do {
- deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+ deco_dbg_reg = rd_reg32(&deco->desc_dbg);
/*
* If an error occured in the descriptor, then
* the DECO status field will be set to 0x0D
@@ -138,14 +136,14 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
cpu_relax();
} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
- *status = rd_reg32(&topregs->deco.op_status_hi) &
+ *status = rd_reg32(&deco->op_status_hi) &
DECO_OP_STATUS_HI_ERR_MASK;
if (ctrlpriv->virt_en == 1)
- clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+ clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
/* Mark the DECO as free */
- clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+ clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
if (!timeout)
return -EAGAIN;
@@ -176,13 +174,13 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int gen_sk)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
u32 *desc, status, rdsta_val;
int ret = 0, sh_idx;
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
- r4tst = &topregs->ctrl.r4tst[0];
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+ r4tst = &ctrl->r4tst[0];
desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
@@ -212,12 +210,11 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* CAAM eras), then try again.
*/
rdsta_val =
- rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+ rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if (status || !(rdsta_val & (1 << sh_idx)))
ret = -EAGAIN;
if (ret)
break;
-
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
@@ -285,12 +282,12 @@ static int caam_remove(struct platform_device *pdev)
{
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl;
int ring, ret = 0;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
/* Remove platform devices for JobRs */
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
@@ -308,7 +305,7 @@ static int caam_remove(struct platform_device *pdev)
#endif
/* Unmap controller region */
- iounmap(&topregs->ctrl);
+ iounmap(&ctrl);
return ret;
}
@@ -323,12 +320,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
{
struct device *ctrldev = &pdev->dev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
- struct caam_full __iomem *topregs;
+ struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
u32 val;
- topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
- r4tst = &topregs->ctrl.r4tst[0];
+ ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+ r4tst = &ctrl->r4tst[0];
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
@@ -355,10 +352,19 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtsdctl, val);
/* min. freq. count, equal to 1/4 of the entropy sample length */
wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
- /* max. freq. count, equal to 8 times the entropy sample length */
- wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
+ /* disable maximum frequency count */
+ wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+ /* read the control register */
+ val = rd_reg32(&r4tst->rtmctl);
+ /*
+ * select raw sampling in both entropy shifter
+ * and statistical checker
+ */
+ setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
/* put RNG4 into run mode */
- clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ clrbits32(&val, RTMCTL_PRGM);
+ /* write back the control register */
+ wr_reg32(&r4tst->rtmctl, val);
}
/**
@@ -387,13 +393,14 @@ static int caam_probe(struct platform_device *pdev)
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
- struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
u32 scfgr, comp_params;
u32 cha_vid_ls;
+ int pg_size;
+ int BLOCK_OFFSET = 0;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
GFP_KERNEL);
@@ -412,10 +419,27 @@ static int caam_probe(struct platform_device *pdev)
dev_err(dev, "caam: of_iomap() failed\n");
return -ENOMEM;
}
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+ /* Finding the page size for using the CTPR_MS register */
+ comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
- /* topregs used to derive pointers to CAAM sub-blocks only */
- topregs = (struct caam_full __iomem *)ctrl;
+ /* Allocating the BLOCK_OFFSET based on the supported page size on
+ * the platform
+ */
+ if (pg_size == 0)
+ BLOCK_OFFSET = PG_SIZE_4K;
+ else
+ BLOCK_OFFSET = PG_SIZE_64K;
+
+ ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+ );
+ ctrlpriv->deco = (struct caam_deco __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * DECO_BLOCK_NUMBER
+ );
/* Get the IRQ of the controller (for security violations only) */
ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
@@ -424,15 +448,14 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
+ setbits32(&ctrl->mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time paramters and SCFGR to determine
* if Virtualization is enabled for this platform
*/
- comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
- scfgr = rd_reg32(&topregs->ctrl.scfgr);
+ scfgr = rd_reg32(&ctrl->scfgr);
ctrlpriv->virt_en = 0;
if (comp_params & CTPR_MS_VIRT_EN_INCL) {
@@ -450,7 +473,7 @@ static int caam_probe(struct platform_device *pdev)
}
if (ctrlpriv->virt_en == 1)
- setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+ setbits32(&ctrl->jrstart, JRSTART_JR0_START |
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
@@ -477,7 +500,7 @@ static int caam_probe(struct platform_device *pdev)
sizeof(struct platform_device *) * rspec,
GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
- iounmap(&topregs->ctrl);
+ iounmap(&ctrl);
return -ENOMEM;
}
@@ -493,18 +516,26 @@ static int caam_probe(struct platform_device *pdev)
ring);
continue;
}
+ ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
+ ((uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
ctrlpriv->total_jobrs++;
ring++;
- }
+ }
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present =
- !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+ !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
+ ctrlpriv->qi = (struct caam_queue_if __force *)
+ ((uint8_t *)ctrl +
+ BLOCK_OFFSET * QI_BLOCK_NUMBER
+ );
/* This is all that's required to physically enable QI */
- wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
+ wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
}
/* If no QI and no rings specified, quit and go home */
@@ -514,7 +545,7 @@ static int caam_probe(struct platform_device *pdev)
return -ENOMEM;
}
- cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
+ cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
@@ -522,7 +553,7 @@ static int caam_probe(struct platform_device *pdev)
*/
if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
- rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+ rd_reg32(&ctrl->r4tst[0].rdsta);
/*
* If the secure keys (TDKEK, JDKEK, TDSK), were already
* generated, signal this to the function that is instantiating
@@ -533,7 +564,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
do {
int inst_handles =
- rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+ rd_reg32(&ctrl->r4tst[0].rdsta) &
RDSTA_IFMASK;
/*
* If either SH were instantiated by somebody else
@@ -544,6 +575,9 @@ static int caam_probe(struct platform_device *pdev)
* the TRNG parameters.
*/
if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ dev_info(dev,
+ "Entropy delay = %u\n",
+ ent_delay);
kick_trng(pdev, ent_delay);
ent_delay += 400;
}
@@ -556,6 +590,12 @@ static int caam_probe(struct platform_device *pdev)
*/
ret = instantiate_rng(dev, inst_handles,
gen_sk);
+ if (ret == -EAGAIN)
+ /*
+ * if here, the loop will rerun,
+ * so don't hog the CPU
+ */
+ cpu_relax();
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
@@ -569,13 +609,13 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
- setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
+ setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
- caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
- (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
+ caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+ (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 97363db4e56e..89b94cc9e7a2 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -70,10 +70,11 @@ struct caam_drv_private {
struct platform_device *pdev;
/* Physical-presence section */
- struct caam_ctrl *ctrl; /* controller region */
- struct caam_deco **deco; /* DECO/CCB views */
- struct caam_assurance *ac;
- struct caam_queue_if *qi; /* QI control region */
+ struct caam_ctrl __iomem *ctrl; /* controller region */
+ struct caam_deco __iomem *deco; /* DECO/CCB views */
+ struct caam_assurance __iomem *assure;
+ struct caam_queue_if __iomem *qi; /* QI control region */
+ struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
/*
* Detected geometry block. Filled in from device tree if powerpc,
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index f48e344ffc39..378ddc17f60e 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -194,6 +194,8 @@ struct caam_perfmon {
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
+#define CTPR_MS_PG_SZ_MASK 0x10
+#define CTPR_MS_PG_SZ_SHIFT 4
u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
u64 rsvd1[2];
@@ -269,6 +271,16 @@ struct rngtst {
/* RNG4 TRNG test registers */
struct rng4tst {
#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
+ both entropy shifter and
+ statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC 1 /* use raw data in both
+ entropy shifter and
+ statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2 /* use von Neumann data in
+ entropy shifter, raw data
+ in statistical checker */
+#define RTMCTL_SAMP_MODE_INVALID 3 /* invalid combination */
u32 rtmctl; /* misc. control register */
u32 rtscmisc; /* statistical check misc. register */
u32 rtpkrrng; /* poker range register */
@@ -278,7 +290,7 @@ struct rng4tst {
};
#define RTSDCTL_ENT_DLY_SHIFT 16
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
-#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MIN 3200
#define RTSDCTL_ENT_DLY_MAX 12800
u32 rtsdctl; /* seed control register */
union {
@@ -286,6 +298,7 @@ struct rng4tst {
u32 rttotsam; /* PRGM=0: total samples register */
};
u32 rtfrqmin; /* frequency count min. limit register */
+#define RTFRQMAX_DISABLE (1 << 20)
union {
u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
u32 rtfrqcnt; /* PRGM=0: freq. count register */
@@ -758,34 +771,10 @@ struct caam_deco {
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
-/*
- * Current top-level view of memory map is:
- *
- * 0x0000 - 0x0fff - CAAM Top-Level Control
- * 0x1000 - 0x1fff - Job Ring 0
- * 0x2000 - 0x2fff - Job Ring 1
- * 0x3000 - 0x3fff - Job Ring 2
- * 0x4000 - 0x4fff - Job Ring 3
- * 0x5000 - 0x5fff - (unused)
- * 0x6000 - 0x6fff - Assurance Controller
- * 0x7000 - 0x7fff - Queue Interface
- * 0x8000 - 0x8fff - DECO-CCB 0
- * 0x9000 - 0x9fff - DECO-CCB 1
- * 0xa000 - 0xafff - DECO-CCB 2
- * 0xb000 - 0xbfff - DECO-CCB 3
- * 0xc000 - 0xcfff - DECO-CCB 4
- *
- * caam_full describes the full register view of CAAM if useful,
- * although many configurations may choose to implement parts of
- * the register map separately, in differing privilege regions
- */
-struct caam_full {
- struct caam_ctrl __iomem ctrl;
- struct caam_job_ring jr[4];
- u64 rsvd[512];
- struct caam_assurance assure;
- struct caam_queue_if qi;
- struct caam_deco deco;
-};
-
+#define JR_BLOCK_NUMBER 1
+#define ASSURE_BLOCK_NUMBER 6
+#define QI_BLOCK_NUMBER 7
+#define DECO_BLOCK_NUMBER 8
+#define PG_SIZE_4K 0x1000
+#define PG_SIZE_64K 0x10000
#endif /* REGS_H */
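With struct caam_full removed, sub-block addresses are no longer implied by one monolithic register struct; caam_probe() (earlier in this diff) reads the page size from CTPR_MS and adds BLOCK_OFFSET * block-number to the controller base. A condensed sketch of that arithmetic using the constants above; the helper name is hypothetical:

    /* Sketch: locate a CAAM sub-block from the controller base.
     * Constants come from drivers/crypto/caam/regs.h as changed above. */
    #include <linux/io.h>

    static void __iomem *caam_block_base(void __iomem *ctrl_base,
                                         u32 comp_params_ms, int block_number)
    {
            int pg_size = (comp_params_ms & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
            unsigned long block_offset = pg_size ? PG_SIZE_64K : PG_SIZE_4K;

            return (u8 __iomem *)ctrl_base + block_offset * block_number;
    }

    /* e.g. the DECO block: caam_block_base(ctrl, comp_params, DECO_BLOCK_NUMBER) */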
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b12ff85f4241..ce28a563effc 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -116,57 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
}
return nents;
}
-
-/* Map SG page in kernel virtual address space and copy */
-static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
- int len, int offset)
-{
- u8 *mapped_addr;
-
- /*
- * Page here can be user-space pinned using get_user_pages
- * Same must be kmapped before use and kunmapped subsequently
- */
- mapped_addr = kmap_atomic(sg_page(sg));
- memcpy(dest, mapped_addr + offset, len);
- kunmap_atomic(mapped_addr);
-}
-
-/* Copy from len bytes of sg to dest, starting from beginning */
-static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
-{
- struct scatterlist *current_sg = sg;
- int cpy_index = 0, next_cpy_index = current_sg->length;
-
- while (next_cpy_index < len) {
- sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
- current_sg->offset);
- current_sg = scatterwalk_sg_next(current_sg);
- cpy_index = next_cpy_index;
- next_cpy_index += current_sg->length;
- }
- if (cpy_index < len)
- sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
- current_sg->offset);
-}
-
-/* Copy sg data, from to_skip to end, to dest */
-static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
- int to_skip, unsigned int end)
-{
- struct scatterlist *current_sg = sg;
- int sg_index, cpy_index, offset;
-
- sg_index = current_sg->length;
- while (sg_index <= to_skip) {
- current_sg = scatterwalk_sg_next(current_sg);
- sg_index += current_sg->length;
- }
- cpy_index = sg_index - to_skip;
- offset = current_sg->offset + current_sg->length - cpy_index;
- sg_map_copy(dest, current_sg, cpy_index, offset);
- if (end - sg_index) {
- current_sg = scatterwalk_sg_next(current_sg);
- sg_copy(dest + cpy_index, current_sg, end - sg_index);
- }
-}
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index 08fcb1116d90..9249d3ed184b 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,4 +1,5 @@
#ifndef __MV_CRYPTO_H__
+#define __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
#define DIGEST_INITIAL_VAL_B 0xdd04
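The mv_cesa.h hunk completes an include guard that tested __MV_CRYPTO_H__ but never defined it, so the header offered no protection against double inclusion. The full pattern, for reference (the header presumably already ends with the matching #endif):

    #ifndef __MV_CRYPTO_H__
    #define __MV_CRYPTO_H__

    /* ... register and bit definitions ... */

    #endif /* __MV_CRYPTO_H__ */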
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 6a92284a86b2..244d73378f0e 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -111,7 +111,7 @@ static int adf_chr_drv_create(void)
drv_device = device_create(adt_ctl_drv.drv_class, NULL,
MKDEV(adt_ctl_drv.major, 0),
NULL, DEVICE_NAME);
- if (!drv_device) {
+ if (IS_ERR(drv_device)) {
pr_err("QAT: failed to create device\n");
goto err_cdev_del;
}
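device_create() reports failure with an error pointer, never NULL, so the old !drv_device check could not fire; the fix tests IS_ERR() instead. The usual companion is PTR_ERR() to recover the errno, sketched below with an illustrative helper:

    /* Sketch: handling an ERR_PTR-returning allocator such as device_create(). */
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_check(struct device *dev)
    {
            if (IS_ERR(dev))                /* failure is ERR_PTR(-errno), never NULL */
                    return PTR_ERR(dev);    /* recover the negative errno */
            return 0;
    }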
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index f854bac276b0..c40546079981 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -75,7 +75,7 @@ struct adf_etr_ring_data {
struct adf_etr_bank_data {
struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
- struct tasklet_struct resp_hanlder;
+ struct tasklet_struct resp_handler;
void __iomem *csr_addr;
struct adf_accel_dev *accel_dev;
uint32_t irq_coalesc_timer;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59df48872955..3e26fa2b293f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -105,7 +105,7 @@ struct qat_alg_cd {
#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
struct qat_auth_state {
- uint8_t data[MAX_AUTH_STATE_SIZE];
+ uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);
struct qat_alg_session_ctx {
@@ -113,10 +113,6 @@ struct qat_alg_session_ctx {
dma_addr_t enc_cd_paddr;
struct qat_alg_cd *dec_cd;
dma_addr_t dec_cd_paddr;
- struct qat_auth_state *auth_hw_state_enc;
- dma_addr_t auth_state_enc_paddr;
- struct qat_auth_state *auth_hw_state_dec;
- dma_addr_t auth_state_dec_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
struct qat_crypto_instance *inst;
@@ -150,8 +146,9 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_session_ctx *ctx,
const uint8_t *auth_key,
- unsigned int auth_keylen, uint8_t *auth_state)
+ unsigned int auth_keylen)
{
+ struct qat_auth_state auth_state;
struct {
struct shash_desc shash;
char ctx[crypto_shash_descsize(ctx->hash_tfm)];
@@ -161,12 +158,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- uint8_t *ipad = auth_state;
+ uint8_t *ipad = auth_state.data;
uint8_t *opad = ipad + block_size;
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
+ memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
desc.shash.tfm = ctx->hash_tfm;
desc.shash.flags = 0x0;
@@ -298,10 +296,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
void *ptr = &req_tmpl->cd_ctrl;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
- struct icp_qat_fw_la_auth_req_params *auth_param =
- (struct icp_qat_fw_la_auth_req_params *)
- ((char *)&req_tmpl->serv_specif_rqpars +
- sizeof(struct icp_qat_fw_la_cipher_req_params));
/* CD setup */
cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
@@ -312,8 +306,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
hash->sha.inner_setup.auth_counter.counter =
cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
- if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
- (uint8_t *)ctx->auth_hw_state_enc))
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
/* Request setup */
@@ -359,9 +352,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
- auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
- sizeof(struct icp_qat_hw_auth_counter) +
- round_up(hash_cd_ctrl->inner_state1_sz, 8);
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
return 0;
@@ -399,8 +389,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
hash->sha.inner_setup.auth_counter.counter =
cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
- if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
- (uint8_t *)ctx->auth_hw_state_dec))
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
/* Request setup */
@@ -450,9 +439,6 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
- auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
- sizeof(struct icp_qat_hw_auth_counter) +
- round_up(hash_cd_ctrl->inner_state1_sz, 8);
auth_param->auth_res_sz = digestsize;
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
@@ -512,10 +498,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
dev = &GET_DEV(ctx->inst->accel_dev);
memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
- memset(ctx->auth_hw_state_enc, 0,
- sizeof(struct qat_auth_state));
- memset(ctx->auth_hw_state_dec, 0,
- sizeof(struct qat_auth_state));
memset(&ctx->enc_fw_req_tmpl, 0,
sizeof(struct icp_qat_fw_la_bulk_req));
memset(&ctx->dec_fw_req_tmpl, 0,
@@ -548,22 +530,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
spin_unlock(&ctx->lock);
goto out_free_enc;
}
- ctx->auth_hw_state_enc =
- dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
- &ctx->auth_state_enc_paddr,
- GFP_ATOMIC);
- if (!ctx->auth_hw_state_enc) {
- spin_unlock(&ctx->lock);
- goto out_free_dec;
- }
- ctx->auth_hw_state_dec =
- dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
- &ctx->auth_state_dec_paddr,
- GFP_ATOMIC);
- if (!ctx->auth_hw_state_dec) {
- spin_unlock(&ctx->lock);
- goto out_free_auth_enc;
- }
}
spin_unlock(&ctx->lock);
if (qat_alg_init_sessions(ctx, key, keylen))
@@ -572,14 +538,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
return 0;
out_free_all:
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
- ctx->auth_hw_state_dec = NULL;
-out_free_auth_enc:
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
- ctx->auth_hw_state_enc = NULL;
-out_free_dec:
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
@@ -924,16 +882,6 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
if (ctx->dec_cd)
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
- if (ctx->auth_hw_state_enc)
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_enc,
- ctx->auth_state_enc_paddr);
-
- if (ctx->auth_hw_state_dec)
- dma_free_coherent(dev, sizeof(struct qat_auth_state),
- ctx->auth_hw_state_dec,
- ctx->auth_state_dec_paddr);
-
qat_crypto_put_instance(inst);
}
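The qat_algs.c rework drops the DMA-coherent auth_hw_state_enc/dec buffers and builds the HMAC precompute state on the stack instead (struct qat_auth_state, padded by 64 bytes and __aligned(64)). The ipad/opad pointers visible in the hunks follow the standard HMAC construction from RFC 2104; schematically, and not the driver's exact code:

    /* Sketch: standard HMAC ipad/opad derivation as used by the precompute.
     * Assumes the key has already been reduced to at most block_size bytes. */
    #include <linux/string.h>
    #include <linux/types.h>

    static void example_hmac_pads(u8 *ipad, u8 *opad, const u8 *key,
                                  unsigned int keylen, unsigned int block_size)
    {
            unsigned int i;

            memcpy(ipad, key, keylen);
            memcpy(opad, key, keylen);
            memset(ipad + keylen, 0, block_size - keylen);
            memset(opad + keylen, 0, block_size - keylen);

            for (i = 0; i < block_size; i++) {
                    ipad[i] ^= 0x36;        /* inner pad constant */
                    opad[i] ^= 0x5c;        /* outer pad constant */
            }
    }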
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d4172dedf775..67ec61e51185 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -70,9 +70,9 @@ static int adf_enable_msix(struct adf_accel_dev *accel_dev)
for (i = 0; i < msix_num_entries; i++)
pci_dev_info->msix_entries.entries[i].entry = i;
- if (pci_enable_msix(pci_dev_info->pci_dev,
- pci_dev_info->msix_entries.entries,
- msix_num_entries)) {
+ if (pci_enable_msix_exact(pci_dev_info->pci_dev,
+ pci_dev_info->msix_entries.entries,
+ msix_num_entries)) {
pr_err("QAT: Failed to enable MSIX IRQ\n");
return -EFAULT;
}
@@ -89,7 +89,7 @@ static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
struct adf_etr_bank_data *bank = bank_ptr;
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
- tasklet_hi_schedule(&bank->resp_hanlder);
+ tasklet_hi_schedule(&bank->resp_handler);
return IRQ_HANDLED;
}
@@ -217,7 +217,7 @@ static int adf_setup_bh(struct adf_accel_dev *accel_dev)
int i;
for (i = 0; i < hw_data->num_banks; i++)
- tasklet_init(&priv_data->banks[i].resp_hanlder,
+ tasklet_init(&priv_data->banks[i].resp_handler,
adf_response_handler,
(unsigned long)&priv_data->banks[i]);
return 0;
@@ -230,8 +230,8 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
int i;
for (i = 0; i < hw_data->num_banks; i++) {
- tasklet_disable(&priv_data->banks[i].resp_hanlder);
- tasklet_kill(&priv_data->banks[i].resp_hanlder);
+ tasklet_disable(&priv_data->banks[i].resp_handler);
+ tasklet_kill(&priv_data->banks[i].resp_handler);
}
}
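Finally, adf_isr.c moves from pci_enable_msix(), whose positive "fewer vectors available" return value was never handled here, to pci_enable_msix_exact(), which either allocates exactly nvec vectors (returning 0) or fails with a negative errno. A sketch of the all-or-nothing call; the wrapper is illustrative:

    /* Sketch: all-or-nothing MSI-X vector allocation. */
    #include <linux/pci.h>

    static int example_enable_msix(struct pci_dev *pdev,
                                   struct msix_entry *entries, int nvec)
    {
            int i;

            for (i = 0; i < nvec; i++)
                    entries[i].entry = i;

            /* returns 0 on success or a negative errno; never a partial count */
            return pci_enable_msix_exact(pdev, entries, nvec);
    }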