author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 11:22:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 11:22:54 -0700
commit     70477371dc350746d10431d74f0f213a8d59924c
tree       6271978b6e4ee4b1e6f22775ad7fc0930c09d3ee /drivers/crypto
parent     09fd671ccb2475436bd5f597f751ca4a7d177aea
parent     34074205bb9f04b416efb3cbedcd90f418c86200
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "Here is the crypto update for 4.6:

  API:
   - Convert remaining crypto_hash users to shash or ahash, also convert
     blkcipher/ablkcipher users to skcipher.
   - Remove crypto_hash interface.
   - Remove crypto_pcomp interface.
   - Add crypto engine for async cipher drivers.
   - Add akcipher documentation.
   - Add skcipher documentation.

  Algorithms:
   - Rename crypto/crc32 to avoid name clash with lib/crc32.
   - Fix bug in keywrap where we zero the wrong pointer.

  Drivers:
   - Support T5/M5, T7/M7 SPARC CPUs in n2 hwrng driver.
   - Add PIC32 hwrng driver.
   - Support BCM6368 in bcm63xx hwrng driver.
   - Pack structs for 32-bit compat users in qat.
   - Use crypto engine in omap-aes.
   - Add support for sama5d2x SoCs in atmel-sha.
   - Make atmel-sha available again.
   - Make sahara hashing available again.
   - Make ccp hashing available again.
   - Make sha1-mb available again.
   - Add support for multiple devices in ccp.
   - Improve DMA performance in caam.
   - Add hashing support to rockchip"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (116 commits)
  crypto: qat - remove redundant arbiter configuration
  crypto: ux500 - fix checks of error code returned by devm_ioremap_resource()
  crypto: atmel - fix checks of error code returned by devm_ioremap_resource()
  crypto: qat - Change the definition of icp_qat_uof_regtype
  hwrng: exynos - use __maybe_unused to hide pm functions
  crypto: ccp - Add abstraction for device-specific calls
  crypto: ccp - CCP versioning support
  crypto: ccp - Support for multiple CCPs
  crypto: ccp - Remove check for x86 family and model
  crypto: ccp - memset request context to zero during import
  lib/mpi: use "static inline" instead of "extern inline"
  lib/mpi: avoid assembler warning
  hwrng: bcm63xx - fix non device tree compatibility
  crypto: testmgr - allow rfc3686 aes-ctr variants in fips mode.
  crypto: qat - The AE id should be less than the maximal AE number
  lib/mpi: Endianness fix
  crypto: rockchip - add hash support for crypto engine in rk3288
  crypto: xts - fix compile errors
  crypto: doc - add skcipher API documentation
  crypto: doc - update AEAD AD handling
  ...
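Note: the headline API change above is the conversion of blkcipher/ablkcipher users to the
skcipher interface. As orientation for the driver diffs below, the following is a minimal
sketch of the caller-side skcipher flow those users were converted to. It is illustrative
only and is not part of this merge; the helper names (example_skcipher_encrypt, sk_wait,
sk_done) are invented for the example, while the crypto API calls are the standard ones
from <crypto/skcipher.h>.

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>
	#include <linux/completion.h>
	#include <linux/err.h>

	struct sk_wait {
		struct completion done;
		int err;
	};

	/* Async completion callback: ignore the backlog notification,
	 * record the final status and wake the waiter.
	 */
	static void sk_done(struct crypto_async_request *req, int err)
	{
		struct sk_wait *w = req->data;

		if (err == -EINPROGRESS)
			return;
		w->err = err;
		complete(&w->done);
	}

	/* Encrypt one 16-byte block of 'buf' in place with cbc(aes).
	 * buf, key and iv must be in linear, DMA-able memory (not stack),
	 * key is 16 bytes (AES-128) and iv is 16 bytes.
	 */
	static int example_skcipher_encrypt(u8 *buf, const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		struct sk_wait w;
		int ret;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_skcipher_setkey(tfm, key, 16);
		if (ret)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		init_completion(&w.done);
		sg_init_one(&sg, buf, 16);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      sk_done, &w);
		skcipher_request_set_crypt(req, &sg, &sg, 16, iv);

		/* Hardware drivers typically return -EINPROGRESS/-EBUSY and
		 * complete asynchronously, so wait for the callback.
		 */
		ret = crypto_skcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&w.done);
			ret = w.err;
		}

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return ret;
	}

The alloc/setkey/request/wait pattern shown here is what the accelerator drivers touched in
this merge ultimately service through their asynchronous entry points.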
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                                |    7
-rw-r--r--  drivers/crypto/atmel-aes.c                            |   10
-rw-r--r--  drivers/crypto/atmel-sha-regs.h                       |    4
-rw-r--r--  drivers/crypto/atmel-sha.c                            |  200
-rw-r--r--  drivers/crypto/atmel-tdes.c                           |    4
-rw-r--r--  drivers/crypto/caam/ctrl.c                            |    2
-rw-r--r--  drivers/crypto/caam/jr.c                              |    2
-rw-r--r--  drivers/crypto/caam/regs.h                            |    3
-rw-r--r--  drivers/crypto/ccp/Makefile                           |    2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c              |   36
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c                   |   12
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c                   |   49
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h                       |   22
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c                       |  533
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c                          |  471
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h                          |  155
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c                          |  381
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c                          |   23
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c                     |   48
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c                        |   26
-rw-r--r--  drivers/crypto/omap-aes.c                             |   97
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h     |    4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c               |    1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_user.h          |    6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_hw_arbiter.c        |   19
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_uclo.h          |   42
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c              |    6
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c         |   70
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c              |    2
-rw-r--r--  drivers/crypto/rockchip/Makefile                      |    1
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c               |   28
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h               |   56
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c    |   20
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c         |  404
-rw-r--r--  drivers/crypto/s5p-sss.c                              |   12
-rw-r--r--  drivers/crypto/sahara.c                               |   19
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c             |    5
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c                 |    4
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c                 |    4
39 files changed, 1810 insertions, 980 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 07d494276aad..477fffdb4f49 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -296,6 +296,7 @@ config CRYPTO_DEV_OMAP_AES
depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
select CRYPTO_AES
select CRYPTO_BLKCIPHER
+ select CRYPTO_ENGINE
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
@@ -487,7 +488,7 @@ config CRYPTO_DEV_IMGTEC_HASH
config CRYPTO_DEV_SUN4I_SS
tristate "Support for Allwinner Security System cryptographic accelerator"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI && !64BIT
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_AES
@@ -507,6 +508,10 @@ config CRYPTO_DEV_ROCKCHIP
depends on OF && ARCH_ROCKCHIP
select CRYPTO_AES
select CRYPTO_DES
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
select CRYPTO_BLKCIPHER
help
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 3eb3f1279fb7..e3d40a8dfffb 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -369,12 +369,6 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
return len ? block_size - len : 0;
}
-static inline struct aead_request *
-aead_request_cast(struct crypto_async_request *req)
-{
- return container_of(req, struct aead_request, base);
-}
-
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
struct atmel_aes_dev *aes_dd = NULL;
@@ -2085,9 +2079,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (!aes_dd->io_base) {
+ if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(aes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index 83b2d7425666..e08897109cab 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -8,6 +8,8 @@
#define SHA_CR_START (1 << 0)
#define SHA_CR_FIRST (1 << 4)
#define SHA_CR_SWRST (1 << 8)
+#define SHA_CR_WUIHV (1 << 12)
+#define SHA_CR_WUIEHV (1 << 13)
#define SHA_MR 0x04
#define SHA_MR_MODE_MASK (0x3 << 0)
@@ -15,6 +17,8 @@
#define SHA_MR_MODE_AUTO 0x1
#define SHA_MR_MODE_PDC 0x2
#define SHA_MR_PROCDLY (1 << 4)
+#define SHA_MR_UIHV (1 << 5)
+#define SHA_MR_UIEHV (1 << 6)
#define SHA_MR_ALGO_SHA1 (0 << 8)
#define SHA_MR_ALGO_SHA256 (1 << 8)
#define SHA_MR_ALGO_SHA384 (2 << 8)
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8bf9914d4d15..97e34799e077 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -53,6 +53,7 @@
#define SHA_FLAGS_FINUP BIT(16)
#define SHA_FLAGS_SG BIT(17)
+#define SHA_FLAGS_ALGO_MASK GENMASK(22, 18)
#define SHA_FLAGS_SHA1 BIT(18)
#define SHA_FLAGS_SHA224 BIT(19)
#define SHA_FLAGS_SHA256 BIT(20)
@@ -60,11 +61,12 @@
#define SHA_FLAGS_SHA512 BIT(22)
#define SHA_FLAGS_ERROR BIT(23)
#define SHA_FLAGS_PAD BIT(24)
+#define SHA_FLAGS_RESTORE BIT(25)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
-#define SHA_BUFFER_LEN PAGE_SIZE
+#define SHA_BUFFER_LEN (PAGE_SIZE / 16)
#define ATMEL_SHA_DMA_THRESHOLD 56
@@ -73,10 +75,15 @@ struct atmel_sha_caps {
bool has_dualbuff;
bool has_sha224;
bool has_sha_384_512;
+ bool has_uihv;
};
struct atmel_sha_dev;
+/*
+ * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
struct atmel_sha_reqctx {
struct atmel_sha_dev *dd;
unsigned long flags;
@@ -95,7 +102,7 @@ struct atmel_sha_reqctx {
size_t block_size;
- u8 buffer[0] __aligned(sizeof(u32));
+ u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
struct atmel_sha_ctx {
@@ -122,6 +129,7 @@ struct atmel_sha_dev {
spinlock_t lock;
int err;
struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
unsigned long flags;
struct crypto_queue queue;
@@ -317,7 +325,8 @@ static int atmel_sha_init(struct ahash_request *req)
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
- u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
+ u32 valmr = SHA_MR_MODE_AUTO;
+ unsigned int i, hashsize = 0;
if (likely(dma)) {
if (!dd->caps.has_dma)
@@ -329,22 +338,62 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
- if (ctx->flags & SHA_FLAGS_SHA1)
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
valmr |= SHA_MR_ALGO_SHA1;
- else if (ctx->flags & SHA_FLAGS_SHA224)
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA224:
valmr |= SHA_MR_ALGO_SHA224;
- else if (ctx->flags & SHA_FLAGS_SHA256)
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA256:
valmr |= SHA_MR_ALGO_SHA256;
- else if (ctx->flags & SHA_FLAGS_SHA384)
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA384:
valmr |= SHA_MR_ALGO_SHA384;
- else if (ctx->flags & SHA_FLAGS_SHA512)
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA512:
valmr |= SHA_MR_ALGO_SHA512;
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ default:
+ break;
+ }
/* Setting CR_FIRST only for the first iteration */
- if (!(ctx->digcnt[0] || ctx->digcnt[1]))
- valcr = SHA_CR_FIRST;
+ if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
+ atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
+ const u32 *hash = (const u32 *)ctx->digest;
+
+ /*
+ * Restore the hardware context: update the User Initialize
+ * Hash Value (UIHV) with the value saved when the latest
+ * 'update' operation completed on this very same crypto
+ * request.
+ */
+ ctx->flags &= ~SHA_FLAGS_RESTORE;
+ atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+ for (i = 0; i < hashsize / sizeof(u32); ++i)
+ atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
+ atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ valmr |= SHA_MR_UIHV;
+ }
+ /*
+ * WARNING: If the UIHV feature is not available, the hardware CANNOT
+ * process concurrent requests: the internal registers used to store
+ * the hash/digest are still set to the partial digest output values
+ * computed during the latest round.
+ */
- atmel_sha_write(dd, SHA_CR, valcr);
atmel_sha_write(dd, SHA_MR, valmr);
}
@@ -713,23 +762,31 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
u32 *hash = (u32 *)ctx->digest;
- int i;
+ unsigned int i, hashsize;
- if (ctx->flags & SHA_FLAGS_SHA1)
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA224)
- for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA256)
- for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA384)
- for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else
- for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA384:
+ case SHA_FLAGS_SHA512:
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ default:
+ /* Should not happen... */
+ return;
+ }
+
+ for (i = 0; i < hashsize / sizeof(u32); ++i)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ ctx->flags |= SHA_FLAGS_RESTORE;
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -788,7 +845,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
req->base.complete(&req->base, err);
/* handle new request */
- tasklet_schedule(&dd->done_task);
+ tasklet_schedule(&dd->queue_task);
}
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
@@ -922,36 +979,17 @@ static int atmel_sha_update(struct ahash_request *req)
static int atmel_sha_final(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
- struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct atmel_sha_dev *dd = tctx->dd;
-
- int err = 0;
ctx->flags |= SHA_FLAGS_FINUP;
if (ctx->flags & SHA_FLAGS_ERROR)
return 0; /* uncompleted hash is not needed */
- if (ctx->bufcnt) {
- return atmel_sha_enqueue(req, SHA_OP_FINAL);
- } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
- err = atmel_sha_hw_init(dd);
- if (err)
- goto err1;
-
- dd->flags |= SHA_FLAGS_BUSY;
- err = atmel_sha_final_req(dd);
- } else {
+ if (ctx->flags & SHA_FLAGS_PAD)
/* copy ready hash (+ finalize hmac) */
return atmel_sha_finish(req);
- }
-
-err1:
- if (err != -EINPROGRESS)
- /* done_task will not finish it, so do it here */
- atmel_sha_finish_req(req, err);
- return err;
+ return atmel_sha_enqueue(req, SHA_OP_FINAL);
}
static int atmel_sha_finup(struct ahash_request *req)
@@ -979,11 +1017,27 @@ static int atmel_sha_digest(struct ahash_request *req)
return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
+
+static int atmel_sha_export(struct ahash_request *req, void *out)
+{
+ const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int atmel_sha_import(struct ahash_request *req, const void *in)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct atmel_sha_reqctx) +
- SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
+ sizeof(struct atmel_sha_reqctx));
return 0;
}
@@ -995,8 +1049,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha1",
.cra_driver_name = "atmel-sha1",
@@ -1016,8 +1073,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha256",
.cra_driver_name = "atmel-sha256",
@@ -1039,8 +1099,11 @@ static struct ahash_alg sha_224_alg = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha224",
.cra_driver_name = "atmel-sha224",
@@ -1062,8 +1125,11 @@ static struct ahash_alg sha_384_512_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha384",
.cra_driver_name = "atmel-sha384",
@@ -1083,8 +1149,11 @@ static struct ahash_alg sha_384_512_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha512",
.cra_driver_name = "atmel-sha512",
@@ -1100,16 +1169,18 @@ static struct ahash_alg sha_384_512_algs[] = {
},
};
+static void atmel_sha_queue_task(unsigned long data)
+{
+ struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+
+ atmel_sha_handle_queue(dd, NULL);
+}
+
static void atmel_sha_done_task(unsigned long data)
{
struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
int err = 0;
- if (!(SHA_FLAGS_BUSY & dd->flags)) {
- atmel_sha_handle_queue(dd, NULL);
- return;
- }
-
if (SHA_FLAGS_CPU & dd->flags) {
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
@@ -1272,14 +1343,23 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
dd->caps.has_dualbuff = 0;
dd->caps.has_sha224 = 0;
dd->caps.has_sha_384_512 = 0;
+ dd->caps.has_uihv = 0;
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x510:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ dd->caps.has_sha_384_512 = 1;
+ dd->caps.has_uihv = 1;
+ break;
case 0x420:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
dd->caps.has_sha224 = 1;
dd->caps.has_sha_384_512 = 1;
+ dd->caps.has_uihv = 1;
break;
case 0x410:
dd->caps.has_dma = 1;
@@ -1366,6 +1446,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
(unsigned long)sha_dd);
+ tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
+ (unsigned long)sha_dd);
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
@@ -1404,9 +1486,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (!sha_dd->io_base) {
+ if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(sha_dd->io_base);
goto res_err;
}
@@ -1464,6 +1546,7 @@ err_sha_dma:
iclk_unprepare:
clk_unprepare(sha_dd->iclk);
res_err:
+ tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
sha_dd_err:
dev_err(dev, "initialization failed.\n");
@@ -1484,6 +1567,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
atmel_sha_unregister_algs(sha_dd);
+ tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
if (sha_dd->caps.has_dma)
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 2c7a628d0375..bf467d7be35c 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (!tdes_dd->io_base) {
+ if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(tdes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 69d4a1326fee..44d30b45f3cc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -534,7 +534,7 @@ static int caam_probe(struct platform_device *pdev)
* long pointers in master configuration register
*/
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
+ MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f7e0d8d4c3da..6fd63a600614 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -65,7 +65,7 @@ static int caam_reset_hw_jr(struct device *dev)
/*
* Shutdown JobR independent of platform property code
*/
-int caam_jr_shutdown(struct device *dev)
+static int caam_jr_shutdown(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
dma_addr_t inpbusaddr, outbusaddr;
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index a8a79975682f..0ba9c40597dc 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -455,7 +455,8 @@ struct caam_ctrl {
#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
-#define MCFGR_BURST_64 0x00000001 /* Max burst size */
+#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */
/* JRSTART register offsets */
#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 55a1f3951578..b750592cc936 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o
+ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
ccp-$(CONFIG_PCI) += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index d89f20c04266..3d9acc53d247 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -220,6 +220,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
return ccp_aes_cmac_finup(req);
}
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->null_msg = state.null_msg;
+ memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -352,10 +385,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
+ alg->export = ccp_aes_cmac_export;
+ alg->import = ccp_aes_cmac_import;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
+ halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 7984f910884d..89291c15015c 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -259,6 +259,7 @@ static struct crypto_alg ccp_aes_rfc3686_defaults = {
struct ccp_aes_def {
enum ccp_aes_mode mode;
+ unsigned int version;
const char *name;
const char *driver_name;
unsigned int blocksize;
@@ -269,6 +270,7 @@ struct ccp_aes_def {
static struct ccp_aes_def aes_algs[] = {
{
.mode = CCP_AES_MODE_ECB,
+ .version = CCP_VERSION(3, 0),
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -277,6 +279,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CBC,
+ .version = CCP_VERSION(3, 0),
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -285,6 +288,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CFB,
+ .version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -293,6 +297,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_OFB,
+ .version = CCP_VERSION(3, 0),
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccp",
.blocksize = 1,
@@ -301,6 +306,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CTR,
+ .version = CCP_VERSION(3, 0),
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccp",
.blocksize = 1,
@@ -309,6 +315,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CTR,
+ .version = CCP_VERSION(3, 0),
.name = "rfc3686(ctr(aes))",
.driver_name = "rfc3686-ctr-aes-ccp",
.blocksize = 1,
@@ -357,8 +364,11 @@ static int ccp_register_aes_alg(struct list_head *head,
int ccp_register_aes_algs(struct list_head *head)
{
int i, ret;
+ unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ if (aes_algs[i].version > ccpversion)
+ continue;
ret = ccp_register_aes_alg(head, &aes_algs[i]);
if (ret)
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index d14b3f28e010..b5ad72897dc2 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -207,6 +207,43 @@ static int ccp_sha_digest(struct ahash_request *req)
return ccp_sha_finup(req);
}
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+ memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->type = state.type;
+ rctx->msg_bits = state.msg_bits;
+ rctx->first = state.first;
+ memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -304,6 +341,7 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
}
struct ccp_sha_def {
+ unsigned int version;
const char *name;
const char *drv_name;
enum ccp_sha_type type;
@@ -313,6 +351,7 @@ struct ccp_sha_def {
static struct ccp_sha_def sha_algs[] = {
{
+ .version = CCP_VERSION(3, 0),
.name = "sha1",
.drv_name = "sha1-ccp",
.type = CCP_SHA_TYPE_1,
@@ -320,6 +359,7 @@ static struct ccp_sha_def sha_algs[] = {
.block_size = SHA1_BLOCK_SIZE,
},
{
+ .version = CCP_VERSION(3, 0),
.name = "sha224",
.drv_name = "sha224-ccp",
.type = CCP_SHA_TYPE_224,
@@ -327,6 +367,7 @@ static struct ccp_sha_def sha_algs[] = {
.block_size = SHA224_BLOCK_SIZE,
},
{
+ .version = CCP_VERSION(3, 0),
.name = "sha256",
.drv_name = "sha256-ccp",
.type = CCP_SHA_TYPE_256,
@@ -403,9 +444,12 @@ static int ccp_register_sha_alg(struct list_head *head,
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
+ alg->export = ccp_sha_export;
+ alg->import = ccp_sha_import;
halg = &alg->halg;
halg->digestsize = def->digest_size;
+ halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
@@ -440,8 +484,11 @@ static int ccp_register_sha_alg(struct list_head *head,
int ccp_register_sha_algs(struct list_head *head)
{
int i, ret;
+ unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ if (sha_algs[i].version > ccpversion)
+ continue;
ret = ccp_register_sha_alg(head, &sha_algs[i]);
if (ret)
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0f44c6..a326ec20bfa8 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_aes_cmac_exp_ctx {
+ unsigned int null_msg;
+
+ u8 iv[AES_BLOCK_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+};
+
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_sha_exp_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
new file mode 100644
index 000000000000..7d5eab49179e
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -0,0 +1,533 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
+{
+ struct ccp_cmd_queue *cmd_q = op->cmd_q;
+ struct ccp_device *ccp = cmd_q->ccp;
+ void __iomem *cr_addr;
+ u32 cr0, cmd;
+ unsigned int i;
+ int ret = 0;
+
+ /* We could read a status register to see how many free slots
+ * are actually available, but reading that register resets it
+ * and you could lose some error information.
+ */
+ cmd_q->free_slots--;
+
+ cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
+ | (op->jobid << REQ0_JOBID_SHIFT)
+ | REQ0_WAIT_FOR_WRITE;
+
+ if (op->soc)
+ cr0 |= REQ0_STOP_ON_COMPLETE
+ | REQ0_INT_ON_COMPLETE;
+
+ if (op->ioc || !cmd_q->free_slots)
+ cr0 |= REQ0_INT_ON_COMPLETE;
+
+ /* Start at CMD_REQ1 */
+ cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
+
+ mutex_lock(&ccp->req_mutex);
+
+ /* Write CMD_REQ1 through CMD_REQx first */
+ for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
+ iowrite32(*(cr + i), cr_addr);
+
+ /* Tell the CCP to start */
+ wmb();
+ iowrite32(cr0, ccp->io_regs + CMD_REQ0);
+
+ mutex_unlock(&ccp->req_mutex);
+
+ if (cr0 & REQ0_INT_ON_COMPLETE) {
+ /* Wait for the job to complete */
+ ret = wait_event_interruptible(cmd_q->int_queue,
+ cmd_q->int_rcvd);
+ if (ret || cmd_q->cmd_error) {
+ /* On error delete all related jobs from the queue */
+ cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+
+ if (!ret)
+ ret = -EIO;
+ } else if (op->soc) {
+ /* Delete just head job from the queue on SoC */
+ cmd = DEL_Q_ACTIVE
+ | (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+ }
+
+ cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 0;
+ }
+
+ return ret;
+}
+
+static int ccp_perform_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
+ | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
+ | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
+ | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->u.aes.mode == CCP_AES_MODE_CFB)
+ cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_xts_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
+ | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
+ | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_sha(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
+ | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
+ | REQ1_INIT;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->eom) {
+ cr[0] |= REQ1_EOM;
+ cr[4] = lower_32_bits(op->u.sha.msg_bits);
+ cr[5] = upper_32_bits(op->u.sha.msg_bits);
+ } else {
+ cr[4] = 0;
+ cr[5] = 0;
+ }
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_rsa(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
+ | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->u.rsa.input_len - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_passthru(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
+ | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
+ | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM)
+ cr[1] = op->src.u.dma.length - 1;
+ else
+ cr[1] = op->dst.u.dma.length - 1;
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+ } else {
+ cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
+ cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+ }
+
+ if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+ } else {
+ cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
+ cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+ }
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_ecc(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = REQ1_ECC_AFFINE_CONVERT
+ | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
+ | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+ u32 trng_value;
+ int len = min_t(int, sizeof(trng_value), max);
+
+ /*
+ * Locking is provided by the caller so we can update device
+ * hwrng-related fields safely
+ */
+ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+ if (!trng_value) {
+ /* Zero is returned if not data is available or if a
+ * bad-entropy error is present. Assume an error if
+ * we exceed TRNG_RETRIES reads of zero.
+ */
+ if (ccp->hwrng_retries++ > TRNG_RETRIES)
+ return -EIO;
+
+ return 0;
+ }
+
+ /* Reset the counter and save the rng value */
+ ccp->hwrng_retries = 0;
+ memcpy(data, &trng_value, len);
+
+ return len;
+}
+
+static int ccp_init(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ unsigned int qmr, qim, i;
+ int ret;
+
+ /* Find available queues */
+ qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+
+ /* Allocate a dma pool for this queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+ ccp->name, i);
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ CCP_DMAPOOL_MAX_SIZE,
+ CCP_DMAPOOL_ALIGN, 0);
+ if (!dma_pool) {
+ dev_err(dev, "unable to allocate dma pool\n");
+ ret = -ENOMEM;
+ goto e_pool;
+ }
+
+ cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+ ccp->cmd_q_count++;
+
+ cmd_q->ccp = ccp;
+ cmd_q->id = i;
+ cmd_q->dma_pool = dma_pool;
+
+ /* Reserve 2 KSB regions for the queue */
+ cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
+ cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
+ ccp->ksb_count -= 2;
+
+ /* Preset some register values and masks that are queue
+ * number dependent
+ */
+ cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->int_ok = 1 << (i * 2);
+ cmd_q->int_err = 1 << ((i * 2) + 1);
+
+ cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+ init_waitqueue_head(&cmd_q->int_queue);
+
+ /* Build queue interrupt mask (two interrupts per queue) */
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+
+#ifdef CONFIG_ARM64
+ /* For arm64 set the recommended queue cache settings */
+ iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+ (CMD_Q_CACHE_INC * i));
+#endif
+
+ dev_dbg(dev, "queue #%u available\n", i);
+ }
+ if (ccp->cmd_q_count == 0) {
+ dev_notice(dev, "no command queues available\n");
+ ret = -EIO;
+ goto e_pool;
+ }
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Disable and clear interrupts until ready */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ /* Request an irq */
+ ret = ccp->get_irq(ccp);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_pool;
+ }
+
+ /* Initialize the queues used to wait for KSB space and suspend */
+ init_waitqueue_head(&ccp->ksb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
+ /* Create a kthread for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct task_struct *kthread;
+
+ cmd_q = &ccp->cmd_q[i];
+
+ kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
+ if (IS_ERR(kthread)) {
+ dev_err(dev, "error creating queue thread (%ld)\n",
+ PTR_ERR(kthread));
+ ret = PTR_ERR(kthread);
+ goto e_kthread;
+ }
+
+ cmd_q->kthread = kthread;
+ wake_up_process(kthread);
+ }
+
+ /* Register the RNG */
+ ccp->hwrng.name = ccp->rngname;
+ ccp->hwrng.read = ccp_trng_read;
+ ret = hwrng_register(&ccp->hwrng);
+ if (ret) {
+ dev_err(dev, "error registering hwrng (%d)\n", ret);
+ goto e_kthread;
+ }
+
+ ccp_add_device(ccp);
+
+ /* Enable interrupts */
+ iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+ return 0;
+
+e_kthread:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ ccp->free_irq(ccp);
+
+e_pool:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ return ret;
+}
+
+static void ccp_destroy(struct ccp_device *ccp)
+{
+ struct ccp_cmd_queue *cmd_q;
+ struct ccp_cmd *cmd;
+ unsigned int qim, i;
+
+ /* Remove this device from the list of available units first */
+ ccp_del_device(ccp);
+
+ /* Unregister the RNG */
+ hwrng_unregister(&ccp->hwrng);
+
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ /* Build queue interrupt mask (two interrupt masks per queue) */
+ qim = 0;
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+ }
+
+ /* Disable and clear interrupts */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ ccp->free_irq(ccp);
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ /* Flush the cmd and backlog queue */
+ while (!list_empty(&ccp->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+ while (!list_empty(&ccp->backlog)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_cmd_queue *cmd_q;
+ u32 q_int, status;
+ unsigned int i;
+
+ status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+ if (q_int) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ccp_actions ccp3_actions = {
+ .perform_aes = ccp_perform_aes,
+ .perform_xts_aes = ccp_perform_xts_aes,
+ .perform_sha = ccp_perform_sha,
+ .perform_rsa = ccp_perform_rsa,
+ .perform_passthru = ccp_perform_passthru,
+ .perform_ecc = ccp_perform_ecc,
+ .init = ccp_init,
+ .destroy = ccp_destroy,
+ .irqhandler = ccp_irq_handler,
+};
+
+struct ccp_vdata ccpv3 = {
+ .version = CCP_VERSION(3, 0),
+ .perform = &ccp3_actions,
+};
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 861bacc1bb94..336e5b780fcb 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -16,6 +16,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/rwlock_types.h>
+#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
@@ -37,20 +39,107 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
-static struct ccp_device *ccp_dev;
-static inline struct ccp_device *ccp_get_device(void)
+/* List of CCPs, CCP count, read-write access lock, and access functions
+ *
+ * Lock structure: get ccp_unit_lock for reading whenever we need to
+ * examine the CCP list. While holding it for reading we can acquire
+ * the RR lock to update the round-robin next-CCP pointer. The unit lock
+ * must be acquired before the RR lock.
+ *
+ * If the unit-lock is acquired for writing, we have total control over
+ * the list, so there's no value in getting the RR lock.
+ */
+static DEFINE_RWLOCK(ccp_unit_lock);
+static LIST_HEAD(ccp_units);
+
+/* Round-robin counter */
+static DEFINE_RWLOCK(ccp_rr_lock);
+static struct ccp_device *ccp_rr;
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t ccp_unit_ordinal;
+unsigned int ccp_increment_unit_ordinal(void)
{
- return ccp_dev;
+ return atomic_inc_return(&ccp_unit_ordinal);
}
-static inline void ccp_add_device(struct ccp_device *ccp)
+/**
+ * ccp_add_device - add a CCP device to the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Put this CCP on the unit list, which makes it available
+ * for use.
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+void ccp_add_device(struct ccp_device *ccp)
{
- ccp_dev = ccp;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ list_add_tail(&ccp->entry, &ccp_units);
+ if (!ccp_rr)
+ /* We already have the list lock (we're first) so this
+ * pointer can't change on us. Set its initial value.
+ */
+ ccp_rr = ccp;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
}
-static inline void ccp_del_device(struct ccp_device *ccp)
+/**
+ * ccp_del_device - remove a CCP device from the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Remove this unit from the list of devices. If the next device
+ * up for use is this one, adjust the pointer. If this is the last
+ * device, NULL the pointer.
+ */
+void ccp_del_device(struct ccp_device *ccp)
{
- ccp_dev = NULL;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ if (ccp_rr == ccp) {
+ /* ccp_unit_lock is read/write; any read access
+ * will be suspended while we make changes to the
+ * list and RR pointer.
+ */
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ }
+ list_del(&ccp->entry);
+ if (list_empty(&ccp_units))
+ ccp_rr = NULL;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
+}
+
+static struct ccp_device *ccp_get_device(void)
+{
+ unsigned long flags;
+ struct ccp_device *dp = NULL;
+
+ /* We round-robin through the unit list.
+ * The (ccp_rr) pointer refers to the next unit to use.
+ */
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ write_lock_irqsave(&ccp_rr_lock, flags);
+ dp = ccp_rr;
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ write_unlock_irqrestore(&ccp_rr_lock, flags);
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return dp;
}
/**
@@ -60,14 +149,41 @@ static inline void ccp_del_device(struct ccp_device *ccp)
*/
int ccp_present(void)
{
- if (ccp_get_device())
- return 0;
+ unsigned long flags;
+ int ret;
- return -ENODEV;
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ ret = list_empty(&ccp_units);
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
/**
+ * ccp_version - get the version of the CCP device
+ *
+ * Returns the version from the first unit on the list;
+ * otherwise a zero if no CCP device is present
+ */
+unsigned int ccp_version(void)
+{
+ struct ccp_device *dp;
+ unsigned long flags;
+ int ret = 0;
+
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ dp = list_first_entry(&ccp_units, struct ccp_device, entry);
+ ret = dp->vdata->version;
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_version);
+
+/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
@@ -221,7 +337,12 @@ static void ccp_do_cmd_complete(unsigned long data)
complete(&tdata->completion);
}
-static int ccp_cmd_queue_thread(void *data)
+/**
+ * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
+ *
+ * @data: thread-specific data
+ */
+int ccp_cmd_queue_thread(void *data)
{
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
@@ -257,35 +378,6 @@ static int ccp_cmd_queue_thread(void *data)
return 0;
}
-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
- u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
-
- /*
- * Locking is provided by the caller so we can update device
- * hwrng-related fields safely
- */
- trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
- if (!trng_value) {
- /* Zero is returned if not data is available or if a
- * bad-entropy error is present. Assume an error if
- * we exceed TRNG_RETRIES reads of zero.
- */
- if (ccp->hwrng_retries++ > TRNG_RETRIES)
- return -EIO;
-
- return 0;
- }
-
- /* Reset the counter and save the rng value */
- ccp->hwrng_retries = 0;
- memcpy(data, &trng_value, len);
-
- return len;
-}
-
/**
* ccp_alloc_struct - allocate and initialize the ccp_device struct
*
@@ -309,253 +401,11 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->ksb_count = KSB_COUNT;
ccp->ksb_start = 0;
- return ccp;
-}
-
-/**
- * ccp_init - initialize the CCP device
- *
- * @ccp: ccp_device struct
- */
-int ccp_init(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct ccp_cmd_queue *cmd_q;
- struct dma_pool *dma_pool;
- char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
- int ret;
-
- /* Find available queues */
- qim = 0;
- qmr = ioread32(ccp->io_regs + Q_MASK_REG);
- for (i = 0; i < MAX_HW_QUEUES; i++) {
- if (!(qmr & (1 << i)))
- continue;
-
- /* Allocate a dma pool for this queue */
- snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
- dma_pool = dma_pool_create(dma_pool_name, dev,
- CCP_DMAPOOL_MAX_SIZE,
- CCP_DMAPOOL_ALIGN, 0);
- if (!dma_pool) {
- dev_err(dev, "unable to allocate dma pool\n");
- ret = -ENOMEM;
- goto e_pool;
- }
-
- cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
- ccp->cmd_q_count++;
-
- cmd_q->ccp = ccp;
- cmd_q->id = i;
- cmd_q->dma_pool = dma_pool;
-
- /* Reserve 2 KSB regions for the queue */
- cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
- cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
- ccp->ksb_count -= 2;
-
- /* Preset some register values and masks that are queue
- * number dependent
- */
- cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->int_ok = 1 << (i * 2);
- cmd_q->int_err = 1 << ((i * 2) + 1);
-
- cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
-
- init_waitqueue_head(&cmd_q->int_queue);
-
- /* Build queue interrupt mask (two interrupts per queue) */
- qim |= cmd_q->int_ok | cmd_q->int_err;
+ ccp->ord = ccp_increment_unit_ordinal();
+ snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
+ snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
-#ifdef CONFIG_ARM64
- /* For arm64 set the recommended queue cache settings */
- iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
- (CMD_Q_CACHE_INC * i));
-#endif
-
- dev_dbg(dev, "queue #%u available\n", i);
- }
- if (ccp->cmd_q_count == 0) {
- dev_notice(dev, "no command queues available\n");
- ret = -EIO;
- goto e_pool;
- }
- dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
-
- /* Disable and clear interrupts until ready */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- /* Request an irq */
- ret = ccp->get_irq(ccp);
- if (ret) {
- dev_err(dev, "unable to allocate an IRQ\n");
- goto e_pool;
- }
-
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->ksb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
- /* Create a kthread for each queue */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- struct task_struct *kthread;
-
- cmd_q = &ccp->cmd_q[i];
-
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "ccp-q%u", cmd_q->id);
- if (IS_ERR(kthread)) {
- dev_err(dev, "error creating queue thread (%ld)\n",
- PTR_ERR(kthread));
- ret = PTR_ERR(kthread);
- goto e_kthread;
- }
-
- cmd_q->kthread = kthread;
- wake_up_process(kthread);
- }
-
- /* Register the RNG */
- ccp->hwrng.name = "ccp-rng";
- ccp->hwrng.read = ccp_trng_read;
- ret = hwrng_register(&ccp->hwrng);
- if (ret) {
- dev_err(dev, "error registering hwrng (%d)\n", ret);
- goto e_kthread;
- }
-
- /* Make the device struct available before enabling interrupts */
- ccp_add_device(ccp);
-
- /* Enable interrupts */
- iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
- return 0;
-
-e_kthread:
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- ccp->free_irq(ccp);
-
-e_pool:
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- return ret;
-}
-
-/**
- * ccp_destroy - tear down the CCP device
- *
- * @ccp: ccp_device struct
- */
-void ccp_destroy(struct ccp_device *ccp)
-{
- struct ccp_cmd_queue *cmd_q;
- struct ccp_cmd *cmd;
- unsigned int qim, i;
-
- /* Remove general access to the device struct */
- ccp_del_device(ccp);
-
- /* Unregister the RNG */
- hwrng_unregister(&ccp->hwrng);
-
- /* Stop the queue kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- /* Build queue interrupt mask (two interrupt masks per queue) */
- qim = 0;
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
- qim |= cmd_q->int_ok | cmd_q->int_err;
- }
-
- /* Disable and clear interrupts */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- ccp->free_irq(ccp);
-
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- /* Flush the cmd and backlog queue */
- while (!list_empty(&ccp->cmd)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
- while (!list_empty(&ccp->backlog)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
-}
-
-/**
- * ccp_irq_handler - handle interrupts generated by the CCP device
- *
- * @irq: the irq associated with the interrupt
- * @data: the data value supplied when the irq was created
- */
-irqreturn_t ccp_irq_handler(int irq, void *data)
-{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- struct ccp_cmd_queue *cmd_q;
- u32 q_int, status;
- unsigned int i;
-
- status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- q_int = status & (cmd_q->int_ok | cmd_q->int_err);
- if (q_int) {
- cmd_q->int_status = status;
- cmd_q->q_status = ioread32(cmd_q->reg_status);
- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
- /* On error, only save the first error value */
- if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
- cmd_q->int_rcvd = 1;
-
- /* Acknowledge the interrupt and wake the kthread */
- iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
- wake_up_interruptible(&cmd_q->int_queue);
- }
- }
-
- return IRQ_HANDLED;
+ return ccp;
}
#ifdef CONFIG_PM
@@ -577,41 +427,22 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
}
#endif
-#ifdef CONFIG_X86
-static const struct x86_cpu_id ccp_support[] = {
- { X86_VENDOR_AMD, 22, },
- { },
-};
-#endif
-
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
- if (!x86_match_cpu(ccp_support))
- return -ENODEV;
-
- switch (cpuinfo->x86) {
- case 22:
- if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
- return -ENODEV;
-
- ret = ccp_pci_init();
- if (ret)
- return ret;
-
- /* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
- ccp_pci_exit();
- return -ENODEV;
- }
-
- return 0;
+ ret = ccp_pci_init();
+ if (ret)
+ return ret;
- break;
+ /* Don't leave the driver loaded if init failed */
+ if (ccp_present() != 0) {
+ ccp_pci_exit();
+ return -ENODEV;
}
+
+ return 0;
#endif
#ifdef CONFIG_ARM64
@@ -622,7 +453,7 @@ static int __init ccp_mod_init(void)
return ret;
/* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
+ if (ccp_present() != 0) {
ccp_platform_exit();
return -ENODEV;
}
@@ -636,13 +467,7 @@ static int __init ccp_mod_init(void)
static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
-
- switch (cpuinfo->x86) {
- case 22:
- ccp_pci_exit();
- break;
- }
+ ccp_pci_exit();
#endif
#ifdef CONFIG_ARM64
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 6ff89031fb96..7745d0be491d 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -23,6 +23,7 @@
#include <linux/hw_random.h>
#include <linux/bitops.h>
+#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
#define MAX_HW_QUEUES 5
@@ -140,6 +141,29 @@
#define CCP_ECC_RESULT_OFFSET 60
#define CCP_ECC_RESULT_SUCCESS 0x0001
+struct ccp_op;
+
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+ int (*perform_aes)(struct ccp_op *);
+ int (*perform_xts_aes)(struct ccp_op *);
+ int (*perform_sha)(struct ccp_op *);
+ int (*perform_rsa)(struct ccp_op *);
+ int (*perform_passthru)(struct ccp_op *);
+ int (*perform_ecc)(struct ccp_op *);
+ int (*init)(struct ccp_device *);
+ void (*destroy)(struct ccp_device *);
+ irqreturn_t (*irqhandler)(int, void *);
+};
+
+/* Structure to hold CCP version-specific values */
+struct ccp_vdata {
+ unsigned int version;
+ struct ccp_actions *perform;
+};
+
+extern struct ccp_vdata ccpv3;
+
struct ccp_device;
struct ccp_cmd;
@@ -184,6 +208,13 @@ struct ccp_cmd_queue {
} ____cacheline_aligned;
struct ccp_device {
+ struct list_head entry;
+
+ struct ccp_vdata *vdata;
+ unsigned int ord;
+ char name[MAX_CCP_NAME_LEN];
+ char rngname[MAX_CCP_NAME_LEN];
+
struct device *dev;
/*
@@ -258,18 +289,132 @@ struct ccp_device {
unsigned int axcache;
};
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_KSB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE__LAST,
+};
+
+struct ccp_dma_info {
+ dma_addr_t address;
+ unsigned int offset;
+ unsigned int length;
+ enum dma_data_direction dir;
+};
+
+struct ccp_dm_workarea {
+ struct device *dev;
+ struct dma_pool *dma_pool;
+ unsigned int length;
+
+ u8 *address;
+ struct ccp_dma_info dma;
+};
+
+struct ccp_sg_workarea {
+ struct scatterlist *sg;
+ int nents;
+
+ struct scatterlist *dma_sg;
+ struct device *dma_dev;
+ unsigned int dma_count;
+ enum dma_data_direction dma_dir;
+
+ unsigned int sg_used;
+
+ u64 bytes_left;
+};
+
+struct ccp_data {
+ struct ccp_sg_workarea sg_wa;
+ struct ccp_dm_workarea dm_wa;
+};
+
+struct ccp_mem {
+ enum ccp_memtype type;
+ union {
+ struct ccp_dma_info dma;
+ u32 ksb;
+ } u;
+};
+
+struct ccp_aes_op {
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+};
+
+struct ccp_xts_aes_op {
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+};
+
+struct ccp_sha_op {
+ enum ccp_sha_type type;
+ u64 msg_bits;
+};
+
+struct ccp_rsa_op {
+ u32 mod_size;
+ u32 input_len;
+};
+
+struct ccp_passthru_op {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+};
+
+struct ccp_ecc_op {
+ enum ccp_ecc_function function;
+};
+
+struct ccp_op {
+ struct ccp_cmd_queue *cmd_q;
+
+ u32 jobid;
+ u32 ioc;
+ u32 soc;
+ u32 ksb_key;
+ u32 ksb_ctx;
+ u32 init;
+ u32 eom;
+
+ struct ccp_mem src;
+ struct ccp_mem dst;
+
+ union {
+ struct ccp_aes_op aes;
+ struct ccp_xts_aes_op xts;
+ struct ccp_sha_op sha;
+ struct ccp_rsa_op rsa;
+ struct ccp_passthru_op passthru;
+ struct ccp_ecc_op ecc;
+ } u;
+};
+
+static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
+{
+ return lower_32_bits(info->address + info->offset);
+}
+
+static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
+{
+ return upper_32_bits(info->address + info->offset) & 0x0000ffff;
+}
+
int ccp_pci_init(void);
void ccp_pci_exit(void);
int ccp_platform_init(void);
void ccp_platform_exit(void);
+void ccp_add_device(struct ccp_device *ccp);
+void ccp_del_device(struct ccp_device *ccp);
+
struct ccp_device *ccp_alloc_struct(struct device *dev);
-int ccp_init(struct ccp_device *ccp);
-void ccp_destroy(struct ccp_device *ccp);
bool ccp_queues_suspended(struct ccp_device *ccp);
-
-irqreturn_t ccp_irq_handler(int irq, void *data);
+int ccp_cmd_queue_thread(void *data);
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
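The ccp-dev.h hunk above routes every engine operation through per-version data: common code calls ccp->vdata->perform-><op>() instead of the v3 routines directly, and the bus glue picks a struct ccp_vdata at probe time. What follows is a minimal user-space sketch of that dispatch pattern only; every type, function and value below is invented for illustration and is not the kernel code.

#include <stdio.h>

struct op { int len; };

/* Table of device-specific operations, one instance per hardware version. */
struct actions {
	int (*perform_aes)(struct op *op);
};

struct vdata {
	unsigned int version;
	const struct actions *perform;
};

struct device_ctx {
	const struct vdata *vdata;
};

/* A hypothetical "v3" implementation. */
static int v3_perform_aes(struct op *op)
{
	printf("v3 AES, %d bytes\n", op->len);
	return 0;
}

static const struct actions v3_actions = { .perform_aes = v3_perform_aes };
static const struct vdata v3_vdata = { .version = 3, .perform = &v3_actions };

int main(void)
{
	struct device_ctx dev = { .vdata = &v3_vdata };
	struct op op = { .len = 16 };

	/* Common code stays version-agnostic by dispatching through vdata. */
	return dev.vdata->perform->perform_aes(&op);
}

Selecting the table once at probe time is what lets the shared command code above drop its direct calls to the v3 helpers.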
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 6613aee79b87..eefdf595f758 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -13,124 +13,12 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
+#include <linux/ccp.h>
#include "ccp-dev.h"
-enum ccp_memtype {
- CCP_MEMTYPE_SYSTEM = 0,
- CCP_MEMTYPE_KSB,
- CCP_MEMTYPE_LOCAL,
- CCP_MEMTYPE__LAST,
-};
-
-struct ccp_dma_info {
- dma_addr_t address;
- unsigned int offset;
- unsigned int length;
- enum dma_data_direction dir;
-};
-
-struct ccp_dm_workarea {
- struct device *dev;
- struct dma_pool *dma_pool;
- unsigned int length;
-
- u8 *address;
- struct ccp_dma_info dma;
-};
-
-struct ccp_sg_workarea {
- struct scatterlist *sg;
- int nents;
-
- struct scatterlist *dma_sg;
- struct device *dma_dev;
- unsigned int dma_count;
- enum dma_data_direction dma_dir;
-
- unsigned int sg_used;
-
- u64 bytes_left;
-};
-
-struct ccp_data {
- struct ccp_sg_workarea sg_wa;
- struct ccp_dm_workarea dm_wa;
-};
-
-struct ccp_mem {
- enum ccp_memtype type;
- union {
- struct ccp_dma_info dma;
- u32 ksb;
- } u;
-};
-
-struct ccp_aes_op {
- enum ccp_aes_type type;
- enum ccp_aes_mode mode;
- enum ccp_aes_action action;
-};
-
-struct ccp_xts_aes_op {
- enum ccp_aes_action action;
- enum ccp_xts_aes_unit_size unit_size;
-};
-
-struct ccp_sha_op {
- enum ccp_sha_type type;
- u64 msg_bits;
-};
-
-struct ccp_rsa_op {
- u32 mod_size;
- u32 input_len;
-};
-
-struct ccp_passthru_op {
- enum ccp_passthru_bitwise bit_mod;
- enum ccp_passthru_byteswap byte_swap;
-};
-
-struct ccp_ecc_op {
- enum ccp_ecc_function function;
-};
-
-struct ccp_op {
- struct ccp_cmd_queue *cmd_q;
-
- u32 jobid;
- u32 ioc;
- u32 soc;
- u32 ksb_key;
- u32 ksb_ctx;
- u32 init;
- u32 eom;
-
- struct ccp_mem src;
- struct ccp_mem dst;
-
- union {
- struct ccp_aes_op aes;
- struct ccp_xts_aes_op xts;
- struct ccp_sha_op sha;
- struct ccp_rsa_op rsa;
- struct ccp_passthru_op passthru;
- struct ccp_ecc_op ecc;
- } u;
-};
-
/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
@@ -152,253 +40,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
-static u32 ccp_addr_lo(struct ccp_dma_info *info)
-{
- return lower_32_bits(info->address + info->offset);
-}
-
-static u32 ccp_addr_hi(struct ccp_dma_info *info)
-{
- return upper_32_bits(info->address + info->offset) & 0x0000ffff;
-}
-
-static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
-{
- struct ccp_cmd_queue *cmd_q = op->cmd_q;
- struct ccp_device *ccp = cmd_q->ccp;
- void __iomem *cr_addr;
- u32 cr0, cmd;
- unsigned int i;
- int ret = 0;
-
- /* We could read a status register to see how many free slots
- * are actually available, but reading that register resets it
- * and you could lose some error information.
- */
- cmd_q->free_slots--;
-
- cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
- | (op->jobid << REQ0_JOBID_SHIFT)
- | REQ0_WAIT_FOR_WRITE;
-
- if (op->soc)
- cr0 |= REQ0_STOP_ON_COMPLETE
- | REQ0_INT_ON_COMPLETE;
-
- if (op->ioc || !cmd_q->free_slots)
- cr0 |= REQ0_INT_ON_COMPLETE;
-
- /* Start at CMD_REQ1 */
- cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
-
- mutex_lock(&ccp->req_mutex);
-
- /* Write CMD_REQ1 through CMD_REQx first */
- for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
- iowrite32(*(cr + i), cr_addr);
-
- /* Tell the CCP to start */
- wmb();
- iowrite32(cr0, ccp->io_regs + CMD_REQ0);
-
- mutex_unlock(&ccp->req_mutex);
-
- if (cr0 & REQ0_INT_ON_COMPLETE) {
- /* Wait for the job to complete */
- ret = wait_event_interruptible(cmd_q->int_queue,
- cmd_q->int_rcvd);
- if (ret || cmd_q->cmd_error) {
- /* On error delete all related jobs from the queue */
- cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
- | op->jobid;
-
- iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
-
- if (!ret)
- ret = -EIO;
- } else if (op->soc) {
- /* Delete just head job from the queue on SoC */
- cmd = DEL_Q_ACTIVE
- | (cmd_q->id << DEL_Q_ID_SHIFT)
- | op->jobid;
-
- iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
- }
-
- cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
-
- cmd_q->int_rcvd = 0;
- }
-
- return ret;
-}
-
-static int ccp_perform_aes(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
- | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
- | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
- | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- if (op->u.aes.mode == CCP_AES_MODE_CFB)
- cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- if (op->init)
- cr[0] |= REQ1_INIT;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_xts_aes(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
- | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
- | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- if (op->init)
- cr[0] |= REQ1_INIT;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_sha(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
- | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
- | REQ1_INIT;
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
-
- if (op->eom) {
- cr[0] |= REQ1_EOM;
- cr[4] = lower_32_bits(op->u.sha.msg_bits);
- cr[5] = upper_32_bits(op->u.sha.msg_bits);
- } else {
- cr[4] = 0;
- cr[5] = 0;
- }
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_rsa(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
- | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
- | REQ1_EOM;
- cr[1] = op->u.rsa.input_len - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_passthru(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
- | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
- | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
-
- if (op->src.type == CCP_MEMTYPE_SYSTEM)
- cr[1] = op->src.u.dma.length - 1;
- else
- cr[1] = op->dst.u.dma.length - 1;
-
- if (op->src.type == CCP_MEMTYPE_SYSTEM) {
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
-
- if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
- cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
- } else {
- cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
- cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
- }
-
- if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
- } else {
- cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
- cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
- }
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_ecc(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = REQ1_ECC_AFFINE_CONVERT
- | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
- | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
- | REQ1_EOM;
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
{
int start;
@@ -837,7 +478,7 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
op.u.passthru.byte_swap = byte_swap;
- return ccp_perform_passthru(&op);
+ return cmd_q->ccp->vdata->perform->perform_passthru(&op);
}
static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
@@ -969,7 +610,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
}
}
- ret = ccp_perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_src;
@@ -1131,7 +772,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.soc = 1;
}
- ret = ccp_perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1296,7 +937,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (!src.sg_wa.bytes_left)
op.eom = 1;
- ret = ccp_perform_xts_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1453,7 +1094,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (sha->final && !src.sg_wa.bytes_left)
op.eom = 1;
- ret = ccp_perform_sha(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_sha(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_data;
@@ -1633,7 +1274,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.rsa.mod_size = rsa->key_size;
op.u.rsa.input_len = i_len;
- ret = ccp_perform_rsa(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_rsa(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1758,7 +1399,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.u.dma.offset = dst.sg_wa.sg_used;
op.dst.u.dma.length = op.src.u.dma.length;
- ret = ccp_perform_passthru(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1870,7 +1511,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = ccp_perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -2034,7 +1675,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = ccp_perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 7690467c42f8..0bf262e36b6b 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -59,9 +59,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
ccp_pci->msix_count = ret;
for (v = 0; v < ccp_pci->msix_count; v++) {
/* Set the interrupt names and request the irqs */
- snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
+ snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
+ ccp->name, v);
ccp_pci->msix[v].vector = msix_entry[v].vector;
- ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
+ ret = request_irq(ccp_pci->msix[v].vector,
+ ccp->vdata->perform->irqhandler,
0, ccp_pci->msix[v].name, dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
@@ -94,7 +96,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
return ret;
ccp->irq = pdev->irq;
- ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+ ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+ ccp->name, dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
@@ -179,6 +182,12 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto e_err;
ccp->dev_specific = ccp_pci;
+ ccp->vdata = (struct ccp_vdata *)id->driver_data;
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -221,7 +230,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(dev, ccp);
- ret = ccp_init(ccp);
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_iomap;
@@ -251,7 +260,7 @@ static void ccp_pci_remove(struct pci_dev *pdev)
if (!ccp)
return;
- ccp_destroy(ccp);
+ ccp->vdata->perform->destroy(ccp);
pci_iounmap(pdev, ccp->io_map);
@@ -312,7 +321,7 @@ static int ccp_pci_resume(struct pci_dev *pdev)
#endif
static const struct pci_device_id ccp_pci_table[] = {
- { PCI_VDEVICE(AMD, 0x1537), },
+ { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 66dd7c9d08c3..351f28d8c336 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -32,6 +32,33 @@ struct ccp_platform {
int coherent;
};
+static const struct acpi_device_id ccp_acpi_match[];
+static const struct of_device_id ccp_of_match[];
+
+static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
+
+ match = of_match_node(ccp_of_match, pdev->dev.of_node);
+ if (match && match->data)
+ return (struct ccp_vdata *)match->data;
+#endif
+	return NULL;
+}
+
+static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *match;
+
+ match = acpi_match_device(ccp_acpi_match, &pdev->dev);
+ if (match && match->driver_data)
+ return (struct ccp_vdata *)match->driver_data;
+#endif
+	return NULL;
+}
+
static int ccp_get_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
@@ -43,7 +70,8 @@ static int ccp_get_irq(struct ccp_device *ccp)
return ret;
ccp->irq = ret;
- ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+ ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+ ccp->name, dev);
if (ret) {
dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
return ret;
@@ -106,6 +134,13 @@ static int ccp_platform_probe(struct platform_device *pdev)
goto e_err;
ccp->dev_specific = ccp_platform;
+ ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
+ : ccp_get_acpi_version(pdev);
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -137,7 +172,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ccp);
- ret = ccp_init(ccp);
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_err;
@@ -155,7 +190,7 @@ static int ccp_platform_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
- ccp_destroy(ccp);
+ ccp->vdata->perform->destroy(ccp);
dev_notice(dev, "disabled\n");
@@ -214,7 +249,7 @@ static int ccp_platform_resume(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id ccp_acpi_match[] = {
- { "AMDI0C00", 0 },
+ { "AMDI0C00", (kernel_ulong_t)&ccpv3 },
{ },
};
MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
@@ -222,7 +257,8 @@ MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id ccp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a" },
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&ccpv3 },
{ },
};
MODULE_DEVICE_TABLE(of, ccp_of_match);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index e52496a172d0..2296934455fc 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1031,6 +1031,18 @@ static int aead_perform(struct aead_request *req, int encrypt,
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
+ buf = chainup_buffers(dev, req->src, crypt->auth_len,
+ &src_hook, flags, src_direction);
+ req_ctx->src = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
+ if (!buf)
+ goto free_buf_src;
+
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+
req_ctx->dst = NULL;
if (req->src != req->dst) {
@@ -1055,20 +1067,6 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
}
- buf = chainup_buffers(dev, req->src, crypt->auth_len,
- &src_hook, flags, src_direction);
- req_ctx->src = src_hook.next;
- crypt->src_buf = src_hook.phys_next;
- if (!buf)
- goto free_buf_src;
-
- if (!encrypt || !req_ctx->dst) {
- lastlen = buf->buf_len;
- if (lastlen >= authsize)
- crypt->icv_rev_aes = buf->phys_addr +
- buf->buf_len - authsize;
- }
-
if (unlikely(lastlen < authsize)) {
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index dd355bd19474..d420ec751c7c 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -36,6 +36,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -152,13 +153,10 @@ struct omap_aes_dev {
unsigned long flags;
int err;
- spinlock_t lock;
- struct crypto_queue queue;
-
struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
struct ablkcipher_request *req;
+ struct crypto_engine *engine;
/*
* total is used by PIO mode for book keeping so introduce
@@ -532,9 +530,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- dd->flags &= ~FLAGS_BUSY;
-
- req->base.complete(&req->base, err);
+ crypto_finalize_request(dd->engine, req, err);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -604,34 +600,25 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
- struct crypto_async_request *async_req, *backlog;
- struct omap_aes_ctx *ctx;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0, len;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
+ return crypto_transfer_request_to_engine(dd->engine, req);
- if (!async_req)
- return ret;
+ return 0;
+}
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+ struct omap_aes_reqctx *rctx;
+ int len;
- req = ablkcipher_request_cast(async_req);
+ if (!dd)
+ return -ENODEV;
/* assign new request to device */
dd->req = req;
@@ -662,16 +649,20 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->ctx = ctx;
ctx->dd = dd;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
- if (err) {
- /* aes_task will not finish it, so do it here */
- omap_aes_finish_req(dd, err);
- tasklet_schedule(&dd->queue_task);
- }
+ return omap_aes_write_ctrl(dd);
+}
- return ret; /* return ret, which is enqueue return value */
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+
+ if (!dd)
+ return -ENODEV;
+
+ return omap_aes_crypt_dma_start(dd);
}
static void omap_aes_done_task(unsigned long data)
@@ -704,18 +695,10 @@ static void omap_aes_done_task(unsigned long data)
}
omap_aes_finish_req(dd, 0);
- omap_aes_handle_queue(dd, NULL);
pr_debug("exit\n");
}
-static void omap_aes_queue_task(unsigned long data)
-{
- struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-
- omap_aes_handle_queue(dd, NULL);
-}
-
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -1175,9 +1158,6 @@ static int omap_aes_probe(struct platform_device *pdev)
dd->dev = dev;
platform_set_drvdata(pdev, dd);
- spin_lock_init(&dd->lock);
- crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
-
err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
omap_aes_get_res_pdev(dd, pdev, &res);
if (err)
@@ -1209,7 +1189,6 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
- tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
err = omap_aes_dma_init(dd);
if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
@@ -1250,7 +1229,20 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
+ /* Initialize crypto engine */
+	dd->engine = crypto_engine_alloc_init(dev, 1);
+	if (!dd->engine) {
+		err = -ENOMEM;
+		goto err_algs;
+	}
+
+ dd->engine->prepare_request = omap_aes_prepare_req;
+ dd->engine->crypt_one_request = omap_aes_crypt_req;
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine;
+
return 0;
+err_engine:
+ crypto_engine_exit(dd->engine);
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
@@ -1260,7 +1252,6 @@ err_algs:
omap_aes_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
pm_runtime_disable(dev);
err_res:
dd = NULL;
@@ -1286,8 +1277,8 @@ static int omap_aes_remove(struct platform_device *pdev)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
+ crypto_engine_exit(dd->engine);
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
dd = NULL;
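The omap-aes diff above replaces the driver's private crypto_queue, spinlock and queue tasklet with the crypto engine, which calls back into the driver in two phases: prepare_request and then crypt_one_request. Below is a small self-contained sketch of that two-phase callback shape with invented types; the real crypto_engine API operates on struct ablkcipher_request and handles backlog and completion itself.

#include <stdio.h>

struct request { const char *name; };

/* A toy "engine" that owns queueing policy and calls the driver back
 * in two phases, mirroring prepare_request / crypt_one_request. */
struct engine {
	int (*prepare)(struct engine *e, struct request *req);
	int (*do_one)(struct engine *e, struct request *req);
};

static int engine_run(struct engine *e, struct request *req)
{
	int err = e->prepare(e, req);

	if (err)
		return err;
	return e->do_one(e, req);
}

/* Hypothetical driver callbacks. */
static int drv_prepare(struct engine *e, struct request *req)
{
	printf("prepare %s\n", req->name);
	return 0;
}

static int drv_do_one(struct engine *e, struct request *req)
{
	printf("crypt %s\n", req->name);
	return 0;
}

int main(void)
{
	struct engine eng = { .prepare = drv_prepare, .do_one = drv_do_one };
	struct request req = { .name = "cbc(aes) block" };

	return engine_run(&eng, &req);
}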
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index f96d427e502c..5a07208ce778 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -55,8 +55,8 @@
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
-#define ADF_C62X_DEVICE_NAME "c62x"
-#define ADF_C62XVF_DEVICE_NAME "c62xvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index e78a1d7d88fc..b40d9c8dad96 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -121,7 +121,6 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarting_notify(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
- adf_dev_restore(accel_dev);
if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
index ef5988afd4c6..b5484bfa6996 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -58,7 +58,7 @@ struct adf_user_cfg_key_val {
uint64_t padding3;
};
enum adf_cfg_val_type type;
-};
+} __packed;
struct adf_user_cfg_section {
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
@@ -70,7 +70,7 @@ struct adf_user_cfg_section {
struct adf_user_cfg_section *next;
uint64_t padding3;
};
-};
+} __packed;
struct adf_user_cfg_ctl_data {
union {
@@ -78,5 +78,5 @@ struct adf_user_cfg_ctl_data {
uint64_t padding;
};
uint8_t device_id;
-};
+} __packed;
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index f267d9e42e0b..d7dd18d9bef8 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -49,7 +49,6 @@
#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
-#define ADF_ARB_REQ_RING_NUM 8
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
#define ADF_ARB_OFFSET 0x30000
@@ -64,15 +63,6 @@
ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
(ADF_ARB_REG_SLOT * index), value)
-#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
- ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
- (ADF_ARB_REG_SIZE * index), value)
-
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
(ADF_ARB_REG_SIZE * index), value)
@@ -99,15 +89,6 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
for (arb = 0; arb < ADF_ARB_NUM; arb++)
WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
- /* Setup service weighting */
- for (arb = 0; arb < ADF_ARB_NUM; arb++)
- for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
- WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
-
- /* Setup ring response ordering */
- for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
- WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
-
/* Setup worker queue registers */
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, i);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index d97db990955d..5d1ee7e53492 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -112,27 +112,27 @@ enum icp_qat_uof_mem_region {
};
enum icp_qat_uof_regtype {
- ICP_NO_DEST,
- ICP_GPA_REL,
- ICP_GPA_ABS,
- ICP_GPB_REL,
- ICP_GPB_ABS,
- ICP_SR_REL,
- ICP_SR_RD_REL,
- ICP_SR_WR_REL,
- ICP_SR_ABS,
- ICP_SR_RD_ABS,
- ICP_SR_WR_ABS,
- ICP_DR_REL,
- ICP_DR_RD_REL,
- ICP_DR_WR_REL,
- ICP_DR_ABS,
- ICP_DR_RD_ABS,
- ICP_DR_WR_ABS,
- ICP_LMEM,
- ICP_LMEM0,
- ICP_LMEM1,
- ICP_NEIGH_REL,
+ ICP_NO_DEST = 0,
+ ICP_GPA_REL = 1,
+ ICP_GPA_ABS = 2,
+ ICP_GPB_REL = 3,
+ ICP_GPB_ABS = 4,
+ ICP_SR_REL = 5,
+ ICP_SR_RD_REL = 6,
+ ICP_SR_WR_REL = 7,
+ ICP_SR_ABS = 8,
+ ICP_SR_RD_ABS = 9,
+ ICP_SR_WR_ABS = 10,
+ ICP_DR_REL = 19,
+ ICP_DR_RD_REL = 20,
+ ICP_DR_WR_REL = 21,
+ ICP_DR_ABS = 22,
+ ICP_DR_RD_ABS = 23,
+ ICP_DR_WR_ABS = 24,
+ ICP_LMEM = 26,
+ ICP_LMEM0 = 27,
+ ICP_LMEM1 = 28,
+ ICP_NEIGH_REL = 31,
};
enum icp_qat_css_fwtype {
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59e4c3af15ed..1e8852a8a057 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1064,8 +1064,7 @@ static int qat_alg_aead_init(struct crypto_aead *tfm,
if (IS_ERR(ctx->hash_tfm))
return PTR_ERR(ctx->hash_tfm);
ctx->qat_hash_alg = hash;
- crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
- sizeof(struct qat_crypto_request));
+ crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
@@ -1114,8 +1113,7 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->lock);
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
- sizeof(struct qat_crypto_request);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
ctx->tfm = tfm;
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 51c594fdacdc..e5c0727d4876 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -340,14 +340,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
if (!ret)
return -EINPROGRESS;
-unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.enc.m);
- else
- if (!dma_mapping_error(dev, qat_req->in.enc.m))
- dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
- DMA_TO_DEVICE);
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
@@ -356,15 +358,14 @@ unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.enc.c))
dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
DMA_FROM_DEVICE);
-unmap_in_params:
- if (!dma_mapping_error(dev, qat_req->phy_in))
- dma_unmap_single(dev, qat_req->phy_in,
- sizeof(struct qat_rsa_input_params),
- DMA_TO_DEVICE);
- if (!dma_mapping_error(dev, qat_req->phy_out))
- dma_unmap_single(dev, qat_req->phy_out,
- sizeof(struct qat_rsa_output_params),
- DMA_TO_DEVICE);
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.enc.m);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.enc.m))
+ dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
return ret;
}
@@ -472,14 +473,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
if (!ret)
return -EINPROGRESS;
-unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.dec.c);
- else
- if (!dma_mapping_error(dev, qat_req->in.dec.c))
- dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
- DMA_TO_DEVICE);
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
@@ -488,15 +491,14 @@ unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.dec.m))
dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
DMA_FROM_DEVICE);
-unmap_in_params:
- if (!dma_mapping_error(dev, qat_req->phy_in))
- dma_unmap_single(dev, qat_req->phy_in,
- sizeof(struct qat_rsa_input_params),
- DMA_TO_DEVICE);
- if (!dma_mapping_error(dev, qat_req->phy_out))
- dma_unmap_single(dev, qat_req->phy_out,
- sizeof(struct qat_rsa_output_params),
- DMA_TO_DEVICE);
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.dec.c);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dec.c))
+ dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
return ret;
}
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 25d15f19c2b3..9b961b37a282 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -688,7 +688,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
int mflag = 0;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
- for (ae = 0; ae <= max_ae; ae++) {
+ for (ae = 0; ae < max_ae; ae++) {
if (!test_bit(ae,
(unsigned long *)&handle->hal_handle->ae_mask))
continue;
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
index 7051c6c715f3..30f91297b4b6 100644
--- a/drivers/crypto/rockchip/Makefile
+++ b/drivers/crypto/rockchip/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk3288_crypto.o \
rk3288_crypto_ablkcipher.o \
+ rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index da9c73dce4af..af508258d2ea 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -208,6 +208,8 @@ static void rk_crypto_tasklet_cb(unsigned long data)
if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
dev->ablk_req = ablkcipher_request_cast(async_req);
+ else
+ dev->ahash_req = ahash_request_cast(async_req);
err = dev->start(dev);
if (err)
dev->complete(dev, err);
@@ -220,6 +222,9 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_cbc_des_alg,
&rk_ecb_des3_ede_alg,
&rk_cbc_des3_ede_alg,
+ &rk_ahash_sha1,
+ &rk_ahash_sha256,
+ &rk_ahash_md5,
};
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
@@ -229,15 +234,24 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
- err = crypto_register_alg(&rk_cipher_algs[i]->alg);
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ err = crypto_register_alg(
+ &rk_cipher_algs[i]->alg.crypto);
+ else
+ err = crypto_register_ahash(
+ &rk_cipher_algs[i]->alg.hash);
if (err)
goto err_cipher_algs;
}
return 0;
err_cipher_algs:
- for (k = 0; k < i; k++)
- crypto_unregister_alg(&rk_cipher_algs[k]->alg);
+	for (k = 0; k < i; k++) {
+		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
+			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+		else
+			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
+	}
return err;
}
@@ -245,8 +259,12 @@ static void rk_crypto_unregister(void)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
- crypto_unregister_alg(&rk_cipher_algs[i]->alg);
+ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+ else
+ crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ }
}
static void rk_crypto_action(void *data)
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index e499c2c6c903..d7b71fea320b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -6,6 +6,10 @@
#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <crypto/internal/hash.h>
+
+#include <crypto/md5.h>
+#include <crypto/sha.h>
#define _SBF(v, f) ((v) << (f))
@@ -149,6 +153,28 @@
#define RK_CRYPTO_TDES_KEY3_0 0x0130
#define RK_CRYPTO_TDES_KEY3_1 0x0134
+/* HASH */
+#define RK_CRYPTO_HASH_CTRL 0x0180
+#define RK_CRYPTO_HASH_SWAP_DO BIT(3)
+#define RK_CRYPTO_HASH_SWAP_DI BIT(2)
+#define RK_CRYPTO_HASH_SHA1 _SBF(0x00, 0)
+#define RK_CRYPTO_HASH_MD5 _SBF(0x01, 0)
+#define RK_CRYPTO_HASH_SHA256 _SBF(0x02, 0)
+#define RK_CRYPTO_HASH_PRNG _SBF(0x03, 0)
+
+#define RK_CRYPTO_HASH_STS 0x0184
+#define RK_CRYPTO_HASH_DONE BIT(0)
+
+#define RK_CRYPTO_HASH_MSG_LEN 0x0188
+#define RK_CRYPTO_HASH_DOUT_0 0x018c
+#define RK_CRYPTO_HASH_DOUT_1 0x0190
+#define RK_CRYPTO_HASH_DOUT_2 0x0194
+#define RK_CRYPTO_HASH_DOUT_3 0x0198
+#define RK_CRYPTO_HASH_DOUT_4 0x019c
+#define RK_CRYPTO_HASH_DOUT_5 0x01a0
+#define RK_CRYPTO_HASH_DOUT_6 0x01a4
+#define RK_CRYPTO_HASH_DOUT_7 0x01a8
+
#define CRYPTO_READ(dev, offset) \
readl_relaxed(((dev)->reg + (offset)))
#define CRYPTO_WRITE(dev, offset, val) \
@@ -166,6 +192,7 @@ struct rk_crypto_info {
struct crypto_queue queue;
struct tasklet_struct crypto_tasklet;
struct ablkcipher_request *ablk_req;
+ struct ahash_request *ahash_req;
/* device lock */
spinlock_t lock;
@@ -195,15 +222,36 @@ struct rk_crypto_info {
void (*unload_data)(struct rk_crypto_info *dev);
};
+/* the private variable of hash */
+struct rk_ahash_ctx {
+ struct rk_crypto_info *dev;
+ /* for fallback */
+ struct crypto_ahash *fallback_tfm;
+};
+
+/* the private variable of hash for fallback */
+struct rk_ahash_rctx {
+ struct ahash_request fallback_req;
+};
+
/* the private variable of cipher */
struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
};
+enum alg_type {
+ ALG_TYPE_HASH,
+ ALG_TYPE_CIPHER,
+};
+
struct rk_crypto_tmp {
- struct rk_crypto_info *dev;
- struct crypto_alg alg;
+ struct rk_crypto_info *dev;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
+ enum alg_type type;
};
extern struct rk_crypto_tmp rk_ecb_aes_alg;
@@ -213,4 +261,8 @@ extern struct rk_crypto_tmp rk_cbc_des_alg;
extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
+extern struct rk_crypto_tmp rk_ahash_sha1;
+extern struct rk_crypto_tmp rk_ahash_sha256;
+extern struct rk_crypto_tmp rk_ahash_md5;
+
#endif
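The rk3288_crypto.h hunk above turns struct rk_crypto_tmp into a tagged union so a single array can hold both cipher and hash algorithms, with registration switching on the type field. Here is a standalone sketch of the same tagged-union registration pattern, using made-up stand-ins for crypto_alg and ahash_alg:

#include <stdio.h>

enum alg_type { ALG_TYPE_HASH, ALG_TYPE_CIPHER };

struct cipher_alg { const char *name; };
struct hash_alg   { const char *name; };

/* One wrapper per algorithm: the type tag selects the live union member. */
struct alg_tmpl {
	enum alg_type type;
	union {
		struct cipher_alg crypto;
		struct hash_alg hash;
	} alg;
};

static struct alg_tmpl algs[] = {
	{ .type = ALG_TYPE_CIPHER, .alg.crypto = { .name = "ecb(aes)" } },
	{ .type = ALG_TYPE_HASH,   .alg.hash   = { .name = "sha1" } },
};

int main(void)
{
	unsigned int i;

	/* Registration dispatches on the tag, as rk_crypto_register() does. */
	for (i = 0; i < sizeof(algs) / sizeof(algs[0]); i++) {
		if (algs[i].type == ALG_TYPE_CIPHER)
			printf("register cipher %s\n", algs[i].alg.crypto.name);
		else
			printf("register hash %s\n", algs[i].alg.hash.name);
	}
	return 0;
}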
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index d98b681f6c06..b5a3afe222e4 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -336,7 +336,7 @@ static int rk_ablk_cra_init(struct crypto_tfm *tfm)
struct crypto_alg *alg = tfm->__crt_alg;
struct rk_crypto_tmp *algt;
- algt = container_of(alg, struct rk_crypto_tmp, alg);
+ algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
ctx->dev = algt->dev;
ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
@@ -357,7 +357,8 @@ static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
}
struct rk_crypto_tmp rk_ecb_aes_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-rk",
.cra_priority = 300,
@@ -381,7 +382,8 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-rk",
.cra_priority = 300,
@@ -406,7 +408,8 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
};
struct rk_crypto_tmp rk_ecb_des_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-rk",
.cra_priority = 300,
@@ -430,7 +433,8 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
};
struct rk_crypto_tmp rk_cbc_des_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-rk",
.cra_priority = 300,
@@ -455,7 +459,8 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-ede-rk",
.cra_priority = 300,
@@ -480,7 +485,8 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-ede-rk",
.cra_priority = 300,
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
new file mode 100644
index 000000000000..718588219f75
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -0,0 +1,404 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+/*
+ * The hardware cannot hash a zero-length message, so return the
+ * precomputed digest of the empty message in that case.
+ */
+
+static int zero_message_process(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int rk_digest_size = crypto_ahash_digestsize(tfm);
+
+ switch (rk_digest_size) {
+ case SHA1_DIGEST_SIZE:
+ memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
+ break;
+ case SHA256_DIGEST_SIZE:
+ memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
+ break;
+ case MD5_DIGEST_SIZE:
+ memcpy(req->result, md5_zero_message_hash, rk_digest_size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
+{
+ if (dev->ahash_req->base.complete)
+ dev->ahash_req->base.complete(&dev->ahash_req->base, err);
+}
+
+static void rk_ahash_reg_init(struct rk_crypto_info *dev)
+{
+ int reg_status = 0;
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
+ RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
+ reg_status &= (~RK_CRYPTO_HASH_FLUSH);
+ reg_status |= _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
+ RK_CRYPTO_HRDMA_DONE_ENA);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
+ RK_CRYPTO_HRDMA_DONE_INT);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
+ RK_CRYPTO_HASH_SWAP_DO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO |
+ RK_CRYPTO_BYTESWAP_BTFIFO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+}
+
+static int rk_ahash_init(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int rk_ahash_update(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int rk_ahash_final(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int rk_ahash_finup(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int rk_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int rk_ahash_export(struct ahash_request *req, void *out)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int rk_ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct rk_crypto_info *dev = NULL;
+ unsigned long flags;
+ int ret;
+
+ if (!req->nbytes)
+ return zero_message_process(req);
+
+ dev = tctx->dev;
+ dev->total = req->nbytes;
+ dev->left_bytes = req->nbytes;
+ dev->aligned = 0;
+ dev->mode = 0;
+ dev->align_size = 4;
+ dev->sg_dst = NULL;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_SHA256;
+ break;
+ case MD5_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rk_ahash_reg_init(dev);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = crypto_enqueue_request(&dev->queue, &req->base);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ tasklet_schedule(&dev->crypto_tasklet);
+
+	/*
+	 * The hardware needs some time to process the data after the last
+	 * DMA transfer has completed.
+	 *
+	 * The wait depends on the length of that last chunk, so a fixed
+	 * delay cannot be used here.  Sleeping 10-50 us per poll avoids
+	 * hammering the status register while still reacting quickly once
+	 * the DMA has finished.
+	 */
+ while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+ usleep_range(10, 50);
+
+ memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+ crypto_ahash_digestsize(tfm));
+
+ return 0;
+}
+
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
+ (RK_CRYPTO_HASH_START << 16));
+}
+
+static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+{
+ int err;
+
+ err = dev->load_data(dev, dev->sg_src, NULL);
+ if (!err)
+ crypto_ahash_dma_start(dev);
+ return err;
+}
+
+static int rk_ahash_start(struct rk_crypto_info *dev)
+{
+ return rk_ahash_set_data_start(dev);
+}
+
+static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+{
+ int err = 0;
+
+ dev->unload_data(dev);
+ if (dev->left_bytes) {
+ if (dev->aligned) {
+ if (sg_is_last(dev->sg_src)) {
+ dev_warn(dev->dev, "[%s:%d], Lack of data\n",
+ __func__, __LINE__);
+ err = -ENOMEM;
+ goto out_rx;
+ }
+ dev->sg_src = sg_next(dev->sg_src);
+ }
+ err = rk_ahash_set_data_start(dev);
+ } else {
+ dev->complete(dev, 0);
+ }
+
+out_rx:
+ return err;
+}
+
+static int rk_cra_hash_init(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct rk_crypto_tmp *algt;
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+ tctx->dev = algt->dev;
+ tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
+ if (!tctx->dev->addr_vir) {
+		dev_err(tctx->dev->dev, "failed to allocate a page for addr_vir\n");
+ return -ENOMEM;
+ }
+ tctx->dev->start = rk_ahash_start;
+ tctx->dev->update = rk_ahash_crypto_rx;
+ tctx->dev->complete = rk_ahash_crypto_complete;
+
+ /* for fallback */
+ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback_tfm)) {
+ dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ return PTR_ERR(tctx->fallback_tfm);
+ }
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct rk_ahash_rctx) +
+ crypto_ahash_reqsize(tctx->fallback_tfm));
+
+ return tctx->dev->enable_clk(tctx->dev);
+}
+
+static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ free_page((unsigned long)tctx->dev->addr_vir);
+ return tctx->dev->disable_clk(tctx->dev);
+}
+
+struct rk_crypto_tmp rk_ahash_sha1 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "rk-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_sha256 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "rk-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_md5 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "rk-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
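
The three rk_crypto_tmp definitions above register asynchronous SHA-1, SHA-256 and MD5 implementations with the crypto API under the generic names "sha1", "sha256" and "md5" (driver names "rk-sha1", "rk-sha256", "rk-md5"). A minimal sketch of how a kernel caller could exercise one of them through the ahash interface is shown below; the completion-based wait and the helper names (sg_hash_result, sha256_digest_example) are illustrative and not part of this driver.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct sg_hash_result {
	struct completion completion;
	int err;
};

/* Called by the crypto layer when the asynchronous request finishes. */
static void sg_hash_done(struct crypto_async_request *req, int err)
{
	struct sg_hash_result *res = req->data;

	if (err == -EINPROGRESS)	/* request only moved off the backlog */
		return;
	res->err = err;
	complete(&res->completion);
}

/* buf must be linearly mapped (e.g. kmalloc'ed) so it fits one sg entry. */
static int sha256_digest_example(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct sg_hash_result res;
	int ret;

	/* "sha256" may resolve to rk-sha256 when the rk3288 engine is present. */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sg_hash_done, &res);

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}
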
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index f214a8755827..5f161a9777e3 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -224,6 +224,7 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
{
if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
const struct of_device_id *match;
+
match = of_match_node(s5p_sss_dt_match,
pdev->dev.of_node);
return (struct samsung_aes_variant *)match->data;
@@ -382,7 +383,7 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
void __iomem *keystart;
if (iv)
- memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
@@ -391,13 +392,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
else
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
- memcpy(keystart, key, keylen);
+ memcpy_toio(keystart, key, keylen);
}
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
-
uint32_t aes_control;
int err;
unsigned long flags;
@@ -518,7 +518,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct s5p_aes_dev *dev = ctx->dev;
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
+ dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
}
@@ -566,7 +566,7 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
- struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->dev = s5p_dev;
tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
@@ -701,7 +701,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_algs;
}
- pr_info("s5p-sss driver registered\n");
+ dev_info(dev, "s5p-sss driver registered\n");
return 0;
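
The two hunks above that switch memcpy() to memcpy_toio() address an MMIO correctness issue: dev->aes_ioaddr is an __iomem mapping, and copies into device registers have to go through the I/O accessors. A minimal sketch of that pattern, assuming a hypothetical key register offset:

#include <linux/io.h>
#include <linux/types.h>

/* FOO_KEY_REG is a hypothetical offset, not an s5p-sss register. */
#define FOO_KEY_REG	0x80

static void foo_load_key(void __iomem *regs, const u8 *key, unsigned int keylen)
{
	/* regs points into MMIO space, so a plain memcpy() is not valid here. */
	memcpy_toio(regs + FOO_KEY_REG, key, keylen);
}
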
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 6c4f91c5e6b3..c3f3d89e4831 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -182,7 +182,6 @@ struct sahara_sha_reqctx {
u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
u8 context[SHA256_DIGEST_SIZE + 4];
- struct mutex mutex;
unsigned int mode;
unsigned int digest_size;
unsigned int context_size;
@@ -1096,7 +1095,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
if (!req->nbytes && !last)
return 0;
- mutex_lock(&rctx->mutex);
rctx->last = last;
if (!rctx->active) {
@@ -1109,7 +1107,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
mutex_unlock(&dev->queue_mutex);
wake_up_process(dev->kthread);
- mutex_unlock(&rctx->mutex);
return ret;
}
@@ -1137,8 +1134,6 @@ static int sahara_sha_init(struct ahash_request *req)
rctx->context_size = rctx->digest_size + 4;
rctx->active = 0;
- mutex_init(&rctx->mutex);
-
return 0;
}
@@ -1167,26 +1162,18 @@ static int sahara_sha_digest(struct ahash_request *req)
static int sahara_sha_export(struct ahash_request *req, void *out)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
- memcpy(out, ctx, sizeof(struct sahara_ctx));
- memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
- sizeof(struct sahara_sha_reqctx));
+ memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
return 0;
}
static int sahara_sha_import(struct ahash_request *req, const void *in)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
- memcpy(ctx, in, sizeof(struct sahara_ctx));
- memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
- sizeof(struct sahara_sha_reqctx));
+ memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
return 0;
}
@@ -1272,6 +1259,7 @@ static struct ahash_alg sha_v3_algs[] = {
.export = sahara_sha_export,
.import = sahara_sha_import,
.halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sahara-sha1",
@@ -1299,6 +1287,7 @@ static struct ahash_alg sha_v4_algs[] = {
.export = sahara_sha_export,
.import = sahara_sha_import,
.halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sahara-sha256",
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index a19ee127edca..7be3fbcd8d78 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -251,11 +251,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+ dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->nbytes, rx_cnt,
- oo, mo.length, oleft, areq->nbytes, tx_cnt,
- todo, ob);
+ oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);
if (tx_cnt == 0)
continue;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 4c243c1ffc7f..790f7cadc1ed 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "[%s]: ioremap failed!", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
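
The ux500 hunk above, and the identical one in hash_core.c that follows, fix the same mistake: devm_ioremap_resource() reports failure with an ERR_PTR()-encoded pointer, never with NULL, so the result has to be tested with IS_ERR() and the error propagated via PTR_ERR(). A sketch of the usual probe-time pattern, with foo_probe and the resource index as illustrative placeholders:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* a NULL check would miss the error */

	/* ... map registers, request IRQs, register with subsystems ... */
	return 0;
}
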
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index d6fdc583ce5d..574e87c7f2b8 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "%s: ioremap() failed!\n", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
spin_lock_init(&device_data->ctx_lock);