-rw-r--r--Documentation/crypto/crypto_engine.rst48
-rw-r--r--Documentation/crypto/devel-algos.rst8
-rw-r--r--Documentation/devicetree/bindings/crypto/arm-cryptocell.txt3
-rw-r--r--Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt6
-rw-r--r--Documentation/devicetree/bindings/rng/imx-rng.txt (renamed from Documentation/devicetree/bindings/rng/imx-rngc.txt)11
-rw-r--r--Documentation/devicetree/bindings/rng/ks-sa-rng.txt21
-rw-r--r--Documentation/devicetree/bindings/rng/omap_rng.txt7
-rw-r--r--Documentation/devicetree/bindings/rng/st,stm32-rng.txt4
-rw-r--r--MAINTAINERS15
-rw-r--r--arch/arm/crypto/Kconfig6
-rw-r--r--arch/arm/crypto/Makefile4
-rw-r--r--arch/arm/crypto/aes-cipher-core.S19
-rw-r--r--arch/arm/crypto/speck-neon-core.S432
-rw-r--r--arch/arm/crypto/speck-neon-glue.c288
-rw-r--r--arch/arm64/crypto/Kconfig6
-rw-r--r--arch/arm64/crypto/Makefile8
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c47
-rw-r--r--arch/arm64/crypto/aes-glue.c95
-rw-r--r--arch/arm64/crypto/aes-modes.S355
-rw-r--r--arch/arm64/crypto/aes-neonbs-glue.c48
-rw-r--r--arch/arm64/crypto/chacha20-neon-glue.c12
-rw-r--r--arch/arm64/crypto/sha256-glue.c36
-rw-r--r--arch/arm64/crypto/speck-neon-core.S352
-rw-r--r--arch/arm64/crypto/speck-neon-glue.c282
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S1404
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c230
-rw-r--r--arch/x86/crypto/blowfish_glue.c230
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c491
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c495
-rw-r--r--arch/x86/crypto/camellia_glue.c356
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c352
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c489
-rw-r--r--arch/x86/crypto/des3_ede_glue.c238
-rw-r--r--arch/x86/crypto/glue_helper.c391
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c478
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c518
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c519
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c28
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_ctx.h8
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c27
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_ctx.h8
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c30
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_ctx.h8
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c493
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c339
-rw-r--r--arch/x86/include/asm/crypto/camellia.h16
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h75
-rw-r--r--arch/x86/include/asm/crypto/serpent-avx.h17
-rw-r--r--arch/x86/include/asm/crypto/twofish.h19
-rw-r--r--crypto/Kconfig129
-rw-r--r--crypto/Makefile4
-rw-r--r--crypto/ablk_helper.c150
-rw-r--r--crypto/ahash.c25
-rw-r--r--crypto/algapi.c8
-rw-r--r--crypto/api.c34
-rw-r--r--crypto/cfb.c353
-rw-r--r--crypto/crypto_engine.c301
-rw-r--r--crypto/crypto_user.c2
-rw-r--r--crypto/ecc.c23
-rw-r--r--crypto/ecdh.c23
-rw-r--r--crypto/internal.h1
-rw-r--r--crypto/lrw.c154
-rw-r--r--crypto/mcryptd.c34
-rw-r--r--crypto/md4.c17
-rw-r--r--crypto/md5.c17
-rw-r--r--crypto/rsa-pkcs1pad.c2
-rw-r--r--crypto/simd.c50
-rw-r--r--crypto/sm4_generic.c244
-rw-r--r--crypto/speck.c307
-rw-r--r--crypto/tcrypt.c3
-rw-r--r--crypto/testmgr.c45
-rw-r--r--crypto/testmgr.h1882
-rw-r--r--crypto/xts.c72
-rw-r--r--drivers/char/hw_random/Kconfig7
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c2
-rw-r--r--drivers/char/hw_random/cavium-rng-vf.c2
-rw-r--r--drivers/char/hw_random/cavium-rng.c2
-rw-r--r--drivers/char/hw_random/imx-rngc.c2
-rw-r--r--drivers/char/hw_random/ks-sa-rng.c257
-rw-r--r--drivers/char/hw_random/mxc-rnga.c23
-rw-r--r--drivers/char/hw_random/omap-rng.c22
-rw-r--r--drivers/char/hw_random/stm32-rng.c44
-rw-r--r--drivers/crypto/Kconfig34
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/atmel-aes.c8
-rw-r--r--drivers/crypto/atmel-sha.c9
-rw-r--r--drivers/crypto/atmel-tdes.c9
-rw-r--r--drivers/crypto/bcm/cipher.c4
-rw-r--r--drivers/crypto/bcm/util.c1
-rw-r--r--drivers/crypto/bfin_crc.c743
-rw-r--r--drivers/crypto/bfin_crc.h124
-rw-r--r--drivers/crypto/caam/caamalg.c21
-rw-r--r--drivers/crypto/caam/caamalg_desc.c165
-rw-r--r--drivers/crypto/caam/caamalg_desc.h24
-rw-r--r--drivers/crypto/caam/caamalg_qi.c388
-rw-r--r--drivers/crypto/caam/ctrl.c42
-rw-r--r--drivers/crypto/caam/qi.c11
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-rsa.c7
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c2
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c7
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c2
-rw-r--r--drivers/crypto/ccp/ccp-ops.c108
-rw-r--r--drivers/crypto/ccp/psp-dev.c15
-rw-r--r--drivers/crypto/ccp/sp-dev.c6
-rw-r--r--drivers/crypto/ccree/Makefile7
-rw-r--r--drivers/crypto/ccree/cc_aead.c2718
-rw-r--r--drivers/crypto/ccree/cc_aead.h109
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c1651
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h71
-rw-r--r--drivers/crypto/ccree/cc_cipher.c1150
-rw-r--r--drivers/crypto/ccree/cc_cipher.h59
-rw-r--r--drivers/crypto/ccree/cc_crypto_ctx.h133
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c101
-rw-r--r--drivers/crypto/ccree/cc_debugfs.h32
-rw-r--r--drivers/crypto/ccree/cc_driver.c518
-rw-r--r--drivers/crypto/ccree/cc_driver.h208
-rw-r--r--drivers/crypto/ccree/cc_fips.c120
-rw-r--r--drivers/crypto/ccree/cc_fips.h36
-rw-r--r--drivers/crypto/ccree/cc_hash.c2296
-rw-r--r--drivers/crypto/ccree/cc_hash.h109
-rw-r--r--drivers/crypto/ccree/cc_host_regs.h145
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h576
-rw-r--r--drivers/crypto/ccree/cc_ivgen.c279
-rw-r--r--drivers/crypto/ccree/cc_ivgen.h55
-rw-r--r--drivers/crypto/ccree/cc_kernel_regs.h168
-rw-r--r--drivers/crypto/ccree/cc_lli_defs.h59
-rw-r--r--drivers/crypto/ccree/cc_pm.c122
-rw-r--r--drivers/crypto/ccree/cc_pm.h56
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c711
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.h51
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.c120
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.h65
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c577
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h11
-rw-r--r--drivers/crypto/chelsio/chcr_core.h6
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h31
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel.c114
-rw-r--r--drivers/crypto/inside-secure/safexcel.h22
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c258
-rw-r--r--drivers/crypto/ixp4xx_crypto.c2
-rw-r--r--drivers/crypto/marvell/cesa.c1
-rw-r--r--drivers/crypto/mxs-dcp.c14
-rw-r--r--drivers/crypto/n2_core.c12
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c5
-rw-r--r--drivers/crypto/omap-aes.c112
-rw-r--r--drivers/crypto/omap-aes.h3
-rw-r--r--drivers/crypto/omap-crypto.c4
-rw-r--r--drivers/crypto/omap-des.c24
-rw-r--r--drivers/crypto/omap-sham.c106
-rw-r--r--drivers/crypto/picoxcell_crypto.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c9
-rw-r--r--drivers/crypto/s5p-sss.c34
-rw-r--r--drivers/crypto/sahara.c6
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c964
-rw-r--r--drivers/crypto/stm32/stm32-hash.c41
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c1
-rw-r--r--drivers/crypto/talitos.c218
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c14
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c18
-rw-r--r--drivers/crypto/virtio/Kconfig1
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c16
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h4
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c3
-rw-r--r--drivers/staging/ccree/Kconfig4
-rw-r--r--drivers/staging/ccree/Makefile2
-rw-r--r--include/crypto/ablk_helper.h32
-rw-r--r--include/crypto/algapi.h1
-rw-r--r--include/crypto/engine.h68
-rw-r--r--include/crypto/hash.h11
-rw-r--r--include/crypto/internal/hash.h5
-rw-r--r--include/crypto/internal/simd.h7
-rw-r--r--include/crypto/lrw.h44
-rw-r--r--include/crypto/sm4.h28
-rw-r--r--include/crypto/speck.h62
-rw-r--r--include/crypto/xts.h17
-rw-r--r--include/linux/byteorder/generic.h17
-rw-r--r--include/linux/crypto.h8
183 files changed, 22264 insertions, 7763 deletions
diff --git a/Documentation/crypto/crypto_engine.rst b/Documentation/crypto/crypto_engine.rst
new file mode 100644
index 000000000000..8272ac92a14f
--- /dev/null
+++ b/Documentation/crypto/crypto_engine.rst
@@ -0,0 +1,48 @@
+=============
+CRYPTO ENGINE
+=============
+
+Overview
+--------
+The crypto engine (CE) API is a crypto queue manager.
+
+Requirement
+-----------
+You must put a struct crypto_engine_ctx at the start of your tfm_ctx:
+struct your_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ ...
+};
+Why: since the CE manages only crypto_async_request, it cannot know the
+underlying request type and so has access only to the TFM.
+Using container_of() to access __ctx is therefore impossible.
+Furthermore, the crypto engine cannot know the "struct your_tfm_ctx",
+so it must assume that crypto_engine_ctx is at the start of it.
+
+Order of operations
+-------------------
+You must obtain a struct crypto_engine via crypto_engine_alloc_init(),
+then start it via crypto_engine_start().
+
+Before transferring any request, you have to fill in the enginectx:
+- prepare_request: (function pointer) called if you need to do some processing before handling the request
+- unprepare_request: (function pointer) called to undo what was done in prepare_request
+- do_one_request: (function pointer) called to do the encryption for the current request
+
+Note that those three functions receive the crypto_async_request associated with the incoming request,
+so you need to recover the original request via container_of(areq, struct yourrequesttype_request, base);
+
+When your driver receives a crypto_request, you have to transfer it to
+the crypto engine via one of:
+- crypto_transfer_ablkcipher_request_to_engine()
+- crypto_transfer_aead_request_to_engine()
+- crypto_transfer_akcipher_request_to_engine()
+- crypto_transfer_hash_request_to_engine()
+- crypto_transfer_skcipher_request_to_engine()
+
+At the end of request processing, a call to one of the following functions is needed:
+- crypto_finalize_ablkcipher_request
+- crypto_finalize_aead_request
+- crypto_finalize_akcipher_request
+- crypto_finalize_hash_request
+- crypto_finalize_skcipher_request
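[Editor's illustration, not part of the patch: a minimal sketch of a driver following the layout and callbacks described above. The "mydrv" names, the choice of skcipher as the request type, and the trimmed error handling are all hypothetical.]

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct mydrv_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must be first */
	u32 key[8];				/* driver-specific state */
};

static int mydrv_do_one_request(struct crypto_engine *engine, void *areq)
{
	/* recover the original request from the crypto_async_request */
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/* ... program the hardware; once the result is ready: */
	crypto_finalize_skcipher_request(engine, req, 0);
	return 0;
}

/* At tfm init time, fill in the enginectx (prepare_request and
 * unprepare_request may be left NULL if there is nothing to do). */
static int mydrv_init_tfm(struct crypto_skcipher *tfm)
{
	struct mydrv_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.do_one_request = mydrv_do_one_request;
	return 0;
}

At probe time the driver would allocate the engine with crypto_engine_alloc_init(dev, true), start it with crypto_engine_start(), and route incoming requests through crypto_transfer_skcipher_request_to_engine().
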
diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst
index 66f50d32dcec..c45c6f400dbd 100644
--- a/Documentation/crypto/devel-algos.rst
+++ b/Documentation/crypto/devel-algos.rst
@@ -236,6 +236,14 @@ when used from another part of the kernel.
|
'---------------> HASH2
+Note that it is perfectly legal to "abandon" a request object:
+- call .init() and then (as many times as needed) .update()
+- _not_ call any of .final(), .finup() or .export() at any point in the future
+
+In other words, implementations should be mindful of resource allocation and
+clean-up. No resources related to request objects should remain allocated
+after a call to .init() or .update(), since there might be no chance to free them.
+
Specifics Of Asynchronous HASH Transformation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
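[Editor's illustration for the "abandoned request" note above, not part of the patch: a hypothetical .update() that keeps all per-request state inline in the request context, so a request that is never finalized cannot leak anything. The "myhash" names and state layout are invented.]

#include <crypto/internal/hash.h>

struct myhash_req_ctx {
	u64 count;
	u8 buf[64];	/* partial block kept inline -- no allocation */
};

static int myhash_update(struct ahash_request *req)
{
	struct myhash_req_ctx *rctx = ahash_request_ctx(req);

	/*
	 * Consume req->src, buffering any partial block in rctx->buf.
	 * Nothing is allocated here, so abandoning the request after
	 * .init()/.update() leaks no resources.
	 */
	rctx->count += req->nbytes;
	return 0;
}
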
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
index cec8d5d74e26..c2598ab27f2e 100644
--- a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -1,7 +1,8 @@
Arm TrustZone CryptoCell cryptographic engine
Required properties:
-- compatible: Should be "arm,cryptocell-712-ree".
+- compatible: Should be one of: "arm,cryptocell-712-ree",
+ "arm,cryptocell-710-ree" or "arm,cryptocell-630p-ree".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt number for the device.
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index 30c3ce6b502e..5dba55cdfa63 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -8,7 +8,11 @@ Required properties:
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
Optional properties:
-- clocks: Reference to the crypto engine clock.
+- clocks: Reference to the crypto engine clocks; the second clock is
+          needed for the Armada 7K/8K SoCs.
+- clock-names: mandatory if there is a second clock; in this case the
+               name must be "core" for the first clock and "reg" for
+               the second one.
Example:
diff --git a/Documentation/devicetree/bindings/rng/imx-rngc.txt b/Documentation/devicetree/bindings/rng/imx-rng.txt
index 93c7174a7bed..405c2b00ccb0 100644
--- a/Documentation/devicetree/bindings/rng/imx-rngc.txt
+++ b/Documentation/devicetree/bindings/rng/imx-rng.txt
@@ -1,15 +1,14 @@
-Freescale RNGC (Random Number Generator Version C)
-
-The driver also supports version B, which is mostly compatible
-to version C.
+Freescale RNGA/RNGB/RNGC (Random Number Generator Versions A, B and C)
Required properties:
- compatible : should be one of
+ "fsl,imx21-rnga"
+ "fsl,imx31-rnga" (backward compatible with "fsl,imx21-rnga")
"fsl,imx25-rngb"
"fsl,imx35-rngc"
- reg : offset and length of the register set of this block
-- interrupts : the interrupt number for the RNGC block
-- clocks : the RNGC clk source
+- interrupts : the interrupt number for the RNG block
+- clocks : the RNG clk source
Example:
diff --git a/Documentation/devicetree/bindings/rng/ks-sa-rng.txt b/Documentation/devicetree/bindings/rng/ks-sa-rng.txt
new file mode 100644
index 000000000000..b7a65b487901
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/ks-sa-rng.txt
@@ -0,0 +1,21 @@
+Keystone SoC Hardware Random Number Generator (HWRNG) Module
+
+On Keystone SoCs, the HWRNG module is a submodule of the Security Accelerator.
+
+- compatible: should be "ti,keystone-rng"
+- ti,syscon-sa-cfg: phandle to the syscon node of the SA configuration registers.
+  These registers are shared between the hwrng and crypto drivers.
+- clocks: phandle to the reference clocks for the subsystem
+- clock-names: functional clock name. Should be set to "fck"
+- reg: HWRNG module register space
+
+Example:
+/* K2HK */
+
+rng@24000 {
+ compatible = "ti,keystone-rng";
+ ti,syscon-sa-cfg = <&sa_config>;
+ clocks = <&clksa>;
+ clock-names = "fck";
+ reg = <0x24000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 9cf7876ab434..ea434ce50f36 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -13,7 +13,12 @@ Required properties:
- interrupts : the interrupt number for the RNG module.
Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
- clocks: the trng clock source. Only mandatory for the
- "inside-secure,safexcel-eip76" compatible.
+ "inside-secure,safexcel-eip76" compatible, the second clock is
+ needed for the Armada 7K/8K SoCs
+- clock-names: mandatory if there is a second clock, in this case the
+ name must be "core" for the first clock and "reg" for the second
+ one
+
Example:
/* AM335x */
diff --git a/Documentation/devicetree/bindings/rng/st,stm32-rng.txt b/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
index 47f04176f93b..1dfa7d51e006 100644
--- a/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
+++ b/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
@@ -11,6 +11,10 @@ Required properties:
- interrupts : The designated IRQ line for the RNG
- clocks : The clock needed to enable the RNG
+Optional properties:
+- resets : The reset needed to properly start the RNG
+- clock-error-detect : Enable clock error detection
+
Example:
rng: rng@50060800 {
diff --git a/MAINTAINERS b/MAINTAINERS
index 2328eed6aea9..9d42bb8bb120 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3252,12 +3252,11 @@ F: drivers/net/ieee802154/cc2520.c
F: include/linux/spi/cc2520.h
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
-CCREE ARM TRUSTZONE CRYPTOCELL 700 REE DRIVER
+CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER
M: Gilad Ben-Yossef <gilad@benyossef.com>
L: linux-crypto@vger.kernel.org
-L: driverdev-devel@linuxdriverproject.org
S: Supported
-F: drivers/staging/ccree/
+F: drivers/crypto/ccree/
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
CEC FRAMEWORK
@@ -6962,7 +6961,7 @@ F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_
INSIDE SECURE CRYPTO DRIVER
-M: Antoine Tenart <antoine.tenart@free-electrons.com>
+M: Antoine Tenart <antoine.tenart@bootlin.com>
F: drivers/crypto/inside-secure/
S: Maintained
L: linux-crypto@vger.kernel.org
@@ -7200,6 +7199,14 @@ L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/i40iw/
+INTEL SHA MULTIBUFFER DRIVER
+M: Megha Dey <megha.dey@linux.intel.com>
+R: Tim Chen <tim.c.chen@linux.intel.com>
+L: linux-crypto@vger.kernel.org
+S: Supported
+F: arch/x86/crypto/sha*-mb
+F: crypto/mcryptd.c
+
INTEL TELEMETRY DRIVER
M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
L: platform-driver-x86@vger.kernel.org
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index b8e69fe282b8..925d1364727a 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -121,4 +121,10 @@ config CRYPTO_CHACHA20_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_CHACHA20
+config CRYPTO_SPECK_NEON
+ tristate "NEON accelerated Speck cipher algorithms"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_SPECK
+
endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 30ef8e291271..3304e671918d 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
+obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -53,7 +54,9 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
+speck-neon-y := speck-neon-core.o speck-neon-glue.o
+ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
@@ -62,5 +65,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
$(call cmd,perl)
+endif
.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S
index 54b384084637..184d6c2d15d5 100644
--- a/arch/arm/crypto/aes-cipher-core.S
+++ b/arch/arm/crypto/aes-cipher-core.S
@@ -174,6 +174,16 @@
.ltorg
.endm
+ENTRY(__aes_arm_encrypt)
+ do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
+ENDPROC(__aes_arm_encrypt)
+
+ .align 5
+ENTRY(__aes_arm_decrypt)
+ do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
+ENDPROC(__aes_arm_decrypt)
+
+ .section ".rodata", "a"
.align L1_CACHE_SHIFT
.type __aes_arm_inverse_sbox, %object
__aes_arm_inverse_sbox:
@@ -210,12 +220,3 @@ __aes_arm_inverse_sbox:
.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox
-
-ENTRY(__aes_arm_encrypt)
- do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
-ENDPROC(__aes_arm_encrypt)
-
- .align 5
-ENTRY(__aes_arm_decrypt)
- do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
-ENDPROC(__aes_arm_decrypt)
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
new file mode 100644
index 000000000000..3c1e203e53b9
--- /dev/null
+++ b/arch/arm/crypto/speck-neon-core.S
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
+ *
+ * Copyright (c) 2018 Google, Inc
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+ .text
+ .fpu neon
+
+ // arguments
+ ROUND_KEYS .req r0 // const {u64,u32} *round_keys
+ NROUNDS .req r1 // int nrounds
+ DST .req r2 // void *dst
+ SRC .req r3 // const void *src
+ NBYTES .req r4 // unsigned int nbytes
+ TWEAK .req r5 // void *tweak
+
+ // registers which hold the data being encrypted/decrypted
+ X0 .req q0
+ X0_L .req d0
+ X0_H .req d1
+ Y0 .req q1
+ Y0_H .req d3
+ X1 .req q2
+ X1_L .req d4
+ X1_H .req d5
+ Y1 .req q3
+ Y1_H .req d7
+ X2 .req q4
+ X2_L .req d8
+ X2_H .req d9
+ Y2 .req q5
+ Y2_H .req d11
+ X3 .req q6
+ X3_L .req d12
+ X3_H .req d13
+ Y3 .req q7
+ Y3_H .req d15
+
+ // the round key, duplicated in all lanes
+ ROUND_KEY .req q8
+ ROUND_KEY_L .req d16
+ ROUND_KEY_H .req d17
+
+ // index vector for vtbl-based 8-bit rotates
+ ROTATE_TABLE .req d18
+
+ // multiplication table for updating XTS tweaks
+ GF128MUL_TABLE .req d19
+ GF64MUL_TABLE .req d19
+
+ // current XTS tweak value(s)
+ TWEAKV .req q10
+ TWEAKV_L .req d20
+ TWEAKV_H .req d21
+
+ TMP0 .req q12
+ TMP0_L .req d24
+ TMP0_H .req d25
+ TMP1 .req q13
+ TMP2 .req q14
+ TMP3 .req q15
+
+ .align 4
+.Lror64_8_table:
+ .byte 1, 2, 3, 4, 5, 6, 7, 0
+.Lror32_8_table:
+ .byte 1, 2, 3, 0, 5, 6, 7, 4
+.Lrol64_8_table:
+ .byte 7, 0, 1, 2, 3, 4, 5, 6
+.Lrol32_8_table:
+ .byte 3, 0, 1, 2, 7, 4, 5, 6
+.Lgf128mul_table:
+ .byte 0, 0x87
+ .fill 14
+.Lgf64mul_table:
+ .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
+ .fill 12
+
+/*
+ * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
+ *
+ * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
+ * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
+ * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
+ *
+ * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
+ * the vtbl approach is faster on some processors and the same speed on others.
+ */
+.macro _speck_round_128bytes n
+
+ // x = ror(x, 8)
+ vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
+ vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
+ vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
+ vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
+ vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
+ vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
+ vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
+ vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
+
+ // x += y
+ vadd.u\n X0, Y0
+ vadd.u\n X1, Y1
+ vadd.u\n X2, Y2
+ vadd.u\n X3, Y3
+
+ // x ^= k
+ veor X0, ROUND_KEY
+ veor X1, ROUND_KEY
+ veor X2, ROUND_KEY
+ veor X3, ROUND_KEY
+
+ // y = rol(y, 3)
+ vshl.u\n TMP0, Y0, #3
+ vshl.u\n TMP1, Y1, #3
+ vshl.u\n TMP2, Y2, #3
+ vshl.u\n TMP3, Y3, #3
+ vsri.u\n TMP0, Y0, #(\n - 3)
+ vsri.u\n TMP1, Y1, #(\n - 3)
+ vsri.u\n TMP2, Y2, #(\n - 3)
+ vsri.u\n TMP3, Y3, #(\n - 3)
+
+ // y ^= x
+ veor Y0, TMP0, X0
+ veor Y1, TMP1, X1
+ veor Y2, TMP2, X2
+ veor Y3, TMP3, X3
+.endm
+
+/*
+ * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
+ *
+ * This is the inverse of _speck_round_128bytes().
+ */
+.macro _speck_unround_128bytes n
+
+ // y ^= x
+ veor TMP0, Y0, X0
+ veor TMP1, Y1, X1
+ veor TMP2, Y2, X2
+ veor TMP3, Y3, X3
+
+ // y = ror(y, 3)
+ vshr.u\n Y0, TMP0, #3
+ vshr.u\n Y1, TMP1, #3
+ vshr.u\n Y2, TMP2, #3
+ vshr.u\n Y3, TMP3, #3
+ vsli.u\n Y0, TMP0, #(\n - 3)
+ vsli.u\n Y1, TMP1, #(\n - 3)
+ vsli.u\n Y2, TMP2, #(\n - 3)
+ vsli.u\n Y3, TMP3, #(\n - 3)
+
+ // x ^= k
+ veor X0, ROUND_KEY
+ veor X1, ROUND_KEY
+ veor X2, ROUND_KEY
+ veor X3, ROUND_KEY
+
+ // x -= y
+ vsub.u\n X0, Y0
+ vsub.u\n X1, Y1
+ vsub.u\n X2, Y2
+ vsub.u\n X3, Y3
+
+ // x = rol(x, 8);
+ vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
+ vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
+ vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
+ vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
+ vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
+ vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
+ vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
+ vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
+.endm
+
+.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
+
+ // Load the next source block
+ vld1.8 {\dst_reg}, [SRC]!
+
+ // Save the current tweak in the tweak buffer
+ vst1.8 {TWEAKV}, [\tweak_buf:128]!
+
+ // XOR the next source block with the current tweak
+ veor \dst_reg, TWEAKV
+
+ /*
+ * Calculate the next tweak by multiplying the current one by x,
+ * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
+ */
+ vshr.u64 \tmp, TWEAKV, #63
+ vshl.u64 TWEAKV, #1
+ veor TWEAKV_H, \tmp\()_L
+ vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
+ veor TWEAKV_L, \tmp\()_H
+.endm
+
+.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
+
+ // Load the next two source blocks
+ vld1.8 {\dst_reg}, [SRC]!
+
+ // Save the current two tweaks in the tweak buffer
+ vst1.8 {TWEAKV}, [\tweak_buf:128]!
+
+ // XOR the next two source blocks with the current two tweaks
+ veor \dst_reg, TWEAKV
+
+ /*
+ * Calculate the next two tweaks by multiplying the current ones by x^2,
+ * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
+ */
+ vshr.u64 \tmp, TWEAKV, #62
+ vshl.u64 TWEAKV, #2
+ vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
+ vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
+ veor TWEAKV, \tmp
+.endm
+
+/*
+ * _speck_xts_crypt() - Speck-XTS encryption/decryption
+ *
+ * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
+ * using Speck-XTS, specifically the variant with a block size of '2n' and round
+ * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
+ * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
+ * nonzero multiple of 128.
+ */
+.macro _speck_xts_crypt n, decrypting
+ push {r4-r7}
+ mov r7, sp
+
+ /*
+ * The first four parameters were passed in registers r0-r3. Load the
+ * additional parameters, which were passed on the stack.
+ */
+ ldr NBYTES, [sp, #16]
+ ldr TWEAK, [sp, #20]
+
+ /*
+ * If decrypting, modify the ROUND_KEYS parameter to point to the last
+ * round key rather than the first, since for decryption the round keys
+ * are used in reverse order.
+ */
+.if \decrypting
+.if \n == 64
+ add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
+ sub ROUND_KEYS, #8
+.else
+ add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
+ sub ROUND_KEYS, #4
+.endif
+.endif
+
+ // Load the index vector for vtbl-based 8-bit rotates
+.if \decrypting
+ ldr r12, =.Lrol\n\()_8_table
+.else
+ ldr r12, =.Lror\n\()_8_table
+.endif
+ vld1.8 {ROTATE_TABLE}, [r12:64]
+
+ // One-time XTS preparation
+
+ /*
+ * Allocate stack space to store 128 bytes worth of tweaks. For
+ * performance, this space is aligned to a 16-byte boundary so that we
+ * can use the load/store instructions that declare 16-byte alignment.
+ */
+ sub sp, #128
+ bic sp, #0xf
+
+.if \n == 64
+ // Load first tweak
+ vld1.8 {TWEAKV}, [TWEAK]
+
+ // Load GF(2^128) multiplication table
+ ldr r12, =.Lgf128mul_table
+ vld1.8 {GF128MUL_TABLE}, [r12:64]
+.else
+ // Load first tweak
+ vld1.8 {TWEAKV_L}, [TWEAK]
+
+ // Load GF(2^64) multiplication table
+ ldr r12, =.Lgf64mul_table
+ vld1.8 {GF64MUL_TABLE}, [r12:64]
+
+ // Calculate second tweak, packing it together with the first
+ vshr.u64 TMP0_L, TWEAKV_L, #63
+ vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
+ vshl.u64 TWEAKV_H, TWEAKV_L, #1
+ veor TWEAKV_H, TMP0_L
+.endif
+
+.Lnext_128bytes_\@:
+
+ /*
+ * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
+ * values, and save the tweaks on the stack for later. Then
+ * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
+ * that the X[0-3] registers contain only the second halves of blocks,
+ * and the Y[0-3] registers contain only the first halves of blocks.
+ * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
+ */
+ mov r12, sp
+.if \n == 64
+ _xts128_precrypt_one X0, r12, TMP0
+ _xts128_precrypt_one Y0, r12, TMP0
+ _xts128_precrypt_one X1, r12, TMP0
+ _xts128_precrypt_one Y1, r12, TMP0
+ _xts128_precrypt_one X2, r12, TMP0
+ _xts128_precrypt_one Y2, r12, TMP0
+ _xts128_precrypt_one X3, r12, TMP0
+ _xts128_precrypt_one Y3, r12, TMP0
+ vswp X0_L, Y0_H
+ vswp X1_L, Y1_H
+ vswp X2_L, Y2_H
+ vswp X3_L, Y3_H
+.else
+ _xts64_precrypt_two X0, r12, TMP0
+ _xts64_precrypt_two Y0, r12, TMP0
+ _xts64_precrypt_two X1, r12, TMP0
+ _xts64_precrypt_two Y1, r12, TMP0
+ _xts64_precrypt_two X2, r12, TMP0
+ _xts64_precrypt_two Y2, r12, TMP0
+ _xts64_precrypt_two X3, r12, TMP0
+ _xts64_precrypt_two Y3, r12, TMP0
+ vuzp.32 Y0, X0
+ vuzp.32 Y1, X1
+ vuzp.32 Y2, X2
+ vuzp.32 Y3, X3
+.endif
+
+ // Do the cipher rounds
+
+ mov r12, ROUND_KEYS
+ mov r6, NROUNDS
+
+.Lnext_round_\@:
+.if \decrypting
+.if \n == 64
+ vld1.64 ROUND_KEY_L, [r12]
+ sub r12, #8
+ vmov ROUND_KEY_H, ROUND_KEY_L
+.else
+ vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
+ sub r12, #4
+.endif
+ _speck_unround_128bytes \n
+.else
+.if \n == 64
+ vld1.64 ROUND_KEY_L, [r12]!
+ vmov ROUND_KEY_H, ROUND_KEY_L
+.else
+ vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
+.endif
+ _speck_round_128bytes \n
+.endif
+ subs r6, r6, #1
+ bne .Lnext_round_\@
+
+ // Re-interleave the 'x' and 'y' elements of each block
+.if \n == 64
+ vswp X0_L, Y0_H
+ vswp X1_L, Y1_H
+ vswp X2_L, Y2_H
+ vswp X3_L, Y3_H
+.else
+ vzip.32 Y0, X0
+ vzip.32 Y1, X1
+ vzip.32 Y2, X2
+ vzip.32 Y3, X3
+.endif
+
+ // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
+ mov r12, sp
+ vld1.8 {TMP0, TMP1}, [r12:128]!
+ vld1.8 {TMP2, TMP3}, [r12:128]!
+ veor X0, TMP0
+ veor Y0, TMP1
+ veor X1, TMP2
+ veor Y1, TMP3
+ vld1.8 {TMP0, TMP1}, [r12:128]!
+ vld1.8 {TMP2, TMP3}, [r12:128]!
+ veor X2, TMP0
+ veor Y2, TMP1
+ veor X3, TMP2
+ veor Y3, TMP3
+
+ // Store the ciphertext in the destination buffer
+ vst1.8 {X0, Y0}, [DST]!
+ vst1.8 {X1, Y1}, [DST]!
+ vst1.8 {X2, Y2}, [DST]!
+ vst1.8 {X3, Y3}, [DST]!
+
+ // Continue if there are more 128-byte chunks remaining, else return
+ subs NBYTES, #128
+ bne .Lnext_128bytes_\@
+
+ // Store the next tweak
+.if \n == 64
+ vst1.8 {TWEAKV}, [TWEAK]
+.else
+ vst1.8 {TWEAKV_L}, [TWEAK]
+.endif
+
+ mov sp, r7
+ pop {r4-r7}
+ bx lr
+.endm
+
+ENTRY(speck128_xts_encrypt_neon)
+ _speck_xts_crypt n=64, decrypting=0
+ENDPROC(speck128_xts_encrypt_neon)
+
+ENTRY(speck128_xts_decrypt_neon)
+ _speck_xts_crypt n=64, decrypting=1
+ENDPROC(speck128_xts_decrypt_neon)
+
+ENTRY(speck64_xts_encrypt_neon)
+ _speck_xts_crypt n=32, decrypting=0
+ENDPROC(speck64_xts_encrypt_neon)
+
+ENTRY(speck64_xts_decrypt_neon)
+ _speck_xts_crypt n=32, decrypting=1
+ENDPROC(speck64_xts_decrypt_neon)
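[Editor's note, not part of the patch: a plain-C restatement of the arithmetic the assembly comments above describe; the function names are invented for illustration.]

/* One scalar Speck128 encryption round, as vectorized by
 * _speck_round_128bytes above (k is the round key): */
static void speck128_round(u64 *x, u64 *y, u64 k)
{
	*x = (*x >> 8) | (*x << 56);	/* x = ror(x, 8) */
	*x += *y;			/* x += y */
	*x ^= k;			/* x ^= k */
	*y = (*y << 3) | (*y >> 61);	/* y = rol(y, 3) */
	*y ^= *x;			/* y ^= x */
}

/* The XTS tweak update performed by _xts128_precrypt_one:
 * t = t * x mod (x^128 + x^7 + x^2 + x + 1), with t[0] the low and
 * t[1] the high 64 bits; the 0x87 constant matches .Lgf128mul_table.
 * The 64-bit variant is the same idea with 0x1b and
 * p(x) = x^64 + x^4 + x^3 + x + 1, matching .Lgf64mul_table. */
static void gf128_mul_x(u64 t[2])
{
	u64 carry = t[1] >> 63;		/* bit shifted out of the top */

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
}
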
diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
new file mode 100644
index 000000000000..f012c3ea998f
--- /dev/null
+++ b/arch/arm/crypto/speck-neon-glue.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
+ *
+ * Copyright (c) 2018 Google, Inc
+ *
+ * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
+ * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
+ * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
+ * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
+ * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
+ * OCB and PMAC"), represented as 0x1B.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/algapi.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/speck.h>
+#include <crypto/xts.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+/* The assembly functions only handle multiples of 128 bytes */
+#define SPECK_NEON_CHUNK_SIZE 128
+
+/* Speck128 */
+
+struct speck128_xts_tfm_ctx {
+ struct speck128_tfm_ctx main_key;
+ struct speck128_tfm_ctx tweak_key;
+};
+
+asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
+ u8 *, const u8 *);
+typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
+ const void *, unsigned int, void *);
+
+static __always_inline int
+__speck128_xts_crypt(struct skcipher_request *req,
+ speck128_crypt_one_t crypt_one,
+ speck128_xts_crypt_many_t crypt_many)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ le128 tweak;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+ crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+ u8 *dst = walk.dst.virt.addr;
+ const u8 *src = walk.src.virt.addr;
+
+ if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
+ unsigned int count;
+
+ count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
+ kernel_neon_begin();
+ (*crypt_many)(ctx->main_key.round_keys,
+ ctx->main_key.nrounds,
+ dst, src, count, &tweak);
+ kernel_neon_end();
+ dst += count;
+ src += count;
+ nbytes -= count;
+ }
+
+ /* Handle any remainder with generic code */
+ while (nbytes >= sizeof(tweak)) {
+ le128_xor((le128 *)dst, (const le128 *)src, &tweak);
+ (*crypt_one)(&ctx->main_key, dst, dst);
+ le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
+ gf128mul_x_ble(&tweak, &tweak);
+
+ dst += sizeof(tweak);
+ src += sizeof(tweak);
+ nbytes -= sizeof(tweak);
+ }
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int speck128_xts_encrypt(struct skcipher_request *req)
+{
+ return __speck128_xts_crypt(req, crypto_speck128_encrypt,
+ speck128_xts_encrypt_neon);
+}
+
+static int speck128_xts_decrypt(struct skcipher_request *req)
+{
+ return __speck128_xts_crypt(req, crypto_speck128_decrypt,
+ speck128_xts_decrypt_neon);
+}
+
+static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ keylen /= 2;
+
+ err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
+ if (err)
+ return err;
+
+ return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
+}
+
+/* Speck64 */
+
+struct speck64_xts_tfm_ctx {
+ struct speck64_tfm_ctx main_key;
+ struct speck64_tfm_ctx tweak_key;
+};
+
+asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
+ u8 *, const u8 *);
+typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
+ const void *, unsigned int, void *);
+
+static __always_inline int
+__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
+ speck64_xts_crypt_many_t crypt_many)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ __le64 tweak;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+ crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+ u8 *dst = walk.dst.virt.addr;
+ const u8 *src = walk.src.virt.addr;
+
+ if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
+ unsigned int count;
+
+ count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
+ kernel_neon_begin();
+ (*crypt_many)(ctx->main_key.round_keys,
+ ctx->main_key.nrounds,
+ dst, src, count, &tweak);
+ kernel_neon_end();
+ dst += count;
+ src += count;
+ nbytes -= count;
+ }
+
+ /* Handle any remainder with generic code */
+ while (nbytes >= sizeof(tweak)) {
+ *(__le64 *)dst = *(__le64 *)src ^ tweak;
+ (*crypt_one)(&ctx->main_key, dst, dst);
+ *(__le64 *)dst ^= tweak;
+ tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
+ ((tweak & cpu_to_le64(1ULL << 63)) ?
+ 0x1B : 0));
+ dst += sizeof(tweak);
+ src += sizeof(tweak);
+ nbytes -= sizeof(tweak);
+ }
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int speck64_xts_encrypt(struct skcipher_request *req)
+{
+ return __speck64_xts_crypt(req, crypto_speck64_encrypt,
+ speck64_xts_encrypt_neon);
+}
+
+static int speck64_xts_decrypt(struct skcipher_request *req)
+{
+ return __speck64_xts_crypt(req, crypto_speck64_decrypt,
+ speck64_xts_decrypt_neon);
+}
+
+static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ keylen /= 2;
+
+ err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
+ if (err)
+ return err;
+
+ return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
+}
+
+static struct skcipher_alg speck_algs[] = {
+ {
+ .base.cra_name = "xts(speck128)",
+ .base.cra_driver_name = "xts-speck128-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SPECK128_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SPECK128_128_KEY_SIZE,
+ .max_keysize = 2 * SPECK128_256_KEY_SIZE,
+ .ivsize = SPECK128_BLOCK_SIZE,
+ .walksize = SPECK_NEON_CHUNK_SIZE,
+ .setkey = speck128_xts_setkey,
+ .encrypt = speck128_xts_encrypt,
+ .decrypt = speck128_xts_decrypt,
+ }, {
+ .base.cra_name = "xts(speck64)",
+ .base.cra_driver_name = "xts-speck64-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SPECK64_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SPECK64_96_KEY_SIZE,
+ .max_keysize = 2 * SPECK64_128_KEY_SIZE,
+ .ivsize = SPECK64_BLOCK_SIZE,
+ .walksize = SPECK_NEON_CHUNK_SIZE,
+ .setkey = speck64_xts_setkey,
+ .encrypt = speck64_xts_encrypt,
+ .decrypt = speck64_xts_decrypt,
+ }
+};
+
+static int __init speck_neon_module_init(void)
+{
+ if (!(elf_hwcap & HWCAP_NEON))
+ return -ENODEV;
+ return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+static void __exit speck_neon_module_exit(void)
+{
+ crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+module_init(speck_neon_module_init);
+module_exit(speck_neon_module_exit);
+
+MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("xts(speck128)");
+MODULE_ALIAS_CRYPTO("xts-speck128-neon");
+MODULE_ALIAS_CRYPTO("xts(speck64)");
+MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 285c36c7b408..cb5a243110c4 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -113,4 +113,10 @@ config CRYPTO_AES_ARM64_BS
select CRYPTO_AES_ARM64
select CRYPTO_SIMD
+config CRYPTO_SPECK_NEON
+ tristate "NEON accelerated Speck cipher algorithms"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_SPECK
+
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index cee9b8d9830b..8df9f326f449 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -53,20 +53,21 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
+obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
+speck-neon-y := speck-neon-core.o speck-neon-glue.o
+
obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o
aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
-AFLAGS_aes-ce.o := -DINTERLEAVE=4
-AFLAGS_aes-neon.o := -DINTERLEAVE=4
-
CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
+ifdef REGENERATE_ARM64_CRYPTO
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
@@ -75,5 +76,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
$(call cmd,perlasm)
+endif
.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index a1254036f2b1..68b11aa690e4 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -107,11 +107,13 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
}
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
- u32 abytes, u32 *macp, bool use_neon)
+ u32 abytes, u32 *macp)
{
- if (likely(use_neon)) {
+ if (may_use_simd()) {
+ kernel_neon_begin();
ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
num_rounds(key));
+ kernel_neon_end();
} else {
if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
int added = min(abytes, AES_BLOCK_SIZE - *macp);
@@ -143,8 +145,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
}
}
-static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
- bool use_neon)
+static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
@@ -163,7 +164,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
ltag.len = 6;
}
- ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp, use_neon);
+ ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp);
scatterwalk_start(&walk, req->src);
do {
@@ -175,7 +176,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
n = scatterwalk_clamp(&walk, len);
}
p = scatterwalk_map(&walk);
- ccm_update_mac(ctx, mac, p, n, &macp, use_neon);
+ ccm_update_mac(ctx, mac, p, n, &macp);
len -= n;
scatterwalk_unmap(p);
@@ -242,43 +243,42 @@ static int ccm_encrypt(struct aead_request *req)
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen;
- bool use_neon = may_use_simd();
int err;
err = ccm_init_mac(req, mac, len);
if (err)
return err;
- if (likely(use_neon))
- kernel_neon_begin();
-
if (req->assoclen)
- ccm_calculate_auth_mac(req, mac, use_neon);
+ ccm_calculate_auth_mac(req, mac);
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_encrypt(&walk, req, true);
- if (likely(use_neon)) {
+ if (may_use_simd()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
if (walk.nbytes == walk.total)
tail = 0;
+ kernel_neon_begin();
ce_aes_ccm_encrypt(walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes - tail, ctx->key_enc,
num_rounds(ctx), mac, walk.iv);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, tail);
}
- if (!err)
+ if (!err) {
+ kernel_neon_begin();
ce_aes_ccm_final(mac, buf, ctx->key_enc,
num_rounds(ctx));
-
- kernel_neon_end();
+ kernel_neon_end();
+ }
} else {
err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
}
@@ -301,43 +301,42 @@ static int ccm_decrypt(struct aead_request *req)
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen - authsize;
- bool use_neon = may_use_simd();
int err;
err = ccm_init_mac(req, mac, len);
if (err)
return err;
- if (likely(use_neon))
- kernel_neon_begin();
-
if (req->assoclen)
- ccm_calculate_auth_mac(req, mac, use_neon);
+ ccm_calculate_auth_mac(req, mac);
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_decrypt(&walk, req, true);
- if (likely(use_neon)) {
+ if (may_use_simd()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
if (walk.nbytes == walk.total)
tail = 0;
+ kernel_neon_begin();
ce_aes_ccm_decrypt(walk.dst.virt.addr,
walk.src.virt.addr,
walk.nbytes - tail, ctx->key_enc,
num_rounds(ctx), mac, walk.iv);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, tail);
}
- if (!err)
+ if (!err) {
+ kernel_neon_begin();
ce_aes_ccm_final(mac, buf, ctx->key_enc,
num_rounds(ctx));
-
- kernel_neon_end();
+ kernel_neon_end();
+ }
} else {
err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
}
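[Editor's note, not part of the patch: the CCM rework above narrows the NEON critical sections so the task can be preempted between chunks instead of holding the NEON unit across the entire walk. In schematic form, where accelerated_crypt() is a placeholder for the ce_aes_ccm_* calls:]

/* Editor's sketch of the pattern applied above: claim the NEON unit
 * only around each accelerated call, not across the whole walk. */
while (walk.nbytes) {
	u32 tail = walk.nbytes % AES_BLOCK_SIZE;

	if (walk.nbytes == walk.total)
		tail = 0;
	kernel_neon_begin();
	accelerated_crypt(walk.dst.virt.addr, walk.src.virt.addr,
			  walk.nbytes - tail);	/* placeholder helper */
	kernel_neon_end();	/* scheduler may preempt here */
	err = skcipher_walk_done(&walk, tail);
}
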
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 2fa850e86aa8..253188fb8cb0 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -64,17 +64,17 @@ MODULE_LICENSE("GPL v2");
/* defined in aes-modes.S */
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
- int rounds, int blocks, int first);
+ int rounds, int blocks);
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
- int rounds, int blocks, int first);
+ int rounds, int blocks);
asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
- int rounds, int blocks, u8 iv[], int first);
+ int rounds, int blocks, u8 iv[]);
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
- int rounds, int blocks, u8 iv[], int first);
+ int rounds, int blocks, u8 iv[]);
asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
- int rounds, int blocks, u8 ctr[], int first);
+ int rounds, int blocks, u8 ctr[]);
asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
int rounds, int blocks, u8 const rk2[], u8 iv[],
@@ -133,19 +133,19 @@ static int ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err, first, rounds = 6 + ctx->key_length / 4;
+ int err, rounds = 6 + ctx->key_length / 4;
struct skcipher_walk walk;
unsigned int blocks;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
- for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks, first);
+ (u8 *)ctx->key_enc, rounds, blocks);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -153,19 +153,19 @@ static int ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err, first, rounds = 6 + ctx->key_length / 4;
+ int err, rounds = 6 + ctx->key_length / 4;
struct skcipher_walk walk;
unsigned int blocks;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
- for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_dec, rounds, blocks, first);
+ (u8 *)ctx->key_dec, rounds, blocks);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -173,20 +173,19 @@ static int cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err, first, rounds = 6 + ctx->key_length / 4;
+ int err, rounds = 6 + ctx->key_length / 4;
struct skcipher_walk walk;
unsigned int blocks;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
- for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
- first);
+ (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -194,20 +193,19 @@ static int cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err, first, rounds = 6 + ctx->key_length / 4;
+ int err, rounds = 6 + ctx->key_length / 4;
struct skcipher_walk walk;
unsigned int blocks;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
- for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
- first);
+ (u8 *)ctx->key_dec, rounds, blocks, walk.iv);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -215,20 +213,18 @@ static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err, first, rounds = 6 + ctx->key_length / 4;
+ int err, rounds = 6 + ctx->key_length / 4;
struct skcipher_walk walk;
int blocks;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- first = 1;
- kernel_neon_begin();
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
- first);
+ (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
- first = 0;
+ kernel_neon_end();
}
if (walk.nbytes) {
u8 __aligned(8) tail[AES_BLOCK_SIZE];
@@ -241,12 +237,13 @@ static int ctr_encrypt(struct skcipher_request *req)
*/
blocks = -1;
+ kernel_neon_begin();
aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
- blocks, walk.iv, first);
+ blocks, walk.iv);
+ kernel_neon_end();
crypto_xor_cpy(tdst, tsrc, tail, nbytes);
err = skcipher_walk_done(&walk, 0);
}
- kernel_neon_end();
return err;
}
@@ -270,16 +267,16 @@ static int xts_encrypt(struct skcipher_request *req)
struct skcipher_walk walk;
unsigned int blocks;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ kernel_neon_begin();
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_enc, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -292,16 +289,16 @@ static int xts_decrypt(struct skcipher_request *req)
struct skcipher_walk walk;
unsigned int blocks;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+ kernel_neon_begin();
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_dec, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -425,7 +422,7 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
/* encrypt the zero vector */
kernel_neon_begin();
- aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1, 1);
+ aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
kernel_neon_end();
cmac_gf128_mul_by_x(consts, consts);
@@ -454,8 +451,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
return err;
kernel_neon_begin();
- aes_ecb_encrypt(key, ks[0], rk, rounds, 1, 1);
- aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2, 0);
+ aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
+ aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
kernel_neon_end();
return cbcmac_setkey(tfm, key, sizeof(key));
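[Editor's note, not part of the patch: two consequences of the same rework in aes-glue.c above are worth spelling out. First, since kernel_neon_begin()/kernel_neon_end() now bracket only the assembly call, skcipher_walk_done() runs preemptibly, which is why the third (atomic) argument of skcipher_walk_virt() flips from true to false throughout. Second, the CTR tail path asks the assembly for one final keystream block by passing a negative block count; a minimal restatement:]

/* Editor's sketch of the CTR tail handling above: blocks == -1 makes
 * aes_ctr_encrypt() emit a single keystream block into 'tail', which
 * is then XORed with the sub-block remainder by hand. */
u8 __aligned(8) tail[AES_BLOCK_SIZE];

kernel_neon_begin();
aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds, -1, walk.iv);
kernel_neon_end();
crypto_xor_cpy(tdst, tsrc, tail, nbytes);	/* nbytes < AES_BLOCK_SIZE */
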
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 2674d43d1384..a68412e1e3a4 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -13,127 +13,39 @@
.text
.align 4
-/*
- * There are several ways to instantiate this code:
- * - no interleave, all inline
- * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
- * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
- * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
- * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
- *
- * Macros imported by this code:
- * - enc_prepare - setup NEON registers for encryption
- * - dec_prepare - setup NEON registers for decryption
- * - enc_switch_key - change to new key after having prepared for encryption
- * - encrypt_block - encrypt a single block
- * - decrypt block - decrypt a single block
- * - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
- * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
- * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
- * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
- */
-
-#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
-#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp
-#define FRAME_POP ldp x29, x30, [sp],#16
-
-#if INTERLEAVE == 2
-
-aes_encrypt_block2x:
- encrypt_block2x v0, v1, w3, x2, x6, w7
- ret
-ENDPROC(aes_encrypt_block2x)
-
-aes_decrypt_block2x:
- decrypt_block2x v0, v1, w3, x2, x6, w7
- ret
-ENDPROC(aes_decrypt_block2x)
-
-#elif INTERLEAVE == 4
-
aes_encrypt_block4x:
- encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
ENDPROC(aes_encrypt_block4x)
aes_decrypt_block4x:
- decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
+ decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
ENDPROC(aes_decrypt_block4x)
-#else
-#error INTERLEAVE should equal 2 or 4
-#endif
-
- .macro do_encrypt_block2x
- bl aes_encrypt_block2x
- .endm
-
- .macro do_decrypt_block2x
- bl aes_decrypt_block2x
- .endm
-
- .macro do_encrypt_block4x
- bl aes_encrypt_block4x
- .endm
-
- .macro do_decrypt_block4x
- bl aes_decrypt_block4x
- .endm
-
-#else
-#define FRAME_PUSH
-#define FRAME_POP
-
- .macro do_encrypt_block2x
- encrypt_block2x v0, v1, w3, x2, x6, w7
- .endm
-
- .macro do_decrypt_block2x
- decrypt_block2x v0, v1, w3, x2, x6, w7
- .endm
-
- .macro do_encrypt_block4x
- encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
- .endm
-
- .macro do_decrypt_block4x
- decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
- .endm
-
-#endif
-
/*
* aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
- * int blocks, int first)
+ * int blocks)
* aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
- * int blocks, int first)
+ * int blocks)
*/
AES_ENTRY(aes_ecb_encrypt)
- FRAME_PUSH
- cbz w5, .LecbencloopNx
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
enc_prepare w3, x2, x5
.LecbencloopNx:
-#if INTERLEAVE >= 2
- subs w4, w4, #INTERLEAVE
+ subs w4, w4, #4
bmi .Lecbenc1x
-#if INTERLEAVE == 2
- ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
- do_encrypt_block2x
- st1 {v0.16b-v1.16b}, [x0], #32
-#else
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
- do_encrypt_block4x
+ bl aes_encrypt_block4x
st1 {v0.16b-v3.16b}, [x0], #64
-#endif
b .LecbencloopNx
.Lecbenc1x:
- adds w4, w4, #INTERLEAVE
+ adds w4, w4, #4
beq .Lecbencout
-#endif
.Lecbencloop:
ld1 {v0.16b}, [x1], #16 /* get next pt block */
encrypt_block v0, w3, x2, x5, w6
@@ -141,35 +53,27 @@ AES_ENTRY(aes_ecb_encrypt)
subs w4, w4, #1
bne .Lecbencloop
.Lecbencout:
- FRAME_POP
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_ecb_encrypt)
AES_ENTRY(aes_ecb_decrypt)
- FRAME_PUSH
- cbz w5, .LecbdecloopNx
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
dec_prepare w3, x2, x5
.LecbdecloopNx:
-#if INTERLEAVE >= 2
- subs w4, w4, #INTERLEAVE
+ subs w4, w4, #4
bmi .Lecbdec1x
-#if INTERLEAVE == 2
- ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
- do_decrypt_block2x
- st1 {v0.16b-v1.16b}, [x0], #32
-#else
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
- do_decrypt_block4x
+ bl aes_decrypt_block4x
st1 {v0.16b-v3.16b}, [x0], #64
-#endif
b .LecbdecloopNx
.Lecbdec1x:
- adds w4, w4, #INTERLEAVE
+ adds w4, w4, #4
beq .Lecbdecout
-#endif
.Lecbdecloop:
ld1 {v0.16b}, [x1], #16 /* get next ct block */
decrypt_block v0, w3, x2, x5, w6
@@ -177,62 +81,68 @@ AES_ENTRY(aes_ecb_decrypt)
subs w4, w4, #1
bne .Lecbdecloop
.Lecbdecout:
- FRAME_POP
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_ecb_decrypt)
/*
* aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
- * int blocks, u8 iv[], int first)
+ * int blocks, u8 iv[])
* aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
- * int blocks, u8 iv[], int first)
+ * int blocks, u8 iv[])
*/
AES_ENTRY(aes_cbc_encrypt)
- cbz w6, .Lcbcencloop
-
- ld1 {v0.16b}, [x5] /* get iv */
+ ld1 {v4.16b}, [x5] /* get iv */
enc_prepare w3, x2, x6
-.Lcbcencloop:
- ld1 {v1.16b}, [x1], #16 /* get next pt block */
- eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
+.Lcbcencloop4x:
+ subs w4, w4, #4
+ bmi .Lcbcenc1x
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
+ eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
encrypt_block v0, w3, x2, x6, w7
- st1 {v0.16b}, [x0], #16
+ eor v1.16b, v1.16b, v0.16b
+ encrypt_block v1, w3, x2, x6, w7
+ eor v2.16b, v2.16b, v1.16b
+ encrypt_block v2, w3, x2, x6, w7
+ eor v3.16b, v3.16b, v2.16b
+ encrypt_block v3, w3, x2, x6, w7
+ st1 {v0.16b-v3.16b}, [x0], #64
+ mov v4.16b, v3.16b
+ b .Lcbcencloop4x
+.Lcbcenc1x:
+ adds w4, w4, #4
+ beq .Lcbcencout
+.Lcbcencloop:
+ ld1 {v0.16b}, [x1], #16 /* get next pt block */
+ eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
+ encrypt_block v4, w3, x2, x6, w7
+ st1 {v4.16b}, [x0], #16
subs w4, w4, #1
bne .Lcbcencloop
- st1 {v0.16b}, [x5] /* return iv */
+.Lcbcencout:
+ st1 {v4.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_encrypt)
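
The four-block path above batches the NEON loads and stores, but the four encrypt_block calls still run back to back: CBC encryption is inherently serial, since each ciphertext becomes the IV of the next block (decryption, by contrast, really does run four blocks in parallel via aes_decrypt_block4x below). A minimal C model of the chaining, with aes_encrypt_one() as a hypothetical stand-in for the single-block primitive:

#include <stdint.h>
#include <string.h>

/* hypothetical single-block primitive standing in for encrypt_block */
void aes_encrypt_one(uint8_t out[16], const uint8_t in[16], const void *key);

/* Reference model of .Lcbcencloop4x/.Lcbcencloop: every ciphertext feeds
 * the next block's XOR, so the encryptions themselves stay serial and
 * only the loads/stores can be batched four at a time. */
void cbc_encrypt_ref(uint8_t *dst, const uint8_t *src, int blocks,
		     uint8_t iv[16], const void *key)
{
	while (blocks--) {
		for (int i = 0; i < 16; i++)
			dst[i] = src[i] ^ iv[i];	/* pt ^ iv */
		aes_encrypt_one(dst, dst, key);		/* ct = E(pt ^ iv) */
		memcpy(iv, dst, 16);			/* ct is the next iv */
		src += 16;
		dst += 16;
	}
}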
AES_ENTRY(aes_cbc_decrypt)
- FRAME_PUSH
- cbz w6, .LcbcdecloopNx
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
ld1 {v7.16b}, [x5] /* get iv */
dec_prepare w3, x2, x6
.LcbcdecloopNx:
-#if INTERLEAVE >= 2
- subs w4, w4, #INTERLEAVE
+ subs w4, w4, #4
bmi .Lcbcdec1x
-#if INTERLEAVE == 2
- ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
- mov v2.16b, v0.16b
- mov v3.16b, v1.16b
- do_decrypt_block2x
- eor v0.16b, v0.16b, v7.16b
- eor v1.16b, v1.16b, v2.16b
- mov v7.16b, v3.16b
- st1 {v0.16b-v1.16b}, [x0], #32
-#else
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
mov v4.16b, v0.16b
mov v5.16b, v1.16b
mov v6.16b, v2.16b
- do_decrypt_block4x
+ bl aes_decrypt_block4x
sub x1, x1, #16
eor v0.16b, v0.16b, v7.16b
eor v1.16b, v1.16b, v4.16b
@@ -240,12 +150,10 @@ AES_ENTRY(aes_cbc_decrypt)
eor v2.16b, v2.16b, v5.16b
eor v3.16b, v3.16b, v6.16b
st1 {v0.16b-v3.16b}, [x0], #64
-#endif
b .LcbcdecloopNx
.Lcbcdec1x:
- adds w4, w4, #INTERLEAVE
+ adds w4, w4, #4
beq .Lcbcdecout
-#endif
.Lcbcdecloop:
ld1 {v1.16b}, [x1], #16 /* get next ct block */
mov v0.16b, v1.16b /* ...and copy to v0 */
@@ -256,49 +164,33 @@ AES_ENTRY(aes_cbc_decrypt)
subs w4, w4, #1
bne .Lcbcdecloop
.Lcbcdecout:
- FRAME_POP
st1 {v7.16b}, [x5] /* return iv */
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_cbc_decrypt)
/*
* aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
- * int blocks, u8 ctr[], int first)
+ * int blocks, u8 ctr[])
*/
AES_ENTRY(aes_ctr_encrypt)
- FRAME_PUSH
- cbz w6, .Lctrnotfirst /* 1st time around? */
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
enc_prepare w3, x2, x6
ld1 {v4.16b}, [x5]
-.Lctrnotfirst:
- umov x8, v4.d[1] /* keep swabbed ctr in reg */
- rev x8, x8
-#if INTERLEAVE >= 2
- cmn w8, w4 /* 32 bit overflow? */
+ umov x6, v4.d[1] /* keep swabbed ctr in reg */
+ rev x6, x6
+ cmn w6, w4 /* 32 bit overflow? */
bcs .Lctrloop
.LctrloopNx:
- subs w4, w4, #INTERLEAVE
+ subs w4, w4, #4
bmi .Lctr1x
-#if INTERLEAVE == 2
- mov v0.8b, v4.8b
- mov v1.8b, v4.8b
- rev x7, x8
- add x8, x8, #1
- ins v0.d[1], x7
- rev x7, x8
- add x8, x8, #1
- ins v1.d[1], x7
- ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
- do_encrypt_block2x
- eor v0.16b, v0.16b, v2.16b
- eor v1.16b, v1.16b, v3.16b
- st1 {v0.16b-v1.16b}, [x0], #32
-#else
ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
- dup v7.4s, w8
+ dup v7.4s, w6
mov v0.16b, v4.16b
add v7.4s, v7.4s, v8.4s
mov v1.16b, v4.16b
@@ -309,29 +201,27 @@ AES_ENTRY(aes_ctr_encrypt)
mov v2.s[3], v8.s[1]
mov v3.s[3], v8.s[2]
ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
- do_encrypt_block4x
+ bl aes_encrypt_block4x
eor v0.16b, v5.16b, v0.16b
ld1 {v5.16b}, [x1], #16 /* get 1 input block */
eor v1.16b, v6.16b, v1.16b
eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
st1 {v0.16b-v3.16b}, [x0], #64
- add x8, x8, #INTERLEAVE
-#endif
- rev x7, x8
+ add x6, x6, #4
+ rev x7, x6
ins v4.d[1], x7
cbz w4, .Lctrout
b .LctrloopNx
.Lctr1x:
- adds w4, w4, #INTERLEAVE
+ adds w4, w4, #4
beq .Lctrout
-#endif
.Lctrloop:
mov v0.16b, v4.16b
- encrypt_block v0, w3, x2, x6, w7
+ encrypt_block v0, w3, x2, x8, w7
- adds x8, x8, #1 /* increment BE ctr */
- rev x7, x8
+ adds x6, x6, #1 /* increment BE ctr */
+ rev x7, x6
ins v4.d[1], x7
bcs .Lctrcarry /* overflow? */
@@ -345,12 +235,12 @@ AES_ENTRY(aes_ctr_encrypt)
.Lctrout:
st1 {v4.16b}, [x5] /* return next CTR value */
- FRAME_POP
+ ldp x29, x30, [sp], #16
ret
.Lctrtailblock:
st1 {v0.16b}, [x0]
- FRAME_POP
+ ldp x29, x30, [sp], #16
ret
.Lctrcarry:
@@ -384,39 +274,26 @@ CPU_LE( .quad 1, 0x87 )
CPU_BE( .quad 0x87, 1 )
AES_ENTRY(aes_xts_encrypt)
- FRAME_PUSH
- cbz w7, .LxtsencloopNx
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
ld1 {v4.16b}, [x6]
- enc_prepare w3, x5, x6
- encrypt_block v4, w3, x5, x6, w7 /* first tweak */
- enc_switch_key w3, x2, x6
+ cbz w7, .Lxtsencnotfirst
+
+ enc_prepare w3, x5, x8
+ encrypt_block v4, w3, x5, x8, w7 /* first tweak */
+ enc_switch_key w3, x2, x8
ldr q7, .Lxts_mul_x
b .LxtsencNx
+.Lxtsencnotfirst:
+ enc_prepare w3, x2, x8
.LxtsencloopNx:
ldr q7, .Lxts_mul_x
next_tweak v4, v4, v7, v8
.LxtsencNx:
-#if INTERLEAVE >= 2
- subs w4, w4, #INTERLEAVE
+ subs w4, w4, #4
bmi .Lxtsenc1x
-#if INTERLEAVE == 2
- ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
- next_tweak v5, v4, v7, v8
- eor v0.16b, v0.16b, v4.16b
- eor v1.16b, v1.16b, v5.16b
- do_encrypt_block2x
- eor v0.16b, v0.16b, v4.16b
- eor v1.16b, v1.16b, v5.16b
- st1 {v0.16b-v1.16b}, [x0], #32
- cbz w4, .LxtsencoutNx
- next_tweak v4, v5, v7, v8
- b .LxtsencNx
-.LxtsencoutNx:
- mov v4.16b, v5.16b
- b .Lxtsencout
-#else
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
next_tweak v5, v4, v7, v8
eor v0.16b, v0.16b, v4.16b
@@ -425,7 +302,7 @@ AES_ENTRY(aes_xts_encrypt)
eor v2.16b, v2.16b, v6.16b
next_tweak v7, v6, v7, v8
eor v3.16b, v3.16b, v7.16b
- do_encrypt_block4x
+ bl aes_encrypt_block4x
eor v3.16b, v3.16b, v7.16b
eor v0.16b, v0.16b, v4.16b
eor v1.16b, v1.16b, v5.16b
@@ -434,15 +311,13 @@ AES_ENTRY(aes_xts_encrypt)
mov v4.16b, v7.16b
cbz w4, .Lxtsencout
b .LxtsencloopNx
-#endif
.Lxtsenc1x:
- adds w4, w4, #INTERLEAVE
+ adds w4, w4, #4
beq .Lxtsencout
-#endif
.Lxtsencloop:
ld1 {v1.16b}, [x1], #16
eor v0.16b, v1.16b, v4.16b
- encrypt_block v0, w3, x2, x6, w7
+ encrypt_block v0, w3, x2, x8, w7
eor v0.16b, v0.16b, v4.16b
st1 {v0.16b}, [x0], #16
subs w4, w4, #1
@@ -450,45 +325,33 @@ AES_ENTRY(aes_xts_encrypt)
next_tweak v4, v4, v7, v8
b .Lxtsencloop
.Lxtsencout:
- FRAME_POP
+ st1 {v4.16b}, [x6]
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_xts_encrypt)
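
The encrypt path follows the usual XTS recipe: on the first call the tweak is the IV encrypted with the second key (skipped via .Lxtsencnotfirst when the caller passes first == 0, since the tweak is then resumed from the buffer), and each block is whitened with the tweak before and after the block cipher while the tweak is doubled in GF(2^128). A scalar sketch, with aes_encrypt_one() as a hypothetical single-block primitive:

#include <stdint.h>
#include <string.h>

/* hypothetical single-block primitive standing in for encrypt_block */
void aes_encrypt_one(uint8_t out[16], const uint8_t in[16], const void *key);

/* Multiply the tweak by x in GF(2^128) mod x^128 + x^7 + x^2 + x + 1,
 * the update next_tweak performs with the .Lxts_mul_x constant. */
static void xts_mul_x(uint8_t t[16])
{
	int carry = t[15] >> 7;

	for (int i = 15; i > 0; i--)
		t[i] = (uint8_t)((t[i] << 1) | (t[i - 1] >> 7));
	t[0] = (uint8_t)((t[0] << 1) ^ (carry ? 0x87 : 0));
}

/* One XTS pass as the asm structures it: T = E_K2(iv) on the first call,
 * then C_i = E_K1(P_i ^ T) ^ T with T doubled after every block, and the
 * next tweak written back for the following call. */
void xts_encrypt_ref(uint8_t *dst, const uint8_t *src, int blocks,
		     uint8_t iv[16], const void *key1, const void *key2,
		     int first)
{
	uint8_t t[16];

	if (first)
		aes_encrypt_one(t, iv, key2);	/* first tweak */
	else
		memcpy(t, iv, 16);		/* resume saved tweak */

	while (blocks--) {
		for (int i = 0; i < 16; i++)
			dst[i] = src[i] ^ t[i];
		aes_encrypt_one(dst, dst, key1);
		for (int i = 0; i < 16; i++)
			dst[i] ^= t[i];
		xts_mul_x(t);
		src += 16;
		dst += 16;
	}
	memcpy(iv, t, 16);			/* return next tweak */
}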
AES_ENTRY(aes_xts_decrypt)
- FRAME_PUSH
- cbz w7, .LxtsdecloopNx
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
ld1 {v4.16b}, [x6]
- enc_prepare w3, x5, x6
- encrypt_block v4, w3, x5, x6, w7 /* first tweak */
- dec_prepare w3, x2, x6
+ cbz w7, .Lxtsdecnotfirst
+
+ enc_prepare w3, x5, x8
+ encrypt_block v4, w3, x5, x8, w7 /* first tweak */
+ dec_prepare w3, x2, x8
ldr q7, .Lxts_mul_x
b .LxtsdecNx
+.Lxtsdecnotfirst:
+ dec_prepare w3, x2, x8
.LxtsdecloopNx:
ldr q7, .Lxts_mul_x
next_tweak v4, v4, v7, v8
.LxtsdecNx:
-#if INTERLEAVE >= 2
- subs w4, w4, #INTERLEAVE
+ subs w4, w4, #4
bmi .Lxtsdec1x
-#if INTERLEAVE == 2
- ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
- next_tweak v5, v4, v7, v8
- eor v0.16b, v0.16b, v4.16b
- eor v1.16b, v1.16b, v5.16b
- do_decrypt_block2x
- eor v0.16b, v0.16b, v4.16b
- eor v1.16b, v1.16b, v5.16b
- st1 {v0.16b-v1.16b}, [x0], #32
- cbz w4, .LxtsdecoutNx
- next_tweak v4, v5, v7, v8
- b .LxtsdecNx
-.LxtsdecoutNx:
- mov v4.16b, v5.16b
- b .Lxtsdecout
-#else
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
next_tweak v5, v4, v7, v8
eor v0.16b, v0.16b, v4.16b
@@ -497,7 +360,7 @@ AES_ENTRY(aes_xts_decrypt)
eor v2.16b, v2.16b, v6.16b
next_tweak v7, v6, v7, v8
eor v3.16b, v3.16b, v7.16b
- do_decrypt_block4x
+ bl aes_decrypt_block4x
eor v3.16b, v3.16b, v7.16b
eor v0.16b, v0.16b, v4.16b
eor v1.16b, v1.16b, v5.16b
@@ -506,15 +369,13 @@ AES_ENTRY(aes_xts_decrypt)
mov v4.16b, v7.16b
cbz w4, .Lxtsdecout
b .LxtsdecloopNx
-#endif
.Lxtsdec1x:
- adds w4, w4, #INTERLEAVE
+ adds w4, w4, #4
beq .Lxtsdecout
-#endif
.Lxtsdecloop:
ld1 {v1.16b}, [x1], #16
eor v0.16b, v1.16b, v4.16b
- decrypt_block v0, w3, x2, x6, w7
+ decrypt_block v0, w3, x2, x8, w7
eor v0.16b, v0.16b, v4.16b
st1 {v0.16b}, [x0], #16
subs w4, w4, #1
@@ -522,7 +383,8 @@ AES_ENTRY(aes_xts_decrypt)
next_tweak v4, v4, v7, v8
b .Lxtsdecloop
.Lxtsdecout:
- FRAME_POP
+ st1 {v4.16b}, [x6]
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_xts_decrypt)
@@ -533,8 +395,28 @@ AES_ENDPROC(aes_xts_decrypt)
AES_ENTRY(aes_mac_update)
ld1 {v0.16b}, [x4] /* get dg */
enc_prepare w2, x1, x7
- cbnz w5, .Lmacenc
+ cbz w5, .Lmacloop4x
+
+ encrypt_block v0, w2, x1, x7, w8
+.Lmacloop4x:
+ subs w3, w3, #4
+ bmi .Lmac1x
+ ld1 {v1.16b-v4.16b}, [x0], #64 /* get next 4 pt blocks */
+ eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
+ encrypt_block v0, w2, x1, x7, w8
+ eor v0.16b, v0.16b, v2.16b
+ encrypt_block v0, w2, x1, x7, w8
+ eor v0.16b, v0.16b, v3.16b
+ encrypt_block v0, w2, x1, x7, w8
+ eor v0.16b, v0.16b, v4.16b
+ cmp w3, wzr
+ csinv x5, x6, xzr, eq
+ cbz w5, .Lmacout
+ encrypt_block v0, w2, x1, x7, w8
+ b .Lmacloop4x
+.Lmac1x:
+ add w3, w3, #4
.Lmacloop:
cbz w3, .Lmacout
ld1 {v1.16b}, [x0], #16 /* get next pt block */
@@ -544,7 +426,6 @@ AES_ENTRY(aes_mac_update)
csinv x5, x6, xzr, eq
cbz w5, .Lmacout
-.Lmacenc:
encrypt_block v0, w2, x1, x7, w8
b .Lmacloop
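
aes_mac_update is a CBC-MAC core: the digest block absorbs each input block by XOR and is then encrypted, and the 4x loop above simply unrolls four absorb/encrypt steps per iteration. The trailing encryption is skipped when no data remains and the caller passed enc_after == 0 (the csinv above), so a later update call can keep absorbing. A scalar model, assuming a hypothetical aes_encrypt_one():

#include <stdint.h>

/* hypothetical single-block primitive standing in for encrypt_block */
void aes_encrypt_one(uint8_t out[16], const uint8_t in[16], const void *key);

/* CBC-MAC update: dg = E(dg ^ m_i) for every block. The asm makes the
 * leading/trailing encryptions conditional (enc_before/enc_after) so the
 * digest can keep absorbing across update calls; this model always
 * encrypts. */
void mac_update_ref(const uint8_t *in, int blocks, uint8_t dg[16],
		    const void *key)
{
	while (blocks--) {
		for (int i = 0; i < 16; i++)
			dg[i] ^= in[i];
		aes_encrypt_one(dg, dg, key);
		in += 16;
	}
}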
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index c55d68ccb89f..e7a95a566462 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -46,10 +46,9 @@ asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
/* borrowed from aes-neon-blk.ko */
asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
- int rounds, int blocks, int first);
+ int rounds, int blocks);
asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
- int rounds, int blocks, u8 iv[],
- int first);
+ int rounds, int blocks, u8 iv[]);
struct aesbs_ctx {
u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32];
@@ -100,9 +99,8 @@ static int __ecb_crypt(struct skcipher_request *req,
struct skcipher_walk walk;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -110,12 +108,13 @@ static int __ecb_crypt(struct skcipher_request *req,
blocks = round_down(blocks,
walk.stride / AES_BLOCK_SIZE);
+ kernel_neon_begin();
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
ctx->rounds, blocks);
+ kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -157,22 +156,21 @@ static int cbc_encrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
- int err, first = 1;
+ int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
/* fall back to the non-bitsliced NEON implementation */
+ kernel_neon_begin();
neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->enc, ctx->key.rounds, blocks, walk.iv,
- first);
+ ctx->enc, ctx->key.rounds, blocks,
+ walk.iv);
+ kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
- first = 0;
}
- kernel_neon_end();
return err;
}
@@ -183,9 +181,8 @@ static int cbc_decrypt(struct skcipher_request *req)
struct skcipher_walk walk;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -193,13 +190,14 @@ static int cbc_decrypt(struct skcipher_request *req)
blocks = round_down(blocks,
walk.stride / AES_BLOCK_SIZE);
+ kernel_neon_begin();
aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->key.rk, ctx->key.rounds, blocks,
walk.iv);
+ kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
- kernel_neon_end();
return err;
}
@@ -231,9 +229,8 @@ static int ctr_encrypt(struct skcipher_request *req)
u8 buf[AES_BLOCK_SIZE];
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_neon_begin();
while (walk.nbytes > 0) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
@@ -244,8 +241,10 @@ static int ctr_encrypt(struct skcipher_request *req)
final = NULL;
}
+ kernel_neon_begin();
aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->rk, ctx->rounds, blocks, walk.iv, final);
+ kernel_neon_end();
if (final) {
u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -260,8 +259,6 @@ static int ctr_encrypt(struct skcipher_request *req)
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
- kernel_neon_end();
-
return err;
}
@@ -306,12 +303,11 @@ static int __xts_crypt(struct skcipher_request *req,
struct skcipher_walk walk;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
kernel_neon_begin();
-
- neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey,
- ctx->key.rounds, 1, 1);
+ neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
+ kernel_neon_end();
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -320,13 +316,13 @@ static int __xts_crypt(struct skcipher_request *req,
blocks = round_down(blocks,
walk.stride / AES_BLOCK_SIZE);
+ kernel_neon_begin();
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
ctx->key.rounds, blocks, walk.iv);
+ kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
- kernel_neon_end();
-
return err;
}
diff --git a/arch/arm64/crypto/chacha20-neon-glue.c b/arch/arm64/crypto/chacha20-neon-glue.c
index cbdb75d15cd0..727579c93ded 100644
--- a/arch/arm64/crypto/chacha20-neon-glue.c
+++ b/arch/arm64/crypto/chacha20-neon-glue.c
@@ -37,12 +37,19 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
u8 buf[CHACHA20_BLOCK_SIZE];
while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
+ kernel_neon_begin();
chacha20_4block_xor_neon(state, dst, src);
+ kernel_neon_end();
bytes -= CHACHA20_BLOCK_SIZE * 4;
src += CHACHA20_BLOCK_SIZE * 4;
dst += CHACHA20_BLOCK_SIZE * 4;
state[12] += 4;
}
+
+ if (!bytes)
+ return;
+
+ kernel_neon_begin();
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block_xor_neon(state, dst, src);
bytes -= CHACHA20_BLOCK_SIZE;
@@ -55,6 +62,7 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
chacha20_block_xor_neon(state, buf, buf);
memcpy(dst, buf, bytes);
}
+ kernel_neon_end();
}
static int chacha20_neon(struct skcipher_request *req)
@@ -68,11 +76,10 @@ static int chacha20_neon(struct skcipher_request *req)
if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE)
return crypto_chacha20_crypt(req);
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
crypto_chacha20_init(state, ctx, walk.iv);
- kernel_neon_begin();
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -83,7 +90,6 @@ static int chacha20_neon(struct skcipher_request *req)
nbytes);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
- kernel_neon_end();
return err;
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index b064d925fe2a..e8880ccdc71f 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -89,21 +89,32 @@ static struct shash_alg algs[] = { {
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- /*
- * Stacking and unstacking a substantial slice of the NEON register
- * file may significantly affect performance for small updates when
- * executing in interrupt context, so fall back to the scalar code
- * in that case.
- */
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
if (!may_use_simd())
return sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
- kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_neon);
- kernel_neon_end();
+ while (len > 0) {
+ unsigned int chunk = len;
+
+ /*
+ * Don't hog the CPU for the entire time it takes to process all
+ * input when running on a preemptible kernel, but process the
+ * data block by block instead.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT) &&
+ chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
+ chunk = SHA256_BLOCK_SIZE -
+ sctx->count % SHA256_BLOCK_SIZE;
+ kernel_neon_begin();
+ sha256_base_do_update(desc, data, chunk,
+ (sha256_block_fn *)sha256_block_neon);
+ kernel_neon_end();
+ data += chunk;
+ len -= chunk;
+ }
return 0;
}
@@ -117,10 +128,9 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
sha256_base_do_finalize(desc,
(sha256_block_fn *)sha256_block_data_order);
} else {
- kernel_neon_begin();
if (len)
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_update_neon(desc, data, len);
+ kernel_neon_begin();
sha256_base_do_finalize(desc,
(sha256_block_fn *)sha256_block_neon);
kernel_neon_end();
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
new file mode 100644
index 000000000000..b14463438b09
--- /dev/null
+++ b/arch/arm64/crypto/speck-neon-core.S
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
+ *
+ * Copyright (c) 2018 Google, Inc
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+ .text
+
+ // arguments
+ ROUND_KEYS .req x0 // const {u64,u32} *round_keys
+ NROUNDS .req w1 // int nrounds
+ NROUNDS_X .req x1
+ DST .req x2 // void *dst
+ SRC .req x3 // const void *src
+ NBYTES .req w4 // unsigned int nbytes
+ TWEAK .req x5 // void *tweak
+
+ // registers which hold the data being encrypted/decrypted
+ // (underscores avoid a naming collision with ARM64 registers x0-x3)
+ X_0 .req v0
+ Y_0 .req v1
+ X_1 .req v2
+ Y_1 .req v3
+ X_2 .req v4
+ Y_2 .req v5
+ X_3 .req v6
+ Y_3 .req v7
+
+ // the round key, duplicated in all lanes
+ ROUND_KEY .req v8
+
+ // index vector for tbl-based 8-bit rotates
+ ROTATE_TABLE .req v9
+ ROTATE_TABLE_Q .req q9
+
+ // temporary registers
+ TMP0 .req v10
+ TMP1 .req v11
+ TMP2 .req v12
+ TMP3 .req v13
+
+ // multiplication table for updating XTS tweaks
+ GFMUL_TABLE .req v14
+ GFMUL_TABLE_Q .req q14
+
+ // next XTS tweak value(s)
+ TWEAKV_NEXT .req v15
+
+ // XTS tweaks for the blocks currently being encrypted/decrypted
+ TWEAKV0 .req v16
+ TWEAKV1 .req v17
+ TWEAKV2 .req v18
+ TWEAKV3 .req v19
+ TWEAKV4 .req v20
+ TWEAKV5 .req v21
+ TWEAKV6 .req v22
+ TWEAKV7 .req v23
+
+ .align 4
+.Lror64_8_table:
+ .octa 0x080f0e0d0c0b0a090007060504030201
+.Lror32_8_table:
+ .octa 0x0c0f0e0d080b0a090407060500030201
+.Lrol64_8_table:
+ .octa 0x0e0d0c0b0a09080f0605040302010007
+.Lrol32_8_table:
+ .octa 0x0e0d0c0f0a09080b0605040702010003
+.Lgf128mul_table:
+ .octa 0x00000000000000870000000000000001
+.Lgf64mul_table:
+ .octa 0x0000000000000000000000002d361b00
+
+/*
+ * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
+ *
+ * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
+ * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
+ * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
+ * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
+ */
+.macro _speck_round_128bytes n, lanes
+
+ // x = ror(x, 8)
+ tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
+ tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
+ tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
+ tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
+
+ // x += y
+ add X_0.\lanes, X_0.\lanes, Y_0.\lanes
+ add X_1.\lanes, X_1.\lanes, Y_1.\lanes
+ add X_2.\lanes, X_2.\lanes, Y_2.\lanes
+ add X_3.\lanes, X_3.\lanes, Y_3.\lanes
+
+ // x ^= k
+ eor X_0.16b, X_0.16b, ROUND_KEY.16b
+ eor X_1.16b, X_1.16b, ROUND_KEY.16b
+ eor X_2.16b, X_2.16b, ROUND_KEY.16b
+ eor X_3.16b, X_3.16b, ROUND_KEY.16b
+
+ // y = rol(y, 3)
+ shl TMP0.\lanes, Y_0.\lanes, #3
+ shl TMP1.\lanes, Y_1.\lanes, #3
+ shl TMP2.\lanes, Y_2.\lanes, #3
+ shl TMP3.\lanes, Y_3.\lanes, #3
+ sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
+ sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
+ sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
+ sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
+
+ // y ^= x
+ eor Y_0.16b, TMP0.16b, X_0.16b
+ eor Y_1.16b, TMP1.16b, X_1.16b
+ eor Y_2.16b, TMP2.16b, X_2.16b
+ eor Y_3.16b, TMP3.16b, X_3.16b
+.endm
+
+/*
+ * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
+ *
+ * This is the inverse of _speck_round_128bytes().
+ */
+.macro _speck_unround_128bytes n, lanes
+
+ // y ^= x
+ eor TMP0.16b, Y_0.16b, X_0.16b
+ eor TMP1.16b, Y_1.16b, X_1.16b
+ eor TMP2.16b, Y_2.16b, X_2.16b
+ eor TMP3.16b, Y_3.16b, X_3.16b
+
+ // y = ror(y, 3)
+ ushr Y_0.\lanes, TMP0.\lanes, #3
+ ushr Y_1.\lanes, TMP1.\lanes, #3
+ ushr Y_2.\lanes, TMP2.\lanes, #3
+ ushr Y_3.\lanes, TMP3.\lanes, #3
+ sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
+ sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
+ sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
+ sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
+
+ // x ^= k
+ eor X_0.16b, X_0.16b, ROUND_KEY.16b
+ eor X_1.16b, X_1.16b, ROUND_KEY.16b
+ eor X_2.16b, X_2.16b, ROUND_KEY.16b
+ eor X_3.16b, X_3.16b, ROUND_KEY.16b
+
+ // x -= y
+ sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
+ sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
+ sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
+ sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
+
+ // x = rol(x, 8)
+ tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
+ tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
+ tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
+ tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
+.endm
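
In scalar terms, the two macros above are the standard Speck round and its exact inverse applied to every (x, y) lane pair; a sketch for the 64-bit lanes of Speck128 (Speck64 is the same with 32-bit words):

#include <stdint.h>

/* One Speck128 round on a single (x, y) pair, i.e. what
 * _speck_round_128bytes does across eight blocks at once. */
static void speck128_round(uint64_t *x, uint64_t *y, uint64_t k)
{
	*x = (*x >> 8) | (*x << 56);	/* x = ror(x, 8) */
	*x += *y;			/* x += y */
	*x ^= k;			/* x ^= k */
	*y = (*y << 3) | (*y >> 61);	/* y = rol(y, 3) */
	*y ^= *x;			/* y ^= x */
}

/* The inverse, matching _speck_unround_128bytes. */
static void speck128_unround(uint64_t *x, uint64_t *y, uint64_t k)
{
	*y ^= *x;			/* y ^= x */
	*y = (*y >> 3) | (*y << 61);	/* y = ror(y, 3) */
	*x ^= k;			/* x ^= k */
	*x -= *y;			/* x -= y */
	*x = (*x << 8) | (*x >> 56);	/* x = rol(x, 8) */
}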
+
+.macro _next_xts_tweak next, cur, tmp, n
+.if \n == 64
+ /*
+ * Calculate the next tweak by multiplying the current one by x,
+ * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
+ */
+ sshr \tmp\().2d, \cur\().2d, #63
+ and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
+ shl \next\().2d, \cur\().2d, #1
+ ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
+ eor \next\().16b, \next\().16b, \tmp\().16b
+.else
+ /*
+ * Calculate the next two tweaks by multiplying the current ones by x^2,
+ * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
+ */
+ ushr \tmp\().2d, \cur\().2d, #62
+ shl \next\().2d, \cur\().2d, #2
+ tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
+ eor \next\().16b, \next\().16b, \tmp\().16b
+.endif
+.endm
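
Scalar equivalents of the two updates; the GF(2^64) case reproduces the four byte values that .Lgf64mul_table feeds to tbl (0x00, 0x1b, 0x36, 0x2d), which are the reductions contributed by the two bits shifted out:

#include <stdint.h>

/* Speck128-XTS: multiply the 128-bit tweak by x,
 * mod x^128 + x^7 + x^2 + x + 1 (t[0] is the low half). */
static void next_tweak128(uint64_t t[2])
{
	uint64_t carry = t[1] >> 63;

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
}

/* Speck64-XTS: multiply a 64-bit tweak by x^2,
 * mod x^64 + x^4 + x^3 + x + 1. */
static uint64_t next_tweak64_by_x2(uint64_t t)
{
	/* per-bit reductions of x^64 (0x1b) and x^65 (0x36), combined:
	 * exactly the low bytes of .Lgf64mul_table */
	static const uint64_t reduce[4] = { 0x00, 0x1b, 0x36, 0x2d };

	return (t << 2) ^ reduce[t >> 62];
}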
+
+/*
+ * _speck_xts_crypt() - Speck-XTS encryption/decryption
+ *
+ * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
+ * using Speck-XTS, specifically the variant with a block size of '2n' and round
+ * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
+ * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
+ * nonzero multiple of 128.
+ */
+.macro _speck_xts_crypt n, lanes, decrypting
+
+ /*
+ * If decrypting, modify the ROUND_KEYS parameter to point to the last
+ * round key rather than the first, since for decryption the round keys
+ * are used in reverse order.
+ */
+.if \decrypting
+ mov NROUNDS, NROUNDS /* zero the high 32 bits */
+.if \n == 64
+ add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
+ sub ROUND_KEYS, ROUND_KEYS, #8
+.else
+ add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
+ sub ROUND_KEYS, ROUND_KEYS, #4
+.endif
+.endif
+
+ // Load the index vector for tbl-based 8-bit rotates
+.if \decrypting
+ ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
+.else
+ ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
+.endif
+
+ // One-time XTS preparation
+.if \n == 64
+ // Load first tweak
+ ld1 {TWEAKV0.16b}, [TWEAK]
+
+ // Load GF(2^128) multiplication table
+ ldr GFMUL_TABLE_Q, .Lgf128mul_table
+.else
+ // Load first tweak
+ ld1 {TWEAKV0.8b}, [TWEAK]
+
+ // Load GF(2^64) multiplication table
+ ldr GFMUL_TABLE_Q, .Lgf64mul_table
+
+ // Calculate second tweak, packing it together with the first
+ ushr TMP0.2d, TWEAKV0.2d, #63
+ shl TMP1.2d, TWEAKV0.2d, #1
+ tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
+ eor TMP0.8b, TMP0.8b, TMP1.8b
+ mov TWEAKV0.d[1], TMP0.d[0]
+.endif
+
+.Lnext_128bytes_\@:
+
+ // Calculate XTS tweaks for next 128 bytes
+ _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
+ _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
+ _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
+ _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
+ _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
+ _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
+ _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
+ _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
+
+ // Load the next source blocks into {X,Y}[0-3]
+ ld1 {X_0.16b-Y_1.16b}, [SRC], #64
+ ld1 {X_2.16b-Y_3.16b}, [SRC], #64
+
+ // XOR the source blocks with their XTS tweaks
+ eor TMP0.16b, X_0.16b, TWEAKV0.16b
+ eor Y_0.16b, Y_0.16b, TWEAKV1.16b
+ eor TMP1.16b, X_1.16b, TWEAKV2.16b
+ eor Y_1.16b, Y_1.16b, TWEAKV3.16b
+ eor TMP2.16b, X_2.16b, TWEAKV4.16b
+ eor Y_2.16b, Y_2.16b, TWEAKV5.16b
+ eor TMP3.16b, X_3.16b, TWEAKV6.16b
+ eor Y_3.16b, Y_3.16b, TWEAKV7.16b
+
+ /*
+ * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
+ * that the X[0-3] registers contain only the second halves of blocks,
+ * and the Y[0-3] registers contain only the first halves of blocks.
+ * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
+ */
+ uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
+ uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
+ uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
+ uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
+ uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
+ uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
+ uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
+ uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
+
+ // Do the cipher rounds
+ mov x6, ROUND_KEYS
+ mov w7, NROUNDS
+.Lnext_round_\@:
+.if \decrypting
+ ld1r {ROUND_KEY.\lanes}, [x6]
+ sub x6, x6, #( \n / 8 )
+ _speck_unround_128bytes \n, \lanes
+.else
+ ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
+ _speck_round_128bytes \n, \lanes
+.endif
+ subs w7, w7, #1
+ bne .Lnext_round_\@
+
+ // Re-interleave the 'x' and 'y' elements of each block
+ zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
+ zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
+ zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
+ zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
+ zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
+ zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
+ zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
+ zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
+
+ // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
+ eor X_0.16b, TMP0.16b, TWEAKV0.16b
+ eor Y_0.16b, Y_0.16b, TWEAKV1.16b
+ eor X_1.16b, TMP1.16b, TWEAKV2.16b
+ eor Y_1.16b, Y_1.16b, TWEAKV3.16b
+ eor X_2.16b, TMP2.16b, TWEAKV4.16b
+ eor Y_2.16b, Y_2.16b, TWEAKV5.16b
+ eor X_3.16b, TMP3.16b, TWEAKV6.16b
+ eor Y_3.16b, Y_3.16b, TWEAKV7.16b
+ mov TWEAKV0.16b, TWEAKV_NEXT.16b
+
+ // Store the ciphertext in the destination buffer
+ st1 {X_0.16b-Y_1.16b}, [DST], #64
+ st1 {X_2.16b-Y_3.16b}, [DST], #64
+
+ // Continue if there are more 128-byte chunks remaining
+ subs NBYTES, NBYTES, #128
+ bne .Lnext_128bytes_\@
+
+ // Store the next tweak and return
+.if \n == 64
+ st1 {TWEAKV_NEXT.16b}, [TWEAK]
+.else
+ st1 {TWEAKV_NEXT.8b}, [TWEAK]
+.endif
+ ret
+.endm
+
+ENTRY(speck128_xts_encrypt_neon)
+ _speck_xts_crypt n=64, lanes=2d, decrypting=0
+ENDPROC(speck128_xts_encrypt_neon)
+
+ENTRY(speck128_xts_decrypt_neon)
+ _speck_xts_crypt n=64, lanes=2d, decrypting=1
+ENDPROC(speck128_xts_decrypt_neon)
+
+ENTRY(speck64_xts_encrypt_neon)
+ _speck_xts_crypt n=32, lanes=4s, decrypting=0
+ENDPROC(speck64_xts_encrypt_neon)
+
+ENTRY(speck64_xts_decrypt_neon)
+ _speck_xts_crypt n=32, lanes=4s, decrypting=1
+ENDPROC(speck64_xts_decrypt_neon)
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
new file mode 100644
index 000000000000..6e233aeb4ff4
--- /dev/null
+++ b/arch/arm64/crypto/speck-neon-glue.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
+ * (64-bit version; based on the 32-bit version)
+ *
+ * Copyright (c) 2018 Google, Inc
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/algapi.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/speck.h>
+#include <crypto/xts.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+/* The assembly functions only handle multiples of 128 bytes */
+#define SPECK_NEON_CHUNK_SIZE 128
+
+/* Speck128 */
+
+struct speck128_xts_tfm_ctx {
+ struct speck128_tfm_ctx main_key;
+ struct speck128_tfm_ctx tweak_key;
+};
+
+asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
+ u8 *, const u8 *);
+typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
+ const void *, unsigned int, void *);
+
+static __always_inline int
+__speck128_xts_crypt(struct skcipher_request *req,
+ speck128_crypt_one_t crypt_one,
+ speck128_xts_crypt_many_t crypt_many)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ le128 tweak;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+ crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+ u8 *dst = walk.dst.virt.addr;
+ const u8 *src = walk.src.virt.addr;
+
+ if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
+ unsigned int count;
+
+ count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
+ kernel_neon_begin();
+ (*crypt_many)(ctx->main_key.round_keys,
+ ctx->main_key.nrounds,
+ dst, src, count, &tweak);
+ kernel_neon_end();
+ dst += count;
+ src += count;
+ nbytes -= count;
+ }
+
+ /* Handle any remainder with generic code */
+ while (nbytes >= sizeof(tweak)) {
+ le128_xor((le128 *)dst, (const le128 *)src, &tweak);
+ (*crypt_one)(&ctx->main_key, dst, dst);
+ le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
+ gf128mul_x_ble(&tweak, &tweak);
+
+ dst += sizeof(tweak);
+ src += sizeof(tweak);
+ nbytes -= sizeof(tweak);
+ }
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int speck128_xts_encrypt(struct skcipher_request *req)
+{
+ return __speck128_xts_crypt(req, crypto_speck128_encrypt,
+ speck128_xts_encrypt_neon);
+}
+
+static int speck128_xts_decrypt(struct skcipher_request *req)
+{
+ return __speck128_xts_crypt(req, crypto_speck128_decrypt,
+ speck128_xts_decrypt_neon);
+}
+
+static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ keylen /= 2;
+
+ err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
+ if (err)
+ return err;
+
+ return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
+}
+
+/* Speck64 */
+
+struct speck64_xts_tfm_ctx {
+ struct speck64_tfm_ctx main_key;
+ struct speck64_tfm_ctx tweak_key;
+};
+
+asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
+ void *dst, const void *src,
+ unsigned int nbytes, void *tweak);
+
+typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
+ u8 *, const u8 *);
+typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
+ const void *, unsigned int, void *);
+
+static __always_inline int
+__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
+ speck64_xts_crypt_many_t crypt_many)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ __le64 tweak;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+ crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+ u8 *dst = walk.dst.virt.addr;
+ const u8 *src = walk.src.virt.addr;
+
+ if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
+ unsigned int count;
+
+ count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
+ kernel_neon_begin();
+ (*crypt_many)(ctx->main_key.round_keys,
+ ctx->main_key.nrounds,
+ dst, src, count, &tweak);
+ kernel_neon_end();
+ dst += count;
+ src += count;
+ nbytes -= count;
+ }
+
+ /* Handle any remainder with generic code */
+ while (nbytes >= sizeof(tweak)) {
+ *(__le64 *)dst = *(__le64 *)src ^ tweak;
+ (*crypt_one)(&ctx->main_key, dst, dst);
+ *(__le64 *)dst ^= tweak;
+ tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
+ ((tweak & cpu_to_le64(1ULL << 63)) ?
+ 0x1B : 0));
+ dst += sizeof(tweak);
+ src += sizeof(tweak);
+ nbytes -= sizeof(tweak);
+ }
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static int speck64_xts_encrypt(struct skcipher_request *req)
+{
+ return __speck64_xts_crypt(req, crypto_speck64_encrypt,
+ speck64_xts_encrypt_neon);
+}
+
+static int speck64_xts_decrypt(struct skcipher_request *req)
+{
+ return __speck64_xts_crypt(req, crypto_speck64_decrypt,
+ speck64_xts_decrypt_neon);
+}
+
+static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ keylen /= 2;
+
+ err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
+ if (err)
+ return err;
+
+ return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
+}
+
+static struct skcipher_alg speck_algs[] = {
+ {
+ .base.cra_name = "xts(speck128)",
+ .base.cra_driver_name = "xts-speck128-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SPECK128_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SPECK128_128_KEY_SIZE,
+ .max_keysize = 2 * SPECK128_256_KEY_SIZE,
+ .ivsize = SPECK128_BLOCK_SIZE,
+ .walksize = SPECK_NEON_CHUNK_SIZE,
+ .setkey = speck128_xts_setkey,
+ .encrypt = speck128_xts_encrypt,
+ .decrypt = speck128_xts_decrypt,
+ }, {
+ .base.cra_name = "xts(speck64)",
+ .base.cra_driver_name = "xts-speck64-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = SPECK64_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SPECK64_96_KEY_SIZE,
+ .max_keysize = 2 * SPECK64_128_KEY_SIZE,
+ .ivsize = SPECK64_BLOCK_SIZE,
+ .walksize = SPECK_NEON_CHUNK_SIZE,
+ .setkey = speck64_xts_setkey,
+ .encrypt = speck64_xts_encrypt,
+ .decrypt = speck64_xts_decrypt,
+ }
+};
+
+static int __init speck_neon_module_init(void)
+{
+ if (!(elf_hwcap & HWCAP_ASIMD))
+ return -ENODEV;
+ return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+static void __exit speck_neon_module_exit(void)
+{
+ crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+module_init(speck_neon_module_init);
+module_exit(speck_neon_module_exit);
+
+MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("xts(speck128)");
+MODULE_ALIAS_CRYPTO("xts-speck128-neon");
+MODULE_ALIAS_CRYPTO("xts(speck64)");
+MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 12e8484a8ee7..e762ef417562 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -94,23 +94,30 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define STACK_OFFSET 8*3
-#define HashKey 16*0 // store HashKey <<1 mod poly here
-#define HashKey_2 16*1 // store HashKey^2 <<1 mod poly here
-#define HashKey_3 16*2 // store HashKey^3 <<1 mod poly here
-#define HashKey_4 16*3 // store HashKey^4 <<1 mod poly here
-#define HashKey_k 16*4 // store XOR of High 64 bits and Low 64
+
+#define AadHash 16*0
+#define AadLen 16*1
+#define InLen (16*1)+8
+#define PBlockEncKey 16*2
+#define OrigIV 16*3
+#define CurCount 16*4
+#define PBlockLen 16*5
+#define HashKey 16*6 // store HashKey <<1 mod poly here
+#define HashKey_2 16*7 // store HashKey^2 <<1 mod poly here
+#define HashKey_3 16*8 // store HashKey^3 <<1 mod poly here
+#define HashKey_4 16*9 // store HashKey^4 <<1 mod poly here
+#define HashKey_k 16*10 // store XOR of High 64 bits and Low 64
// bits of HashKey <<1 mod poly here
// (for Karatsuba purposes)
-#define HashKey_2_k 16*5 // store XOR of High 64 bits and Low 64
+#define HashKey_2_k 16*11 // store XOR of High 64 bits and Low 64
// bits of HashKey^2 <<1 mod poly here
// (for Karatsuba purposes)
-#define HashKey_3_k 16*6 // store XOR of High 64 bits and Low 64
+#define HashKey_3_k 16*12 // store XOR of High 64 bits and Low 64
// bits of HashKey^3 <<1 mod poly here
// (for Karatsuba purposes)
-#define HashKey_4_k 16*7 // store XOR of High 64 bits and Low 64
+#define HashKey_4_k 16*13 // store XOR of High 64 bits and Low 64
// bits of HashKey^4 <<1 mod poly here
// (for Karatsuba purposes)
-#define VARIABLE_OFFSET 16*8
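
Taken together, the new offsets describe a per-request context rather than stack scratch space; a struct of roughly the following shape would produce this layout (inferred from the offsets and the ctx_data.* comments below, presumably mirrored by a declaration on the C side):

#include <linux/types.h>

/* Layout implied by the AadHash..HashKey_4_k offsets above; field names
 * follow the ctx_data.* comments used by GCM_INIT. */
struct gcm_context_data {
	u8  aad_hash[16];		/* AadHash:      16*0   */
	u64 aad_length;			/* AadLen:       16*1   */
	u64 in_length;			/* InLen:        16*1+8 */
	u8  partial_block_enc_key[16];	/* PBlockEncKey: 16*2   */
	u8  orig_IV[16];		/* OrigIV:       16*3   */
	u8  current_counter[16];	/* CurCount:     16*4   */
	u64 partial_block_len;		/* PBlockLen:    16*5   */
	u64 unused;
	u8  hash_keys[16 * 8];		/* HashKey .. HashKey_4_k */
};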
#define arg1 rdi
#define arg2 rsi
@@ -118,10 +125,11 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define arg4 rcx
#define arg5 r8
#define arg6 r9
-#define arg7 STACK_OFFSET+8(%r14)
-#define arg8 STACK_OFFSET+16(%r14)
-#define arg9 STACK_OFFSET+24(%r14)
-#define arg10 STACK_OFFSET+32(%r14)
+#define arg7 STACK_OFFSET+8(%rsp)
+#define arg8 STACK_OFFSET+16(%rsp)
+#define arg9 STACK_OFFSET+24(%rsp)
+#define arg10 STACK_OFFSET+32(%rsp)
+#define arg11 STACK_OFFSET+40(%rsp)
#define keysize 2*15*16(%arg1)
#endif
@@ -171,6 +179,332 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define TKEYP T1
#endif
+.macro FUNC_SAVE
+ push %r12
+ push %r13
+ push %r14
+#
+# the states of %xmm registers %xmm6:%xmm15 are not saved;
+# all %xmm registers are clobbered
+#
+.endm
+
+
+.macro FUNC_RESTORE
+ pop %r14
+ pop %r13
+ pop %r12
+.endm
+
+# Precompute hashkeys.
+# Input: Hash subkey.
+# Output: HashKeys stored in gcm_context_data. Only needs to be called
+# once per key.
+# clobbers r12, and tmp xmm registers.
+.macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
+ mov \SUBKEY, %r12
+ movdqu (%r12), \TMP3
+ movdqa SHUF_MASK(%rip), \TMP2
+ PSHUFB_XMM \TMP2, \TMP3
+
+ # precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
+
+ movdqa \TMP3, \TMP2
+ psllq $1, \TMP3
+ psrlq $63, \TMP2
+ movdqa \TMP2, \TMP1
+ pslldq $8, \TMP2
+ psrldq $8, \TMP1
+ por \TMP2, \TMP3
+
+ # reduce HashKey<<1
+
+ pshufd $0x24, \TMP1, \TMP2
+ pcmpeqd TWOONE(%rip), \TMP2
+ pand POLY(%rip), \TMP2
+ pxor \TMP2, \TMP3
+ movdqa \TMP3, HashKey(%arg2)
+
+ movdqa \TMP3, \TMP5
+ pshufd $78, \TMP3, \TMP1
+ pxor \TMP3, \TMP1
+ movdqa \TMP1, HashKey_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+ movdqa \TMP5, HashKey_2(%arg2)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+ movdqa \TMP1, HashKey_2_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+ movdqa \TMP5, HashKey_3(%arg2)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+ movdqa \TMP1, HashKey_3_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+ movdqa \TMP5, HashKey_4(%arg2)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+ movdqa \TMP1, HashKey_4_k(%arg2)
+.endm
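
The HashKey_i_k slots cache the XOR of the high and low 64-bit halves of each hash-key power. That is the Karatsuba shortcut the comments refer to: a 128x128 carry-less multiply then needs three 64x64 multiplies instead of four. A sketch of the identity, with clmul64() as a hypothetical 64x64 carry-less multiply:

#include <stdint.h>

typedef struct { uint64_t lo, hi; } u128;	/* sketch-local 128-bit value */

u128 clmul64(uint64_t a, uint64_t b);	/* hypothetical carry-less multiply */

/* 128x128 carry-less multiply via Karatsuba. The caller passes
 * a_k = a_hi ^ a_lo, which is exactly what the HashKey_i_k slots cache. */
static void clmul128(uint64_t a_hi, uint64_t a_lo, uint64_t a_k,
		     uint64_t b_hi, uint64_t b_lo,
		     u128 *out_hi, u128 *out_lo)
{
	u128 hh = clmul64(a_hi, b_hi);
	u128 ll = clmul64(a_lo, b_lo);
	u128 mm = clmul64(a_k, b_hi ^ b_lo);
	uint64_t mid_lo = mm.lo ^ hh.lo ^ ll.lo;	/* middle term */
	uint64_t mid_hi = mm.hi ^ hh.hi ^ ll.hi;

	out_lo->lo = ll.lo;		/* fold into the 256-bit product */
	out_lo->hi = ll.hi ^ mid_lo;
	out_hi->lo = hh.lo ^ mid_hi;
	out_hi->hi = hh.hi;
}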
+
+# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
+# Clobbers rax, r10-r13 and xmm0-xmm6, %xmm13
+.macro GCM_INIT Iv SUBKEY AAD AADLEN
+ mov \AADLEN, %r11
+ mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
+ xor %r11, %r11
+ mov %r11, InLen(%arg2) # ctx_data.in_length = 0
+ mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
+ mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
+ mov \Iv, %rax
+ movdqu (%rax), %xmm0
+ movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv
+
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm0
+ movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
+
+ PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
+ movdqa HashKey(%arg2), %xmm13
+
+ CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
+ %xmm4, %xmm5, %xmm6
+.endm
+
+# GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context
+# struct has been initialized by GCM_INIT.
+# Requires the input data be at least 1 byte long because of READ_PARTIAL_BLOCK
+# Clobbers rax, r10-r13, and xmm0-xmm15
+.macro GCM_ENC_DEC operation
+ movdqu AadHash(%arg2), %xmm8
+ movdqu HashKey(%arg2), %xmm13
+ add %arg5, InLen(%arg2)
+
+ xor %r11, %r11 # initialise the data pointer offset as zero
+ PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation
+
+ sub %r11, %arg5 # sub partial block data used
+ mov %arg5, %r13 # save the number of bytes
+
+ and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
+ mov %r13, %r12
+ # Encrypt/Decrypt first few blocks
+
+ and $(3<<4), %r12
+ jz _initial_num_blocks_is_0_\@
+ cmp $(2<<4), %r12
+ jb _initial_num_blocks_is_1_\@
+ je _initial_num_blocks_is_2_\@
+_initial_num_blocks_is_3_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation
+ sub $48, %r13
+ jmp _initial_blocks_\@
+_initial_num_blocks_is_2_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation
+ sub $32, %r13
+ jmp _initial_blocks_\@
+_initial_num_blocks_is_1_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation
+ sub $16, %r13
+ jmp _initial_blocks_\@
+_initial_num_blocks_is_0_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation
+_initial_blocks_\@:
+
+ # Main loop - Encrypt/Decrypt remaining blocks
+
+ cmp $0, %r13
+ je _zero_cipher_left_\@
+ sub $64, %r13
+ je _four_cipher_left_\@
+_crypt_by_4_\@:
+ GHASH_4_ENCRYPT_4_PARALLEL_\operation %xmm9, %xmm10, %xmm11, %xmm12, \
+ %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \
+ %xmm7, %xmm8, enc
+ add $64, %r11
+ sub $64, %r13
+ jne _crypt_by_4_\@
+_four_cipher_left_\@:
+ GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_\@:
+ movdqu %xmm8, AadHash(%arg2)
+ movdqu %xmm0, CurCount(%arg2)
+
+ mov %arg5, %r13
+ and $15, %r13 # %r13 = arg5 (mod 16)
+ je _multiple_of_16_bytes_\@
+
+ mov %r13, PBlockLen(%arg2)
+
+ # Handle the last <16 Byte block separately
+ paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
+ movdqu %xmm0, CurCount(%arg2)
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm0
+
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
+ movdqu %xmm0, PBlockEncKey(%arg2)
+
+ cmp $16, %arg5
+ jge _large_enough_update_\@
+
+ lea (%arg4,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+ jmp _data_read_\@
+
+_large_enough_update_\@:
+ sub $16, %r11
+ add %r13, %r11
+
+ # receive the last <16 Byte block
+ movdqu (%arg4, %r11, 1), %xmm1
+
+ sub %r13, %r11
+ add $16, %r11
+
+ lea SHIFT_MASK+16(%rip), %r12
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (r13 is the number of bytes in plaintext mod 16)
+ sub %r13, %r12
+ # get the appropriate shuffle mask
+ movdqu (%r12), %xmm2
+ # shift right 16-r13 bytes
+ PSHUFB_XMM %xmm2, %xmm1
+
+_data_read_\@:
+ lea ALL_F+16(%rip), %r12
+ sub %r13, %r12
+
+.ifc \operation, dec
+ movdqa %xmm1, %xmm2
+.endif
+ pxor %xmm1, %xmm0 # XOR Encrypt(K, Yn)
+ movdqu (%r12), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+.ifc \operation, dec
+ pand %xmm1, %xmm2
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10 ,%xmm2
+
+ pxor %xmm2, %xmm8
+.else
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10,%xmm0
+
+ pxor %xmm0, %xmm8
+.endif
+
+ movdqu %xmm8, AadHash(%arg2)
+.ifc \operation, enc
+ # GHASH computation for the last <16 byte block
+ movdqa SHUF_MASK(%rip), %xmm10
+ # shuffle xmm0 back to output as ciphertext
+ PSHUFB_XMM %xmm10, %xmm0
+.endif
+
+ # Output %r13 bytes
+ MOVQ_R64_XMM %xmm0, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left_\@
+ mov %rax, (%arg3 , %r11, 1)
+ add $8, %r11
+ psrldq $8, %xmm0
+ MOVQ_R64_XMM %xmm0, %rax
+ sub $8, %r13
+_less_than_8_bytes_left_\@:
+ mov %al, (%arg3, %r11, 1)
+ add $1, %r11
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left_\@
+_multiple_of_16_bytes_\@:
+.endm
+
+# GCM_COMPLETE Finishes update of tag of last partial block
+# Output: Authorization Tag (AUTH_TAG)
+# Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15
+.macro GCM_COMPLETE AUTHTAG AUTHTAGLEN
+ movdqu AadHash(%arg2), %xmm8
+ movdqu HashKey(%arg2), %xmm13
+
+ mov PBlockLen(%arg2), %r12
+
+ cmp $0, %r12
+ je _partial_done\@
+
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+
+_partial_done\@:
+ mov AadLen(%arg2), %r12 # %r12 = aadLen (number of bytes)
+ shl $3, %r12 # convert into number of bits
+ movd %r12d, %xmm15 # len(A) in %xmm15
+ mov InLen(%arg2), %r12
+ shl $3, %r12 # len(C) in bits (*128)
+ MOVQ_R64_XMM %r12, %xmm1
+
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+ pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
+ pxor %xmm15, %xmm8
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+ # final GHASH computation
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm8
+
+ movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
+ pxor %xmm8, %xmm0
+_return_T_\@:
+ mov \AUTHTAG, %r10 # %r10 = authTag
+ mov \AUTHTAGLEN, %r11 # %r11 = auth_tag_len
+ cmp $16, %r11
+ je _T_16_\@
+ cmp $8, %r11
+ jl _T_4_\@
+_T_8_\@:
+ MOVQ_R64_XMM %xmm0, %rax
+ mov %rax, (%r10)
+ add $8, %r10
+ sub $8, %r11
+ psrldq $8, %xmm0
+ cmp $0, %r11
+ je _return_T_done_\@
+_T_4_\@:
+ movd %xmm0, %eax
+ mov %eax, (%r10)
+ add $4, %r10
+ sub $4, %r11
+ psrldq $4, %xmm0
+ cmp $0, %r11
+ je _return_T_done_\@
+_T_123_\@:
+ movd %xmm0, %eax
+ cmp $2, %r11
+ jl _T_1_\@
+ mov %ax, (%r10)
+ cmp $2, %r11
+ je _return_T_done_\@
+ add $2, %r10
+ sar $16, %eax
+_T_1_\@:
+ mov %al, (%r10)
+ jmp _return_T_done_\@
+_T_16_\@:
+ movdqu %xmm0, (%r10)
+_return_T_done_\@:
+.endm
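
In C terms the macro performs the last GHASH steps and masks the tag to the requested length; a sketch with ghash_mul() and aes_encrypt_one() as hypothetical stand-ins for GHASH_MUL and ENCRYPT_SINGLE_BLOCK (byte-order shuffles omitted):

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t hi, lo; } blk128;	/* sketch-local 128-bit block */

blk128 ghash_mul(blk128 a, blk128 h);	/* hypothetical GHASH_MUL stand-in */
void aes_encrypt_one(uint8_t out[16], const uint8_t in[16], const void *key);

void gcm_complete_ref(blk128 aad_hash, blk128 hash_key,
		      uint64_t aad_len, uint64_t in_len,
		      uint64_t partial_block_len, const uint8_t orig_iv[16],
		      const void *key, uint8_t *auth_tag, int tag_len)
{
	blk128 s = aad_hash;
	uint8_t t[16];

	if (partial_block_len)		/* fold in a pending partial block */
		s = ghash_mul(s, hash_key);

	s.hi ^= aad_len * 8;		/* len(A) in bits */
	s.lo ^= in_len * 8;		/* len(C) in bits */
	s = ghash_mul(s, hash_key);	/* final GHASH step */

	aes_encrypt_one(t, orig_iv, key);	/* E(K, Y0) */
	for (int i = 0; i < 16; i++)
		t[i] ^= ((uint8_t *)&s)[i];
	memcpy(auth_tag, t, tag_len);	/* 16, 8, 4... per _return_T_ */
}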
#ifdef __x86_64__
/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
@@ -264,232 +598,188 @@ _read_next_byte_lt8_\@:
_done_read_partial_block_\@:
.endm
-/*
-* if a = number of total plaintext bytes
-* b = floor(a/16)
-* num_initial_blocks = b mod 4
-* encrypt the initial num_initial_blocks blocks and apply ghash on
-* the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
-* are clobbered
-* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
-*/
-
-
-.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
-XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- MOVADQ SHUF_MASK(%rip), %xmm14
- mov arg7, %r10 # %r10 = AAD
- mov arg8, %r11 # %r11 = aadLen
- pxor %xmm\i, %xmm\i
- pxor \XMM2, \XMM2
+# CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
+# clobbers r10-11, xmm14
+.macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
+ TMP6 TMP7
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov \AAD, %r10 # %r10 = AAD
+ mov \AADLEN, %r11 # %r11 = aadLen
+ pxor \TMP7, \TMP7
+ pxor \TMP6, \TMP6
cmp $16, %r11
- jl _get_AAD_rest\num_initial_blocks\operation
-_get_AAD_blocks\num_initial_blocks\operation:
- movdqu (%r10), %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor %xmm\i, \XMM2
- GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+ jl _get_AAD_rest\@
+_get_AAD_blocks\@:
+ movdqu (%r10), \TMP7
+ PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pxor \TMP7, \TMP6
+ GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
add $16, %r10
sub $16, %r11
cmp $16, %r11
- jge _get_AAD_blocks\num_initial_blocks\operation
+ jge _get_AAD_blocks\@
- movdqu \XMM2, %xmm\i
+ movdqu \TMP6, \TMP7
/* read the last <16B of AAD */
-_get_AAD_rest\num_initial_blocks\operation:
+_get_AAD_rest\@:
cmp $0, %r11
- je _get_AAD_done\num_initial_blocks\operation
-
- READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor \XMM2, %xmm\i
- GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+ je _get_AAD_done\@
-_get_AAD_done\num_initial_blocks\operation:
- xor %r11, %r11 # initialise the data pointer offset as zero
- # start AES for num_initial_blocks blocks
-
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), \XMM0 # XMM0 = Y0
- PSHUFB_XMM %xmm14, \XMM0
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
+ PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pxor \TMP6, \TMP7
+ GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
+ movdqu \TMP7, \TMP6
-.if (\i == 5) || (\i == 6) || (\i == 7)
- MOVADQ ONE(%RIP),\TMP1
- MOVADQ (%arg1),\TMP2
-.irpc index, \i_seq
- paddd \TMP1, \XMM0 # INCR Y0
- movdqa \XMM0, %xmm\index
- PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
- pxor \TMP2, %xmm\index
-.endr
- lea 0x10(%arg1),%r10
- mov keysize,%eax
- shr $2,%eax # 128->4, 192->6, 256->8
- add $5,%eax # 128->9, 192->11, 256->13
-
-aes_loop_initial_dec\num_initial_blocks:
- MOVADQ (%r10),\TMP1
-.irpc index, \i_seq
- AESENC \TMP1, %xmm\index
-.endr
- add $16,%r10
- sub $1,%eax
- jnz aes_loop_initial_dec\num_initial_blocks
-
- MOVADQ (%r10), \TMP1
-.irpc index, \i_seq
- AESENCLAST \TMP1, %xmm\index # Last Round
-.endr
-.irpc index, \i_seq
- movdqu (%arg3 , %r11, 1), \TMP1
- pxor \TMP1, %xmm\index
- movdqu %xmm\index, (%arg2 , %r11, 1)
- # write back plaintext/ciphertext for num_initial_blocks
- add $16, %r11
-
- movdqa \TMP1, %xmm\index
- PSHUFB_XMM %xmm14, %xmm\index
- # prepare plaintext/ciphertext for GHASH computation
-.endr
-.endif
-
- # apply GHASH on num_initial_blocks blocks
+_get_AAD_done\@:
+ movdqu \TMP6, AadHash(%arg2)
+.endm
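
A scalar model of the absorption loop, with ghash_mul() and load_block_partial() as hypothetical stand-ins for GHASH_MUL and READ_PARTIAL_BLOCK:

#include <stdint.h>

typedef struct { uint64_t hi, lo; } blk128;	/* sketch-local type */

blk128 ghash_mul(blk128 a, blk128 h);	/* hypothetical GHASH_MUL stand-in */
/* hypothetical: read len (1..16) bytes, zero-pad, byte-reflect */
blk128 load_block_partial(const uint8_t *p, int len);

/* AAD absorption as CALC_AAD_HASH performs it: full 16-byte blocks first,
 * then one zero-padded partial block. */
blk128 calc_aad_hash_ref(const uint8_t *aad, int len, blk128 h)
{
	blk128 x = { 0, 0 };

	while (len >= 16) {		/* _get_AAD_blocks loop */
		blk128 b = load_block_partial(aad, 16);

		x.hi ^= b.hi;
		x.lo ^= b.lo;
		x = ghash_mul(x, h);
		aad += 16;
		len -= 16;
	}
	if (len) {			/* _get_AAD_rest tail */
		blk128 b = load_block_partial(aad, len);

		x.hi ^= b.hi;
		x.lo ^= b.lo;
		x = ghash_mul(x, h);
	}
	return x;			/* stored to AadHash(%arg2) */
}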
-.if \i == 5
- pxor %xmm5, %xmm6
- GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- pxor %xmm6, %xmm7
- GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- pxor %xmm7, %xmm8
- GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
-.elseif \i == 6
- pxor %xmm6, %xmm7
- GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- pxor %xmm7, %xmm8
- GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
-.elseif \i == 7
- pxor %xmm7, %xmm8
- GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+# PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks
+# between update calls.
+# Requires the input data be at least 1 byte long due to READ_PARTIAL_BLOCK
+# Outputs encrypted bytes, and updates hash and partial info in gcm_context_data
+# Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13
+.macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
+ AAD_HASH operation
+ mov PBlockLen(%arg2), %r13
+ cmp $0, %r13
+ je _partial_block_done_\@ # Leave Macro if no partial blocks
+ # Read in input data without over reading
+ cmp $16, \PLAIN_CYPH_LEN
+ jl _fewer_than_16_bytes_\@
+ movups (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
+ jmp _data_read_\@
+
+_fewer_than_16_bytes_\@:
+ lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
+ mov \PLAIN_CYPH_LEN, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1
+
+ mov PBlockLen(%arg2), %r13
+
+_data_read_\@: # Finished reading in data
+
+ movdqu PBlockEncKey(%arg2), %xmm9
+ movdqu HashKey(%arg2), %xmm13
+
+ lea SHIFT_MASK(%rip), %r12
+
+ # adjust the shuffle mask pointer to be able to shift r13 bytes
+ # (r13 is the number of bytes in plaintext mod 16)
+ add %r13, %r12
+ movdqu (%r12), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm9 # shift right r13 bytes
+
+.ifc \operation, dec
+ movdqa %xmm1, %xmm3
+ pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
+
+ mov \PLAIN_CYPH_LEN, %r10
+ add %r13, %r10
+ # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+ sub $16, %r10
+ # Determine if the partial block is not being filled and
+ # shift mask accordingly
+ jge _no_extra_mask_1_\@
+ sub %r10, %r12
+_no_extra_mask_1_\@:
+
+ movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ # get the appropriate mask to mask out bottom r13 bytes of xmm9
+ pand %xmm1, %xmm9 # mask out bottom r13 bytes of xmm9
+
+ pand %xmm1, %xmm3
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm3
+ PSHUFB_XMM %xmm2, %xmm3
+ pxor %xmm3, \AAD_HASH
+
+ cmp $0, %r10
+ jl _partial_incomplete_1_\@
+
+ # GHASH computation for the last <16 Byte block
+ GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ xor %rax,%rax
+
+ mov %rax, PBlockLen(%arg2)
+ jmp _dec_done_\@
+_partial_incomplete_1_\@:
+ add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
+_dec_done_\@:
+ movdqu \AAD_HASH, AadHash(%arg2)
+.else
+ pxor %xmm1, %xmm9 # Plaintext XOR E(K, Yn)
+
+ mov \PLAIN_CYPH_LEN, %r10
+ add %r13, %r10
+ # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+ sub $16, %r10
+ # Determine if the partial block is not being filled and
+ # shift mask accordingly
+ jge _no_extra_mask_2_\@
+ sub %r10, %r12
+_no_extra_mask_2_\@:
+
+ movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ # get the appropriate mask to mask out bottom r13 bytes of xmm9
+ pand %xmm1, %xmm9
+
+ movdqa SHUF_MASK(%rip), %xmm1
+ PSHUFB_XMM %xmm1, %xmm9
+ PSHUFB_XMM %xmm2, %xmm9
+ pxor %xmm9, \AAD_HASH
+
+ cmp $0, %r10
+ jl _partial_incomplete_2_\@
+
+ # GHASH computation for the last <16 Byte block
+ GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ xor %rax,%rax
+
+ mov %rax, PBlockLen(%arg2)
+ jmp _encode_done_\@
+_partial_incomplete_2_\@:
+ add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
+_encode_done_\@:
+ movdqu \AAD_HASH, AadHash(%arg2)
+
+ movdqa SHUF_MASK(%rip), %xmm10
+ # shuffle xmm9 back to output as ciphertext
+ PSHUFB_XMM %xmm10, %xmm9
+ PSHUFB_XMM %xmm2, %xmm9
.endif
- cmp $64, %r13
- jl _initial_blocks_done\num_initial_blocks\operation
- # no need for precomputed values
-/*
-*
-* Precomputations for HashKey parallel with encryption of first 4 blocks.
-* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
-*/
- MOVADQ ONE(%rip), \TMP1
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM1
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
-
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM2
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
-
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM3
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
-
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM4
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
-
- MOVADQ 0(%arg1),\TMP1
- pxor \TMP1, \XMM1
- pxor \TMP1, \XMM2
- pxor \TMP1, \XMM3
- pxor \TMP1, \XMM4
- movdqa \TMP3, \TMP5
- pshufd $78, \TMP3, \TMP1
- pxor \TMP3, \TMP1
- movdqa \TMP1, HashKey_k(%rsp)
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^2<<1 (mod poly)
- movdqa \TMP5, HashKey_2(%rsp)
-# HashKey_2 = HashKey^2<<1 (mod poly)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_2_k(%rsp)
-.irpc index, 1234 # do 4 rounds
- movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
-.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_3(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_3_k(%rsp)
-.irpc index, 56789 # do next 5 rounds
- movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
-.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_4(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_4_k(%rsp)
- lea 0xa0(%arg1),%r10
- mov keysize,%eax
- shr $2,%eax # 128->4, 192->6, 256->8
- sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_pre_dec_done\num_initial_blocks
-
-aes_loop_pre_dec\num_initial_blocks:
- MOVADQ (%r10),\TMP2
-.irpc index, 1234
- AESENC \TMP2, %xmm\index
-.endr
- add $16,%r10
- sub $1,%eax
- jnz aes_loop_pre_dec\num_initial_blocks
-
-aes_loop_pre_dec_done\num_initial_blocks:
- MOVADQ (%r10), \TMP2
- AESENCLAST \TMP2, \XMM1
- AESENCLAST \TMP2, \XMM2
- AESENCLAST \TMP2, \XMM3
- AESENCLAST \TMP2, \XMM4
- movdqu 16*0(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM1
- movdqu \XMM1, 16*0(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM1
- movdqu 16*1(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM2
- movdqu \XMM2, 16*1(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM2
- movdqu 16*2(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM3
- movdqu \XMM3, 16*2(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM3
- movdqu 16*3(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM4
- movdqu \XMM4, 16*3(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM4
- add $64, %r11
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
- pxor \XMMDst, \XMM1
-# combine GHASHed value with the corresponding ciphertext
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
-
-_initial_blocks_done\num_initial_blocks\operation:
-
-.endm
+	# output encrypted bytes
+ cmp $0, %r10
+ jl _partial_fill_\@
+ mov %r13, %r12
+ mov $16, %r13
+ # Set r13 to be the number of bytes to write out
+ sub %r12, %r13
+ jmp _count_set_\@
+_partial_fill_\@:
+ mov \PLAIN_CYPH_LEN, %r13
+_count_set_\@:
+ movdqa %xmm9, %xmm0
+ MOVQ_R64_XMM %xmm0, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left_\@
+ mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
+ add $8, \DATA_OFFSET
+ psrldq $8, %xmm0
+ MOVQ_R64_XMM %xmm0, %rax
+ sub $8, %r13
+_less_than_8_bytes_left_\@:
+ movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
+ add $1, \DATA_OFFSET
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left_\@
+_partial_block_done_\@:
+.endm # PARTIAL_BLOCK
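The tail store above drains the masked block through a general-purpose register: one 8-byte store while more than eight bytes remain, then a byte-at-a-time loop driven by shr $8. A minimal C sketch of the same pattern (the helper name is illustrative, not from this patch):

#include <linux/types.h>
#include <linux/string.h>

/* Write len (< 16) bytes of a 16-byte block to out, mirroring the
 * 8-byte-then-bytewise store sequence used by PARTIAL_BLOCK. */
static void write_partial_block(u8 *out, const u8 *block, unsigned int len)
{
	if (len > 8) {			/* one 64-bit store, as via %rax */
		memcpy(out, block, 8);
		out += 8;
		block += 8;
		len -= 8;
	}
	while (len--)			/* remaining bytes one at a time */
		*out++ = *block++;
}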
/*
* if a = number of total plaintext bytes
@@ -499,49 +789,19 @@ _initial_blocks_done\num_initial_blocks\operation:
* the ciphertext
* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
* are clobbered
-* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+* arg1, %arg2, %arg3 are used as a pointer only, not modified
*/
-.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
-XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- MOVADQ SHUF_MASK(%rip), %xmm14
- mov arg7, %r10 # %r10 = AAD
- mov arg8, %r11 # %r11 = aadLen
- pxor %xmm\i, %xmm\i
- pxor \XMM2, \XMM2
+.macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
- cmp $16, %r11
- jl _get_AAD_rest\num_initial_blocks\operation
-_get_AAD_blocks\num_initial_blocks\operation:
- movdqu (%r10), %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor %xmm\i, \XMM2
- GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- add $16, %r10
- sub $16, %r11
- cmp $16, %r11
- jge _get_AAD_blocks\num_initial_blocks\operation
+ movdqu AadHash(%arg2), %xmm\i # XMM0 = Y0
- movdqu \XMM2, %xmm\i
-
- /* read the last <16B of AAD */
-_get_AAD_rest\num_initial_blocks\operation:
- cmp $0, %r11
- je _get_AAD_done\num_initial_blocks\operation
-
- READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor \XMM2, %xmm\i
- GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
-
-_get_AAD_done\num_initial_blocks\operation:
- xor %r11, %r11 # initialise the data pointer offset as zero
# start AES for num_initial_blocks blocks
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), \XMM0 # XMM0 = Y0
- PSHUFB_XMM %xmm14, \XMM0
+ movdqu CurCount(%arg2), \XMM0 # XMM0 = Y0
.if (\i == 5) || (\i == 6) || (\i == 7)
@@ -549,7 +809,11 @@ _get_AAD_done\num_initial_blocks\operation:
MOVADQ 0(%arg1),\TMP2
.irpc index, \i_seq
paddd \TMP1, \XMM0 # INCR Y0
+.ifc \operation, dec
+ movdqa \XMM0, %xmm\index
+.else
MOVADQ \XMM0, %xmm\index
+.endif
PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
pxor \TMP2, %xmm\index
.endr
@@ -558,25 +822,29 @@ _get_AAD_done\num_initial_blocks\operation:
shr $2,%eax # 128->4, 192->6, 256->8
add $5,%eax # 128->9, 192->11, 256->13
-aes_loop_initial_enc\num_initial_blocks:
+aes_loop_initial_\@:
MOVADQ (%r10),\TMP1
.irpc index, \i_seq
AESENC \TMP1, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_initial_enc\num_initial_blocks
+ jnz aes_loop_initial_\@
MOVADQ (%r10), \TMP1
.irpc index, \i_seq
AESENCLAST \TMP1, %xmm\index # Last Round
.endr
.irpc index, \i_seq
- movdqu (%arg3 , %r11, 1), \TMP1
+ movdqu (%arg4 , %r11, 1), \TMP1
pxor \TMP1, %xmm\index
- movdqu %xmm\index, (%arg2 , %r11, 1)
+ movdqu %xmm\index, (%arg3 , %r11, 1)
# write back plaintext/ciphertext for num_initial_blocks
add $16, %r11
+
+.ifc \operation, dec
+ movdqa \TMP1, %xmm\index
+.endif
PSHUFB_XMM %xmm14, %xmm\index
# prepare plaintext/ciphertext for GHASH computation
@@ -602,7 +870,7 @@ aes_loop_initial_enc\num_initial_blocks:
GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
.endif
cmp $64, %r13
- jl _initial_blocks_done\num_initial_blocks\operation
+ jl _initial_blocks_done\@
# no need for precomputed values
/*
*
@@ -631,17 +899,6 @@ aes_loop_initial_enc\num_initial_blocks:
pxor \TMP1, \XMM2
pxor \TMP1, \XMM3
pxor \TMP1, \XMM4
- movdqa \TMP3, \TMP5
- pshufd $78, \TMP3, \TMP1
- pxor \TMP3, \TMP1
- movdqa \TMP1, HashKey_k(%rsp)
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^2<<1 (mod poly)
- movdqa \TMP5, HashKey_2(%rsp)
-# HashKey_2 = HashKey^2<<1 (mod poly)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_2_k(%rsp)
.irpc index, 1234 # do 4 rounds
movaps 0x10*\index(%arg1), \TMP1
AESENC \TMP1, \XMM1
@@ -649,12 +906,6 @@ aes_loop_initial_enc\num_initial_blocks:
AESENC \TMP1, \XMM3
AESENC \TMP1, \XMM4
.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_3(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_3_k(%rsp)
.irpc index, 56789 # do next 5 rounds
movaps 0x10*\index(%arg1), \TMP1
AESENC \TMP1, \XMM1
@@ -662,45 +913,56 @@ aes_loop_initial_enc\num_initial_blocks:
AESENC \TMP1, \XMM3
AESENC \TMP1, \XMM4
.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_4(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_4_k(%rsp)
lea 0xa0(%arg1),%r10
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_pre_enc_done\num_initial_blocks
+ jz aes_loop_pre_done\@
-aes_loop_pre_enc\num_initial_blocks:
+aes_loop_pre_\@:
MOVADQ (%r10),\TMP2
.irpc index, 1234
AESENC \TMP2, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_pre_enc\num_initial_blocks
+ jnz aes_loop_pre_\@
-aes_loop_pre_enc_done\num_initial_blocks:
+aes_loop_pre_done\@:
MOVADQ (%r10), \TMP2
AESENCLAST \TMP2, \XMM1
AESENCLAST \TMP2, \XMM2
AESENCLAST \TMP2, \XMM3
AESENCLAST \TMP2, \XMM4
- movdqu 16*0(%arg3 , %r11 , 1), \TMP1
+ movdqu 16*0(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM1
- movdqu 16*1(%arg3 , %r11 , 1), \TMP1
+.ifc \operation, dec
+ movdqu \XMM1, 16*0(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM1
+.endif
+ movdqu 16*1(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM2
- movdqu 16*2(%arg3 , %r11 , 1), \TMP1
+.ifc \operation, dec
+ movdqu \XMM2, 16*1(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM2
+.endif
+ movdqu 16*2(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM3
- movdqu 16*3(%arg3 , %r11 , 1), \TMP1
+.ifc \operation, dec
+ movdqu \XMM3, 16*2(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM3
+.endif
+ movdqu 16*3(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM4
- movdqu \XMM1, 16*0(%arg2 , %r11 , 1)
- movdqu \XMM2, 16*1(%arg2 , %r11 , 1)
- movdqu \XMM3, 16*2(%arg2 , %r11 , 1)
- movdqu \XMM4, 16*3(%arg2 , %r11 , 1)
+.ifc \operation, dec
+ movdqu \XMM4, 16*3(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM4
+.else
+ movdqu \XMM1, 16*0(%arg3 , %r11 , 1)
+ movdqu \XMM2, 16*1(%arg3 , %r11 , 1)
+ movdqu \XMM3, 16*2(%arg3 , %r11 , 1)
+ movdqu \XMM4, 16*3(%arg3 , %r11 , 1)
+.endif
add $64, %r11
PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
@@ -710,14 +972,14 @@ aes_loop_pre_enc_done\num_initial_blocks:
PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
-_initial_blocks_done\num_initial_blocks\operation:
+_initial_blocks_done\@:
.endm
/*
* encrypt 4 blocks at a time
* ghash the 4 previously encrypted ciphertext blocks
-* arg1, %arg2, %arg3 are used as pointers only, not modified
+* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
@@ -735,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%rsp), \TMP5
+ movdqa HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -754,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%rsp), \TMP5
+ movdqa HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -769,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%rsp), \TMP5
+ movdqa HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -782,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%rsp), \TMP5
+ movdqa HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -796,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%rsp ), \TMP5
+ movdqa HashKey_2(%arg2), \TMP5
	# Multiply TMP5 * HashKey using Karatsuba
@@ -812,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%rsp), \TMP5
+ movdqa HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -830,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%rsp), \TMP5
+ movdqa HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -842,37 +1104,37 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_par_enc_done
+ jz aes_loop_par_enc_done\@
-aes_loop_par_enc:
+aes_loop_par_enc\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
AESENC \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_par_enc
+ jnz aes_loop_par_enc\@
-aes_loop_par_enc_done:
+aes_loop_par_enc_done\@:
MOVADQ (%r10), \TMP3
AESENCLAST \TMP3, \XMM1 # Round 10
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%rsp), \TMP5
+ movdqa HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
- movdqu (%arg3,%r11,1), \TMP3
+ movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
- movdqu 16(%arg3,%r11,1), \TMP3
+ movdqu 16(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
- movdqu 32(%arg3,%r11,1), \TMP3
+ movdqu 32(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
- movdqu 48(%arg3,%r11,1), \TMP3
+ movdqu 48(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
- movdqu \XMM1, (%arg2,%r11,1) # Write to the ciphertext buffer
- movdqu \XMM2, 16(%arg2,%r11,1) # Write to the ciphertext buffer
- movdqu \XMM3, 32(%arg2,%r11,1) # Write to the ciphertext buffer
- movdqu \XMM4, 48(%arg2,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM1, (%arg3,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer
PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
@@ -925,7 +1187,7 @@ aes_loop_par_enc_done:
/*
* decrypt 4 blocks at a time
* ghash the 4 previously decrypted ciphertext blocks
-* arg1, %arg2, %arg3 are used as pointers only, not modified
+* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
@@ -943,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%rsp), \TMP5
+ movdqa HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -962,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%rsp), \TMP5
+ movdqa HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -977,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%rsp), \TMP5
+ movdqa HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -990,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%rsp), \TMP5
+ movdqa HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -1004,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%rsp ), \TMP5
+ movdqa HashKey_2(%arg2), \TMP5
	# Multiply TMP5 * HashKey using Karatsuba
@@ -1020,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%rsp), \TMP5
+ movdqa HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -1038,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%rsp), \TMP5
+ movdqa HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -1050,40 +1312,40 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_par_dec_done
+ jz aes_loop_par_dec_done\@
-aes_loop_par_dec:
+aes_loop_par_dec\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
AESENC \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_par_dec
+ jnz aes_loop_par_dec\@
-aes_loop_par_dec_done:
+aes_loop_par_dec_done\@:
MOVADQ (%r10), \TMP3
AESENCLAST \TMP3, \XMM1 # last round
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%rsp), \TMP5
+ movdqa HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
- movdqu (%arg3,%r11,1), \TMP3
+ movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
- movdqu \XMM1, (%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM1
- movdqu 16(%arg3,%r11,1), \TMP3
+ movdqu 16(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
- movdqu \XMM2, 16(%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM2, 16(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM2
- movdqu 32(%arg3,%r11,1), \TMP3
+ movdqu 32(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
- movdqu \XMM3, 32(%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM3, 32(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM3
- movdqu 48(%arg3,%r11,1), \TMP3
+ movdqu 48(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
- movdqu \XMM4, 48(%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM4
PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
@@ -1143,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM1, \TMP6
pshufd $78, \XMM1, \TMP2
pxor \XMM1, \TMP2
- movdqa HashKey_4(%rsp), \TMP5
+ movdqa HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
- movdqa HashKey_4_k(%rsp), \TMP4
+ movdqa HashKey_4_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqa \XMM1, \XMMDst
movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1156,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM2, \TMP1
pshufd $78, \XMM2, \TMP2
pxor \XMM2, \TMP2
- movdqa HashKey_3(%rsp), \TMP5
+ movdqa HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
- movdqa HashKey_3_k(%rsp), \TMP4
+ movdqa HashKey_3_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM2, \XMMDst
@@ -1171,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM3, \TMP1
pshufd $78, \XMM3, \TMP2
pxor \XMM3, \TMP2
- movdqa HashKey_2(%rsp), \TMP5
+ movdqa HashKey_2(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
- movdqa HashKey_2_k(%rsp), \TMP4
+ movdqa HashKey_2_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM3, \XMMDst
@@ -1184,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM4, \TMP1
pshufd $78, \XMM4, \TMP2
pxor \XMM4, \TMP2
- movdqa HashKey(%rsp), \TMP5
+ movdqa HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
- movdqa HashKey_k(%rsp), \TMP4
+ movdqa HashKey_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM4, \XMMDst
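Each PCLMULQDQ triple above is one Karatsuba multiplication per hash-key power in GF(2)[x]: writing a = a_1 x^64 + a_0 and b = b_1 x^64 + b_0, with addition being XOR,

	a \cdot b = a_1 b_1\,x^{128} \oplus \big[(a_1 \oplus a_0)(b_1 \oplus b_0) \oplus a_1 b_1 \oplus a_0 b_0\big]\,x^{64} \oplus a_0 b_0

so three carry-less multiplies suffice instead of four. The HashKey_i_k slots hold the precomputed XOR of the high and low halves of each hash-key power (built by the pshufd $78 / pxor pairs in the init code), so only the data-side half-sum has to be formed per block.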
@@ -1256,6 +1518,8 @@ _esb_loop_\@:
.endm
/*****************************************************************************
* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data
+* // Context data
* u8 *out, // Plaintext output. Encrypt in-place is allowed.
* const u8 *in, // Ciphertext input
* u64 plaintext_len, // Length of data in bytes for decryption.
@@ -1333,195 +1597,20 @@ _esb_loop_\@:
*
*****************************************************************************/
ENTRY(aesni_gcm_dec)
- push %r12
- push %r13
- push %r14
- mov %rsp, %r14
-/*
-* states of %xmm registers %xmm6:%xmm15 not saved
-* all %xmm registers are clobbered
-*/
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
- mov %arg6, %r12
- movdqu (%r12), %xmm13 # %xmm13 = HashKey
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-
-# Precompute HashKey<<1 (mod poly) from the hash key (required for GHASH)
-
- movdqa %xmm13, %xmm2
- psllq $1, %xmm13
- psrlq $63, %xmm2
- movdqa %xmm2, %xmm1
- pslldq $8, %xmm2
- psrldq $8, %xmm1
- por %xmm2, %xmm13
-
- # Reduction
-
- pshufd $0x24, %xmm1, %xmm2
- pcmpeqd TWOONE(%rip), %xmm2
- pand POLY(%rip), %xmm2
- pxor %xmm2, %xmm13 # %xmm13 holds the HashKey<<1 (mod poly)
-
-
- # Decrypt first few blocks
-
- movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
- mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
- and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
- mov %r13, %r12
- and $(3<<4), %r12
- jz _initial_num_blocks_is_0_decrypt
- cmp $(2<<4), %r12
- jb _initial_num_blocks_is_1_decrypt
- je _initial_num_blocks_is_2_decrypt
-_initial_num_blocks_is_3_decrypt:
- INITIAL_BLOCKS_DEC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, dec
- sub $48, %r13
- jmp _initial_blocks_decrypted
-_initial_num_blocks_is_2_decrypt:
- INITIAL_BLOCKS_DEC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, dec
- sub $32, %r13
- jmp _initial_blocks_decrypted
-_initial_num_blocks_is_1_decrypt:
- INITIAL_BLOCKS_DEC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, dec
- sub $16, %r13
- jmp _initial_blocks_decrypted
-_initial_num_blocks_is_0_decrypt:
- INITIAL_BLOCKS_DEC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, dec
-_initial_blocks_decrypted:
- cmp $0, %r13
- je _zero_cipher_left_decrypt
- sub $64, %r13
- je _four_cipher_left_decrypt
-_decrypt_by_4:
- GHASH_4_ENCRYPT_4_PARALLEL_DEC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
-%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, dec
- add $64, %r11
- sub $64, %r13
- jne _decrypt_by_4
-_four_cipher_left_decrypt:
- GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
-%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
-_zero_cipher_left_decrypt:
- mov %arg4, %r13
- and $15, %r13 # %r13 = arg4 (mod 16)
- je _multiple_of_16_bytes_decrypt
-
- # Handle the last <16 byte block separately
-
- paddd ONE(%rip), %xmm0 # increment CNT to get Yn
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
-
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
-
- lea (%arg3,%r11,1), %r10
- mov %r13, %r12
- READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
-
- lea ALL_F+16(%rip), %r12
- sub %r13, %r12
- movdqa %xmm1, %xmm2
- pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
- movdqu (%r12), %xmm1
- # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm2
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10 ,%xmm2
-
- pxor %xmm2, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
-
- # output %r13 bytes
- MOVQ_R64_XMM %xmm0, %rax
- cmp $8, %r13
- jle _less_than_8_bytes_left_decrypt
- mov %rax, (%arg2 , %r11, 1)
- add $8, %r11
- psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
- sub $8, %r13
-_less_than_8_bytes_left_decrypt:
- mov %al, (%arg2, %r11, 1)
- add $1, %r11
- shr $8, %rax
- sub $1, %r13
- jne _less_than_8_bytes_left_decrypt
-_multiple_of_16_bytes_decrypt:
- mov arg8, %r12 # %r13 = aadLen (number of bytes)
- shl $3, %r12 # convert into number of bits
- movd %r12d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
- pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
- pxor %xmm15, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # final GHASH computation
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm8
+ FUNC_SAVE
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), %xmm0 # %xmm0 = Y0
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
- pxor %xmm8, %xmm0
-_return_T_decrypt:
- mov arg9, %r10 # %r10 = authTag
- mov arg10, %r11 # %r11 = auth_tag_len
- cmp $16, %r11
- je _T_16_decrypt
- cmp $8, %r11
- jl _T_4_decrypt
-_T_8_decrypt:
- MOVQ_R64_XMM %xmm0, %rax
- mov %rax, (%r10)
- add $8, %r10
- sub $8, %r11
- psrldq $8, %xmm0
- cmp $0, %r11
- je _return_T_done_decrypt
-_T_4_decrypt:
- movd %xmm0, %eax
- mov %eax, (%r10)
- add $4, %r10
- sub $4, %r11
- psrldq $4, %xmm0
- cmp $0, %r11
- je _return_T_done_decrypt
-_T_123_decrypt:
- movd %xmm0, %eax
- cmp $2, %r11
- jl _T_1_decrypt
- mov %ax, (%r10)
- cmp $2, %r11
- je _return_T_done_decrypt
- add $2, %r10
- sar $16, %eax
-_T_1_decrypt:
- mov %al, (%r10)
- jmp _return_T_done_decrypt
-_T_16_decrypt:
- movdqu %xmm0, (%r10)
-_return_T_done_decrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
- pop %r12
+ GCM_INIT %arg6, arg7, arg8, arg9
+ GCM_ENC_DEC dec
+ GCM_COMPLETE arg10, arg11
+ FUNC_RESTORE
ret
ENDPROC(aesni_gcm_dec)
/*****************************************************************************
* void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data
+* // Context data
* u8 *out, // Ciphertext output. Encrypt in-place is allowed.
* const u8 *in, // Plaintext input
* u64 plaintext_len, // Length of data in bytes for encryption.
@@ -1596,195 +1685,78 @@ ENDPROC(aesni_gcm_dec)
* poly = x^128 + x^127 + x^126 + x^121 + 1
***************************************************************************/
ENTRY(aesni_gcm_enc)
- push %r12
- push %r13
- push %r14
- mov %rsp, %r14
-#
-# states of %xmm registers %xmm6:%xmm15 not saved
-# all %xmm registers are clobbered
-#
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp
- mov %arg6, %r12
- movdqu (%r12), %xmm13
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-
-# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
-
- movdqa %xmm13, %xmm2
- psllq $1, %xmm13
- psrlq $63, %xmm2
- movdqa %xmm2, %xmm1
- pslldq $8, %xmm2
- psrldq $8, %xmm1
- por %xmm2, %xmm13
-
- # reduce HashKey<<1
-
- pshufd $0x24, %xmm1, %xmm2
- pcmpeqd TWOONE(%rip), %xmm2
- pand POLY(%rip), %xmm2
- pxor %xmm2, %xmm13
- movdqa %xmm13, HashKey(%rsp)
- mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
- and $-16, %r13
- mov %r13, %r12
+ FUNC_SAVE
- # Encrypt first few blocks
+ GCM_INIT %arg6, arg7, arg8, arg9
+ GCM_ENC_DEC enc
- and $(3<<4), %r12
- jz _initial_num_blocks_is_0_encrypt
- cmp $(2<<4), %r12
- jb _initial_num_blocks_is_1_encrypt
- je _initial_num_blocks_is_2_encrypt
-_initial_num_blocks_is_3_encrypt:
- INITIAL_BLOCKS_ENC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, enc
- sub $48, %r13
- jmp _initial_blocks_encrypted
-_initial_num_blocks_is_2_encrypt:
- INITIAL_BLOCKS_ENC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, enc
- sub $32, %r13
- jmp _initial_blocks_encrypted
-_initial_num_blocks_is_1_encrypt:
- INITIAL_BLOCKS_ENC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, enc
- sub $16, %r13
- jmp _initial_blocks_encrypted
-_initial_num_blocks_is_0_encrypt:
- INITIAL_BLOCKS_ENC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, enc
-_initial_blocks_encrypted:
-
- # Main loop - Encrypt remaining blocks
-
- cmp $0, %r13
- je _zero_cipher_left_encrypt
- sub $64, %r13
- je _four_cipher_left_encrypt
-_encrypt_by_4_encrypt:
- GHASH_4_ENCRYPT_4_PARALLEL_ENC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
-%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc
- add $64, %r11
- sub $64, %r13
- jne _encrypt_by_4_encrypt
-_four_cipher_left_encrypt:
- GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
-%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
-_zero_cipher_left_encrypt:
- mov %arg4, %r13
- and $15, %r13 # %r13 = arg4 (mod 16)
- je _multiple_of_16_bytes_encrypt
-
- # Handle the last <16 Byte block separately
- paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
-
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
-
- lea (%arg3,%r11,1), %r10
- mov %r13, %r12
- READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
-
- lea ALL_F+16(%rip), %r12
- sub %r13, %r12
- pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
- movdqu (%r12), %xmm1
- # get the appropriate mask to mask out top 16-r13 bytes of xmm0
- pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10,%xmm0
+ GCM_COMPLETE arg10, arg11
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_enc)
- pxor %xmm0, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # GHASH computation for the last <16 byte block
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
+/*****************************************************************************
+* void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association)
+* // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+* // concatenated with 0x00000001. 16-byte aligned pointer.
+* u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary.
+* const u8 *aad, // Additional Authentication Data (AAD)
+* u64 aad_len) // Length of AAD in bytes.
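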
+*/
+ENTRY(aesni_gcm_init)
+ FUNC_SAVE
+	GCM_INIT %arg3, %arg4, %arg5, %arg6
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_init)
- # shuffle xmm0 back to output as ciphertext
+/*****************************************************************************
+* void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *out, // Ciphertext output. Encrypt in-place is allowed.
+* const u8 *in, // Plaintext input
+* u64 plaintext_len); // Length of data in bytes for encryption.
+*/
+ENTRY(aesni_gcm_enc_update)
+ FUNC_SAVE
+ GCM_ENC_DEC enc
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_enc_update)
- # Output %r13 bytes
- MOVQ_R64_XMM %xmm0, %rax
- cmp $8, %r13
- jle _less_than_8_bytes_left_encrypt
- mov %rax, (%arg2 , %r11, 1)
- add $8, %r11
- psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
- sub $8, %r13
-_less_than_8_bytes_left_encrypt:
- mov %al, (%arg2, %r11, 1)
- add $1, %r11
- shr $8, %rax
- sub $1, %r13
- jne _less_than_8_bytes_left_encrypt
-_multiple_of_16_bytes_encrypt:
- mov arg8, %r12 # %r12 = addLen (number of bytes)
- shl $3, %r12
- movd %r12d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
- pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
- pxor %xmm15, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # final GHASH computation
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm8 # perform a 16 byte swap
+/*****************************************************************************
+* void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *out, // Plaintext output. Decrypt in-place is allowed.
+* const u8 *in, // Ciphertext input
+* u64 ciphertext_len); // Length of data in bytes for decryption.
+*/
+ENTRY(aesni_gcm_dec_update)
+ FUNC_SAVE
+ GCM_ENC_DEC dec
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_dec_update)
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), %xmm0 # %xmm0 = Y0
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm15 # Encrypt(K, Y0)
- pxor %xmm8, %xmm0
-_return_T_encrypt:
- mov arg9, %r10 # %r10 = authTag
- mov arg10, %r11 # %r11 = auth_tag_len
- cmp $16, %r11
- je _T_16_encrypt
- cmp $8, %r11
- jl _T_4_encrypt
-_T_8_encrypt:
- MOVQ_R64_XMM %xmm0, %rax
- mov %rax, (%r10)
- add $8, %r10
- sub $8, %r11
- psrldq $8, %xmm0
- cmp $0, %r11
- je _return_T_done_encrypt
-_T_4_encrypt:
- movd %xmm0, %eax
- mov %eax, (%r10)
- add $4, %r10
- sub $4, %r11
- psrldq $4, %xmm0
- cmp $0, %r11
- je _return_T_done_encrypt
-_T_123_encrypt:
- movd %xmm0, %eax
- cmp $2, %r11
- jl _T_1_encrypt
- mov %ax, (%r10)
- cmp $2, %r11
- je _return_T_done_encrypt
- add $2, %r10
- sar $16, %eax
-_T_1_encrypt:
- mov %al, (%r10)
- jmp _return_T_done_encrypt
-_T_16_encrypt:
- movdqu %xmm0, (%r10)
-_return_T_done_encrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
- pop %r12
+/*****************************************************************************
+* void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *auth_tag, // Authenticated Tag output.
+* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
+* // 12 or 8.
+*/
+ENTRY(aesni_gcm_finalize)
+ FUNC_SAVE
+ GCM_COMPLETE %arg3 %arg4
+ FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_enc)
+ENDPROC(aesni_gcm_finalize)
#endif
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 34cf1c1f8c98..acbe7e8336d8 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -72,6 +72,21 @@ struct aesni_xts_ctx {
u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};
+#define GCM_BLOCK_LEN 16
+
+struct gcm_context_data {
+ /* init, update and finalize context data */
+ u8 aad_hash[GCM_BLOCK_LEN];
+ u64 aad_length;
+ u64 in_length;
+ u8 partial_block_enc_key[GCM_BLOCK_LEN];
+ u8 orig_IV[GCM_BLOCK_LEN];
+ u8 current_counter[GCM_BLOCK_LEN];
+ u64 partial_block_len;
+ u64 unused;
+ u8 hash_keys[GCM_BLOCK_LEN * 8];
+};
+
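The assembly addresses this state through fixed offsets from %arg2 (AadHash, CurCount, PBlockLen, HashKey, HashKey_2 and friends, as used throughout the macros above), so this C layout has to track the asm-side constants exactly. A hedged sketch of compile-time checks one could add; the offsets are computed from the struct itself, and the assumption is that the asm constants match it field-for-field:

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void gcm_context_data_layout_check(void)
{
	/* every field is naturally aligned, so no implicit padding */
	BUILD_BUG_ON(offsetof(struct gcm_context_data, aad_hash) != 0);
	BUILD_BUG_ON(offsetof(struct gcm_context_data, current_counter) != 64);
	BUILD_BUG_ON(offsetof(struct gcm_context_data, partial_block_len) != 80);
	BUILD_BUG_ON(offsetof(struct gcm_context_data, hash_keys) != 96);
}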
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
@@ -105,6 +120,7 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
/* asmlinkage void aesni_gcm_enc()
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
+ * struct gcm_context_data. May be uninitialized.
* u8 *out, Ciphertext output. Encrypt in-place is allowed.
* const u8 *in, Plaintext input
* unsigned long plaintext_len, Length of data in bytes for encryption.
@@ -117,13 +133,15 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
* unsigned long auth_tag_len), Authenticated Tag Length in bytes.
* Valid values are 16 (most likely), 12 or 8.
*/
-asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
+ * struct gcm_context_data. May be uninitialized.
* u8 *out, Plaintext output. Decrypt in-place is allowed.
* const u8 *in, Ciphertext input
* unsigned long ciphertext_len, Length of data in bytes for decryption.
@@ -137,11 +155,28 @@ asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
* unsigned long auth_tag_len) Authenticated Tag Length in bytes.
* Valid values are 16 (most likely), 12 or 8.
*/
-asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
+/* Scatter / Gather routines, with args similar to above */
+asmlinkage void aesni_gcm_init(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *iv,
+ u8 *hash_subkey, const u8 *aad,
+ unsigned long aad_len);
+asmlinkage void aesni_gcm_enc_update(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in,
+ unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
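Taken together these form an init/update/finalize interface: the GCM state lives in struct gcm_context_data between calls, so the glue code can feed scatter-gather data chunk by chunk instead of linearizing it first. A condensed sketch of the intended call sequence (error handling and scatterwalk mapping omitted; gcmaes_crypt_by_sg() below is the real loop):

	struct gcm_context_data data AESNI_ALIGN_ATTR;

	kernel_fpu_begin();
	aesni_gcm_init(aes_ctx, &data, iv, hash_subkey, assoc, assoclen);
	while (left) {			/* one update per mapped chunk */
		aesni_gcm_enc_update(aes_ctx, &data, dst, src, len);
		left -= len;
	}
	aesni_gcm_finalize(aes_ctx, &data, auth_tag, auth_tag_len);
	kernel_fpu_end();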
#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
@@ -167,15 +202,17 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-static void aesni_gcm_enc_avx(void *ctx, u8 *out,
+static void aesni_gcm_enc_avx(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
- aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_enc(ctx, data, out, in,
+ plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
@@ -183,15 +220,17 @@ static void aesni_gcm_enc_avx(void *ctx, u8 *out,
}
}
-static void aesni_gcm_dec_avx(void *ctx, u8 *out,
+static void aesni_gcm_dec_avx(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_dec(ctx, data, out, in,
+ ciphertext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
@@ -218,15 +257,17 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
+static void aesni_gcm_enc_avx2(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_enc(ctx, data, out, in,
+ plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
@@ -238,15 +279,17 @@ static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
}
}
-static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
+static void aesni_gcm_dec_avx2(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
- aad, aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_dec(ctx, data, out, in,
+ ciphertext_len, iv, hash_subkey,
+ aad, aad_len, auth_tag, auth_tag_len);
} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
@@ -259,15 +302,19 @@ static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
}
#endif
-static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
+static void (*aesni_gcm_enc_tfm)(void *ctx,
+ struct gcm_context_data *data, u8 *out,
+ const u8 *in, unsigned long plaintext_len,
+ u8 *iv, u8 *hash_subkey, const u8 *aad,
+ unsigned long aad_len, u8 *auth_tag,
+ unsigned long auth_tag_len);
-static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
+static void (*aesni_gcm_dec_tfm)(void *ctx,
+ struct gcm_context_data *data, u8 *out,
+ const u8 *in, unsigned long ciphertext_len,
+ u8 *iv, u8 *hash_subkey, const u8 *aad,
+ unsigned long aad_len, u8 *auth_tag,
+ unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
@@ -744,6 +791,127 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
return 0;
}
+static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ unsigned int assoclen, u8 *hash_subkey,
+ u8 *iv, void *aes_ctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ struct gcm_context_data data AESNI_ALIGN_ATTR;
+ struct scatter_walk dst_sg_walk = {};
+ unsigned long left = req->cryptlen;
+ unsigned long len, srclen, dstlen;
+ struct scatter_walk assoc_sg_walk;
+ struct scatter_walk src_sg_walk;
+ struct scatterlist src_start[2];
+ struct scatterlist dst_start[2];
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+ u8 *src, *dst, *assoc;
+ u8 *assocmem = NULL;
+ u8 authTag[16];
+
+ if (!enc)
+ left -= auth_tag_len;
+
+ /* Linearize assoc, if not already linear */
+ if (req->src->length >= assoclen && req->src->length &&
+ (!PageHighMem(sg_page(req->src)) ||
+ req->src->offset + req->src->length < PAGE_SIZE)) {
+ scatterwalk_start(&assoc_sg_walk, req->src);
+ assoc = scatterwalk_map(&assoc_sg_walk);
+ } else {
+ /* assoc can be any length, so must be on heap */
+ assocmem = kmalloc(assoclen, GFP_ATOMIC);
+ if (unlikely(!assocmem))
+ return -ENOMEM;
+ assoc = assocmem;
+
+ scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
+ }
+
+ src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+ scatterwalk_start(&src_sg_walk, src_sg);
+ if (req->src != req->dst) {
+ dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
+ scatterwalk_start(&dst_sg_walk, dst_sg);
+ }
+
+ kernel_fpu_begin();
+ aesni_gcm_init(aes_ctx, &data, iv,
+ hash_subkey, assoc, assoclen);
+ if (req->src != req->dst) {
+ while (left) {
+ src = scatterwalk_map(&src_sg_walk);
+ dst = scatterwalk_map(&dst_sg_walk);
+ srclen = scatterwalk_clamp(&src_sg_walk, left);
+ dstlen = scatterwalk_clamp(&dst_sg_walk, left);
+ len = min(srclen, dstlen);
+ if (len) {
+ if (enc)
+ aesni_gcm_enc_update(aes_ctx, &data,
+ dst, src, len);
+ else
+ aesni_gcm_dec_update(aes_ctx, &data,
+ dst, src, len);
+ }
+ left -= len;
+
+ scatterwalk_unmap(src);
+ scatterwalk_unmap(dst);
+ scatterwalk_advance(&src_sg_walk, len);
+ scatterwalk_advance(&dst_sg_walk, len);
+ scatterwalk_done(&src_sg_walk, 0, left);
+ scatterwalk_done(&dst_sg_walk, 1, left);
+ }
+ } else {
+ while (left) {
+ dst = src = scatterwalk_map(&src_sg_walk);
+ len = scatterwalk_clamp(&src_sg_walk, left);
+ if (len) {
+ if (enc)
+ aesni_gcm_enc_update(aes_ctx, &data,
+ src, src, len);
+ else
+ aesni_gcm_dec_update(aes_ctx, &data,
+ src, src, len);
+ }
+ left -= len;
+ scatterwalk_unmap(src);
+ scatterwalk_advance(&src_sg_walk, len);
+ scatterwalk_done(&src_sg_walk, 1, left);
+ }
+ }
+ aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
+ kernel_fpu_end();
+
+ if (!assocmem)
+ scatterwalk_unmap(assoc);
+ else
+ kfree(assocmem);
+
+ if (!enc) {
+ u8 authTagMsg[16];
+
+ /* Copy out original authTag */
+ scatterwalk_map_and_copy(authTagMsg, req->src,
+ req->assoclen + req->cryptlen -
+ auth_tag_len,
+ auth_tag_len, 0);
+
+	/* Compare the generated tag with the passed-in tag. */
+ return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
+ -EBADMSG : 0;
+ }
+
+ /* Copy in the authTag */
+ scatterwalk_map_and_copy(authTag, req->dst,
+ req->assoclen + req->cryptlen,
+ auth_tag_len, 1);
+
+ return 0;
+}
+
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
@@ -753,7 +921,14 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
struct scatter_walk src_sg_walk;
struct scatter_walk dst_sg_walk = {};
+ struct gcm_context_data data AESNI_ALIGN_ATTR;
+ if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
+ aesni_gcm_enc_tfm == aesni_gcm_enc ||
+ req->cryptlen < AVX_GEN2_OPTSIZE) {
+ return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
+ aes_ctx);
+ }
if (sg_is_last(req->src) &&
(!PageHighMem(sg_page(req->src)) ||
req->src->offset + req->src->length <= PAGE_SIZE) &&
@@ -782,7 +957,7 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
}
kernel_fpu_begin();
- aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
+ aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
hash_subkey, assoc, assoclen,
dst + req->cryptlen, auth_tag_len);
kernel_fpu_end();
@@ -817,8 +992,15 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 authTag[16];
struct scatter_walk src_sg_walk;
struct scatter_walk dst_sg_walk = {};
+ struct gcm_context_data data AESNI_ALIGN_ATTR;
int retval = 0;
+ if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
+ aesni_gcm_enc_tfm == aesni_gcm_enc ||
+ req->cryptlen < AVX_GEN2_OPTSIZE) {
+ return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
+ aes_ctx);
+ }
tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
if (sg_is_last(req->src) &&
@@ -849,7 +1031,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
kernel_fpu_begin();
- aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
+ aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
hash_subkey, assoc, assoclen,
authTag, auth_tag_len);
kernel_fpu_end();
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index f9eca34301e2..3e0c07cc9124 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -25,13 +25,13 @@
*
*/
-#include <asm/processor.h>
+#include <crypto/algapi.h>
#include <crypto/blowfish.h>
+#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <crypto/algapi.h>
/* regular block cipher functions */
asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
@@ -77,20 +77,28 @@ static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+static int blowfish_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return blowfish_setkey(&tfm->base, key, keylen);
+}
+
+static int ecb_crypt(struct skcipher_request *req,
void (*fn)(struct bf_ctx *, u8 *, const u8 *),
void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes;
int err;
- err = blkcipher_walk_virt(desc, walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ u8 *wsrc = walk.src.virt.addr;
+ u8 *wdst = walk.dst.virt.addr;
/* Process four block batch */
if (nbytes >= bsize * 4) {
@@ -116,34 +124,25 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
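Every converted handler below follows the same shape: build a skcipher_walk from the request, process walk.nbytes per iteration, and report any unprocessed tail to skcipher_walk_done(). A stripped-down skeleton of that pattern (the function name is illustrative; for ECB, walk.nbytes is a multiple of the block size):

static int skcipher_handler_skeleton(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		do {			/* one block per iteration */
			blowfish_enc_blk(ctx, dst, src);
			src += BF_BLOCK_SIZE;
			dst += BF_BLOCK_SIZE;
			nbytes -= BF_BLOCK_SIZE;
		} while (nbytes >= BF_BLOCK_SIZE);

		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}

Note that the per-call context now comes from crypto_skcipher_ctx() rather than a blkcipher_desc, which is why the __cbc and __ctr helpers below take a struct bf_ctx * directly.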
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way);
+ return ecb_crypt(req, blowfish_enc_blk, blowfish_enc_blk_4way);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way);
+ return ecb_crypt(req, blowfish_dec_blk, blowfish_dec_blk_4way);
}
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_encrypt(struct bf_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -164,27 +163,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
return nbytes;
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_encrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_encrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_decrypt(struct bf_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -245,24 +244,25 @@ done:
return nbytes;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_decrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_decrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
+static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[BF_BLOCK_SIZE];
@@ -276,10 +276,8 @@ static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
crypto_inc(ctrblk, BF_BLOCK_SIZE);
}
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -332,29 +330,30 @@ done:
return nbytes;
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
- nbytes = __ctr_crypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __ctr_crypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
- if (walk.nbytes) {
- ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
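ctr_crypt_final() covers the trailing sub-block: CTR needs no padding because the tail is produced by encrypting one more counter block and XORing only the bytes that remain, which is also why the ctr(blowfish) definition below advertises a block size of 1. The core of that step, annotated (a sketch equivalent to the helper above, not a verbatim copy):

	u8 keystream[BF_BLOCK_SIZE];

	blowfish_enc_blk(ctx, keystream, walk->iv);	/* keystream = E_K(counter) */
	crypto_xor(keystream, walk->src.virt.addr, nbytes); /* fold in the tail */
	memcpy(walk->dst.virt.addr, keystream, nbytes);	/* emit only nbytes bytes */
	crypto_inc(walk->iv, BF_BLOCK_SIZE);		/* big-endian counter bump */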
-static struct crypto_alg bf_algs[4] = { {
+static struct crypto_alg bf_cipher_alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-asm",
.cra_priority = 200,
@@ -372,66 +371,50 @@ static struct crypto_alg bf_algs[4] = { {
.cia_decrypt = blowfish_decrypt,
}
}
-}, {
- .cra_name = "ecb(blowfish)",
- .cra_driver_name = "ecb-blowfish-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = BF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = BF_MIN_KEY_SIZE,
- .max_keysize = BF_MAX_KEY_SIZE,
- .setkey = blowfish_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
+};
+
+static struct skcipher_alg bf_skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(blowfish)",
+ .base.cra_driver_name = "ecb-blowfish-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = BF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct bf_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = BF_MIN_KEY_SIZE,
+ .max_keysize = BF_MAX_KEY_SIZE,
+ .setkey = blowfish_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(blowfish)",
+ .base.cra_driver_name = "cbc-blowfish-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = BF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct bf_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = BF_MIN_KEY_SIZE,
+ .max_keysize = BF_MAX_KEY_SIZE,
+ .ivsize = BF_BLOCK_SIZE,
+ .setkey = blowfish_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(blowfish)",
+ .base.cra_driver_name = "ctr-blowfish-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct bf_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = BF_MIN_KEY_SIZE,
+ .max_keysize = BF_MAX_KEY_SIZE,
+ .ivsize = BF_BLOCK_SIZE,
+ .chunksize = BF_BLOCK_SIZE,
+ .setkey = blowfish_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
},
-}, {
- .cra_name = "cbc(blowfish)",
- .cra_driver_name = "cbc-blowfish-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = BF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = BF_MIN_KEY_SIZE,
- .max_keysize = BF_MAX_KEY_SIZE,
- .ivsize = BF_BLOCK_SIZE,
- .setkey = blowfish_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(blowfish)",
- .cra_driver_name = "ctr-blowfish-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = BF_MIN_KEY_SIZE,
- .max_keysize = BF_MAX_KEY_SIZE,
- .ivsize = BF_BLOCK_SIZE,
- .setkey = blowfish_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-} };
+};
static bool is_blacklisted_cpu(void)
{
@@ -456,6 +439,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init init(void)
{
+ int err;
+
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO
"blowfish-x86_64: performance on this CPU "
@@ -464,12 +449,23 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(bf_algs, ARRAY_SIZE(bf_algs));
+ err = crypto_register_alg(&bf_cipher_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(bf_skcipher_algs,
+ ARRAY_SIZE(bf_skcipher_algs));
+ if (err)
+ crypto_unregister_alg(&bf_cipher_alg);
+
+ return err;
}
static void __exit fini(void)
{
- crypto_unregister_algs(bf_algs, ARRAY_SIZE(bf_algs));
+ crypto_unregister_alg(&bf_cipher_alg);
+ crypto_unregister_skciphers(bf_skcipher_algs,
+ ARRAY_SIZE(bf_skcipher_algs));
}
module_init(init);
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 60907c139c4e..d4992e458f92 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -10,18 +10,15 @@
*
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/err.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/camellia.h>
#include <asm/crypto/glue_helper.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
@@ -150,413 +147,120 @@ static const struct common_glue_ctx camellia_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
+ return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
+ &tfm->base.crt_flags);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&camellia_enc, req);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
-}
-
-static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
-{
- return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
- CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
- nbytes);
-}
-
-static inline void camellia_fpu_end(bool fpu_enabled)
-{
- glue_fpu_end(fpu_enabled);
-}
-
-static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
-{
- return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
- &tfm->crt_flags);
-}
-
-struct crypt_priv {
- struct camellia_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_enc_32way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- }
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+ return glue_ecb_req_128bit(&camellia_dec, req);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_dec_32way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- }
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
+ req);
}
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
+ return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
}
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
+ return glue_ctr_req_128bit(&camellia_ctr, req);
}
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&camellia_enc_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&camellia_dec_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static struct crypto_alg cmll_algs[10] = { {
- .cra_name = "__ecb-camellia-aesni-avx2",
- .cra_driver_name = "__driver-ecb-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-camellia-aesni-avx2",
- .cra_driver_name = "__driver-cbc-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-camellia-aesni-avx2",
- .cra_driver_name = "__driver-ctr-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-camellia-aesni-avx2",
- .cra_driver_name = "__driver-lrw-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_camellia_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = lrw_camellia_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-camellia-aesni-avx2",
- .cra_driver_name = "__driver-xts-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(camellia)",
- .cra_driver_name = "ecb-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(camellia)",
- .cra_driver_name = "cbc-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(camellia)",
- .cra_driver_name = "ctr-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(camellia)",
- .cra_driver_name = "lrw-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(camellia)",
- .cra_driver_name = "xts-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg camellia_algs[] = {
+ {
+ .base.cra_name = "__ecb(camellia)",
+ .base.cra_driver_name = "__ecb-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(camellia)",
+ .base.cra_driver_name = "__cbc-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(camellia)",
+ .base.cra_driver_name = "__ctr-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .chunksize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(camellia)",
+ .base.cra_driver_name = "__xts-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = xts_camellia_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
static int __init camellia_aesni_init(void)
{
@@ -576,12 +280,15 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ return simd_register_skciphers_compat(camellia_algs,
+ ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
static void __exit camellia_aesni_fini(void)
{
- crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
module_init(camellia_aesni_init);
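
The AVX2 glue no longer hand-registers ablkcipher wrappers: simd_register_skciphers_compat() takes the CRYPTO_ALG_INTERNAL "__"-prefixed algorithms and, for each, creates a simd wrapper that drops the prefix and defers to cryptd when the FPU is not usable. The boilerplate every converted x86 glue module now carries reduces to the skeleton below; all my_* names are placeholders, not code from this patch:

static struct skcipher_alg my_algs[] = {
	/* "__ecb(mycipher)", "__cbc(mycipher)", ... all CRYPTO_ALG_INTERNAL */
};
static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];

static int __init my_mod_init(void)
{
	/* registers the internal algs plus one simd wrapper per entry */
	return simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
					      my_simd_algs);
}

static void __exit my_mod_exit(void)
{
	/* unregisters both the wrappers and the internal algs */
	simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs),
				  my_simd_algs);
}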
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index d96429da88eb..d09f6521466a 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -10,18 +10,15 @@
*
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/err.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/camellia.h>
#include <asm/crypto/glue_helper.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
@@ -154,401 +151,142 @@ static const struct common_glue_ctx camellia_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
+ return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
+ &tfm->base.crt_flags);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
- dst, src, nbytes);
+ return glue_ecb_req_128bit(&camellia_enc, req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_ecb_req_128bit(&camellia_dec, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
+ req);
}
-static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
- CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
- nbytes);
+ return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
}
-static inline void camellia_fpu_end(bool fpu_enabled)
+static int ctr_crypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_ctr_req_128bit(&camellia_ctr, req);
}
-static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
+int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
- &tfm->crt_flags);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 *flags = &tfm->base.crt_flags;
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ /* first half of xts-key is for crypt */
+ err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
+ flags);
}
+EXPORT_SYMBOL_GPL(xts_camellia_setkey);
-struct crypt_priv {
- struct camellia_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&camellia_enc_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&camellia_dec_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg cmll_algs[10] = { {
- .cra_name = "__ecb-camellia-aesni",
- .cra_driver_name = "__driver-ecb-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-camellia-aesni",
- .cra_driver_name = "__driver-cbc-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-camellia-aesni",
- .cra_driver_name = "__driver-ctr-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-camellia-aesni",
- .cra_driver_name = "__driver-lrw-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_camellia_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = lrw_camellia_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-camellia-aesni",
- .cra_driver_name = "__driver-xts-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(camellia)",
- .cra_driver_name = "ecb-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(camellia)",
- .cra_driver_name = "cbc-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(camellia)",
- .cra_driver_name = "ctr-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(camellia)",
- .cra_driver_name = "lrw-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(camellia)",
- .cra_driver_name = "xts-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg camellia_algs[] = {
+ {
+ .base.cra_name = "__ecb(camellia)",
+ .base.cra_driver_name = "__ecb-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(camellia)",
+ .base.cra_driver_name = "__cbc-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(camellia)",
+ .base.cra_driver_name = "__ctr-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .chunksize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(camellia)",
+ .base.cra_driver_name = "__xts-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = xts_camellia_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
static int __init camellia_aesni_init(void)
{
@@ -567,12 +305,15 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ return simd_register_skciphers_compat(camellia_algs,
+ ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
static void __exit camellia_aesni_fini(void)
{
- crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
module_init(camellia_aesni_init);
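
xts_camellia_setkey() above is the canonical XTS setkey shape that recurs throughout this series: xts_verify_key() (from <crypto/xts.h>) rejects odd key lengths and, under FIPS, keys whose two halves are identical, after which each half is expanded into its own schedule. A hedged restatement of the pattern for a generic cipher; my_xts_ctx and my_expand_key are hypothetical stand-ins:

static int xts_mycipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct my_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int err;

	/* even length; in FIPS mode the two halves must also differ */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half: data-encryption schedule */
	err = my_expand_key(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half: tweak-encryption schedule */
	return my_expand_key(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
			     flags);
}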
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index af4840ab2a3d..dcd5e0f71b00 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -23,15 +23,12 @@
*
*/
-#include <asm/processor.h>
#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
#include <asm/crypto/camellia.h>
#include <asm/crypto/glue_helper.h>
@@ -1272,13 +1269,19 @@ int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
}
EXPORT_SYMBOL_GPL(__camellia_setkey);
-static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
- return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
+ return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len,
&tfm->crt_flags);
}
+static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ return camellia_setkey(&tfm->base, key, key_len);
+}
+
void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
{
u128 iv = *src;
@@ -1373,188 +1376,33 @@ static const struct common_glue_ctx camellia_dec_cbc = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
-}
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct camellia_ctx *ctx = priv;
- int i;
-
- while (nbytes >= 2 * bsize) {
- camellia_enc_blk_2way(ctx, srcdst, srcdst);
- srcdst += bsize * 2;
- nbytes -= bsize * 2;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct camellia_ctx *ctx = priv;
- int i;
-
- while (nbytes >= 2 * bsize) {
- camellia_dec_blk_2way(ctx, srcdst, srcdst);
- srcdst += bsize * 2;
- nbytes -= bsize * 2;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx, srcdst, srcdst);
-}
-
-int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __camellia_setkey(&ctx->camellia_ctx, key,
- keylen - CAMELLIA_BLOCK_SIZE,
- &tfm->crt_flags);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table,
- key + keylen - CAMELLIA_BLOCK_SIZE);
-}
-EXPORT_SYMBOL_GPL(lrw_camellia_setkey);
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[2 * 4];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->camellia_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[2 * 4];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->camellia_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
+ return glue_ecb_req_128bit(&camellia_enc, req);
}
-void lrw_camellia_exit_tfm(struct crypto_tfm *tfm)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
+ return glue_ecb_req_128bit(&camellia_dec, req);
}
-EXPORT_SYMBOL_GPL(lrw_camellia_exit_tfm);
-int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
- flags);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
+ req);
}
-EXPORT_SYMBOL_GPL(xts_camellia_setkey);
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[2 * 4];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[2 * 4];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_ctr_req_128bit(&camellia_ctr, req);
}
-static struct crypto_alg camellia_algs[6] = { {
+static struct crypto_alg camellia_cipher_alg = {
.cra_name = "camellia",
.cra_driver_name = "camellia-asm",
.cra_priority = 200,
@@ -1572,109 +1420,50 @@ static struct crypto_alg camellia_algs[6] = { {
.cia_decrypt = camellia_decrypt
}
}
-}, {
- .cra_name = "ecb(camellia)",
- .cra_driver_name = "ecb-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(camellia)",
- .cra_driver_name = "cbc-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(camellia)",
- .cra_driver_name = "ctr-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "lrw(camellia)",
- .cra_driver_name = "lrw-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_camellia_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = lrw_camellia_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "xts(camellia)",
- .cra_driver_name = "xts-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-} };
+};
+
+static struct skcipher_alg camellia_skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(camellia)",
+ .base.cra_driver_name = "ecb-camellia-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(camellia)",
+ .base.cra_driver_name = "cbc-camellia-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(camellia)",
+ .base.cra_driver_name = "ctr-camellia-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .chunksize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }
+};
static bool is_blacklisted_cpu(void)
{
@@ -1700,6 +1489,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init init(void)
{
+ int err;
+
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO
"camellia-x86_64: performance on this CPU "
@@ -1708,12 +1499,23 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
+ err = crypto_register_alg(&camellia_cipher_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(camellia_skcipher_algs,
+ ARRAY_SIZE(camellia_skcipher_algs));
+ if (err)
+ crypto_unregister_alg(&camellia_cipher_alg);
+
+ return err;
}
static void __exit fini(void)
{
- crypto_unregister_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
+ crypto_unregister_alg(&camellia_cipher_alg);
+ crypto_unregister_skciphers(camellia_skcipher_algs,
+ ARRAY_SIZE(camellia_skcipher_algs));
}
module_init(init);
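
camellia_glue.c keeps its crypto_tfm-based setkey for the bare "camellia" cipher and reuses it for the skcipher algorithms through the tfm's embedded base object: struct crypto_skcipher contains a struct crypto_tfm as its ->base member, so the legacy function can be called on it directly. The bridge, generalized with illustrative names:

static int mycipher_setkey_skcipher(struct crypto_skcipher *tfm,
				    const u8 *key, unsigned int keylen)
{
	/* &tfm->base is the underlying struct crypto_tfm, so the
	 * existing crypto_tfm setkey (and its crt_flags error
	 * reporting) is reused unchanged */
	return mycipher_setkey(&tfm->base, key, keylen);
}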
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index dbea6020ffe7..41034745d6a2 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -21,18 +21,14 @@
*
*/
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/err.h>
-#include <crypto/ablk_helper.h>
+#include <asm/crypto/glue_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
-#include <crypto/cryptd.h>
-#include <crypto/ctr.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/glue_helper.h>
+#include <crypto/internal/simd.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
#define CAST5_PARALLEL_BLOCKS 16
@@ -45,10 +41,17 @@ asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src,
__be64 *iv);
-static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return cast5_setkey(&tfm->base, key, keylen);
+}
+
+static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk,
+ unsigned int nbytes)
{
return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
+ walk, fpu_enabled, nbytes);
}
static inline void cast5_fpu_end(bool fpu_enabled)
@@ -56,29 +59,28 @@ static inline void cast5_fpu_end(bool fpu_enabled)
return glue_fpu_end(fpu_enabled);
}
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- bool enc)
+static int ecb_crypt(struct skcipher_request *req, bool enc)
{
bool fpu_enabled = false;
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
int err;
- fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
-
- err = blkcipher_walk_virt(desc, walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ u8 *wsrc = walk.src.virt.addr;
+ u8 *wdst = walk.dst.virt.addr;
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+ fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
do {
fn(ctx, wdst, wsrc);
@@ -103,76 +105,58 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
return err;
}
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, true);
+ return ecb_crypt(req, true);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, false);
+ return ecb_crypt(req, false);
}
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- u64 *src = (u64 *)walk->src.virt.addr;
- u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 *iv = (u64 *)walk->iv;
-
- do {
- *dst = *src ^ *iv;
- __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
- iv = dst;
-
- src += 1;
- dst += 1;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
- *(u64 *)walk->iv = *iv;
- return nbytes;
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_encrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ u64 *src = (u64 *)walk.src.virt.addr;
+ u64 *dst = (u64 *)walk.dst.virt.addr;
+ u64 *iv = (u64 *)walk.iv;
+
+ do {
+ *dst = *src ^ *iv;
+ __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
+ iv = dst;
+ src++;
+ dst++;
+ nbytes -= bsize;
+ } while (nbytes >= bsize);
+
+ *(u64 *)walk.iv = *iv;
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_decrypt(struct cast5_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -224,31 +208,29 @@ done:
return nbytes;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
bool fpu_enabled = false;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
- nbytes = __cbc_decrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
+ nbytes = __cbc_decrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
return err;
}
-static void ctr_crypt_final(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static void ctr_crypt_final(struct skcipher_walk *walk, struct cast5_ctx *ctx)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *ctrblk = walk->iv;
u8 keystream[CAST5_BLOCK_SIZE];
u8 *src = walk->src.virt.addr;
@@ -261,10 +243,9 @@ static void ctr_crypt_final(struct blkcipher_desc *desc,
crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
}
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __ctr_crypt(struct skcipher_walk *walk,
+ struct cast5_ctx *ctx)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -307,162 +288,80 @@ done:
return nbytes;
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
bool fpu_enabled = false;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
- nbytes = __ctr_crypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
+ nbytes = __ctr_crypt(&walk, ctx);
+ err = skcipher_walk_done(&walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
if (walk.nbytes) {
- ctr_crypt_final(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ ctr_crypt_final(&walk, ctx);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
+static struct skcipher_alg cast5_algs[] = {
+ {
+ .base.cra_name = "__ecb(cast5)",
+ .base.cra_driver_name = "__ecb-cast5-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST5_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast5_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST5_MIN_KEY_SIZE,
+ .max_keysize = CAST5_MAX_KEY_SIZE,
+ .setkey = cast5_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(cast5)",
+ .base.cra_driver_name = "__cbc-cast5-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST5_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast5_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST5_MIN_KEY_SIZE,
+ .max_keysize = CAST5_MAX_KEY_SIZE,
+ .ivsize = CAST5_BLOCK_SIZE,
+ .setkey = cast5_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(cast5)",
+ .base.cra_driver_name = "__ctr-cast5-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct cast5_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST5_MIN_KEY_SIZE,
+ .max_keysize = CAST5_MAX_KEY_SIZE,
+ .ivsize = CAST5_BLOCK_SIZE,
+ .chunksize = CAST5_BLOCK_SIZE,
+ .setkey = cast5_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }
+};
-static struct crypto_alg cast5_algs[6] = { {
- .cra_name = "__ecb-cast5-avx",
- .cra_driver_name = "__driver-ecb-cast5-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast5_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .setkey = cast5_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-cast5-avx",
- .cra_driver_name = "__driver-cbc-cast5-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast5_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .setkey = cast5_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-cast5-avx",
- .cra_driver_name = "__driver-ctr-cast5-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cast5_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .ivsize = CAST5_BLOCK_SIZE,
- .setkey = cast5_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "ecb(cast5)",
- .cra_driver_name = "ecb-cast5-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(cast5)",
- .cra_driver_name = "cbc-cast5-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .ivsize = CAST5_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(cast5)",
- .cra_driver_name = "ctr-cast5-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .ivsize = CAST5_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-} };
+static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
static int __init cast5_init(void)
{
@@ -474,12 +373,15 @@ static int __init cast5_init(void)
return -ENODEV;
}
- return crypto_register_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
+ return simd_register_skciphers_compat(cast5_algs,
+ ARRAY_SIZE(cast5_algs),
+ cast5_simd_algs);
}
static void __exit cast5_exit(void)
{
- crypto_unregister_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
+ simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
+ cast5_simd_algs);
}
module_init(cast5_init);
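
The cast5 conversion above shows the registration half of the pattern used throughout this series: the "__"-prefixed skcipher algorithms are internal-only, and simd_register_skciphers_compat() wraps each one in a visible SIMD skcipher that defers to cryptd whenever the FPU cannot be used in the calling context. A minimal module following the same shape might look like the sketch below; my_algs, my_simd_algs, my_init and my_exit are placeholder names, not part of this patch.

	#include <crypto/internal/simd.h>
	#include <linux/module.h>

	static struct skcipher_alg my_algs[1];	/* internal "__" algs, filled in as above */
	static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];

	static int __init my_init(void)
	{
		/* registers my_algs plus one simd wrapper per entry */
		return simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
						      my_simd_algs);
	}

	static void __exit my_exit(void)
	{
		simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs),
					  my_simd_algs);
	}

	module_init(my_init);
	module_exit(my_exit);
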
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 50e684768c55..9fb66b5e94b2 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -24,19 +24,13 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
-#include <crypto/cryptd.h>
-#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
+#include <crypto/internal/simd.h>
#include <crypto/xts.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/glue_helper.h>
#define CAST6_PARALLEL_BLOCKS 8
@@ -56,6 +50,12 @@ asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
+static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return cast6_setkey(&tfm->base, key, keylen);
+}
+
static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
@@ -157,164 +157,30 @@ static const struct common_glue_ctx cast6_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&cast6_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__cast6_encrypt), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&cast6_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&cast6_ctr, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&cast6_enc, req);
}
-static inline bool cast6_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
+ return glue_ecb_req_128bit(&cast6_dec, req);
}
-static inline void cast6_fpu_end(bool fpu_enabled)
+static int cbc_encrypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
+ req);
}
-struct crypt_priv {
- struct cast6_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAST6_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
- cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __cast6_encrypt(ctx->ctx, srcdst, srcdst);
+ return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAST6_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
- cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __cast6_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-struct cast6_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct cast6_ctx cast6_ctx;
-};
-
-static int lrw_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __cast6_setkey(&ctx->cast6_ctx, key, keylen - CAST6_BLOCK_SIZE,
- &tfm->crt_flags);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen - CAST6_BLOCK_SIZE);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAST6_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->cast6_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- cast6_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAST6_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->cast6_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- cast6_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static void lrw_exit_tfm(struct crypto_tfm *tfm)
-{
- struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
+ return glue_ctr_req_128bit(&cast6_ctr, req);
}
struct cast6_xts_ctx {
@@ -322,14 +188,14 @@ struct cast6_xts_ctx {
struct cast6_ctx crypt_ctx;
};
-static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 *flags = &tfm->base.crt_flags;
int err;
- err = xts_check_key(tfm, key, keylen);
+ err = xts_verify_key(tfm, key, keylen);
if (err)
return err;
@@ -343,245 +209,87 @@ static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
flags);
}
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&cast6_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__cast6_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&cast6_enc_xts, req,
+ XTS_TWEAK_CAST(__cast6_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&cast6_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__cast6_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&cast6_dec_xts, req,
+ XTS_TWEAK_CAST(__cast6_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static struct crypto_alg cast6_algs[10] = { {
- .cra_name = "__ecb-cast6-avx",
- .cra_driver_name = "__driver-ecb-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .setkey = cast6_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-cast6-avx",
- .cra_driver_name = "__driver-cbc-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .setkey = cast6_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-cast6-avx",
- .cra_driver_name = "__driver-ctr-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cast6_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = cast6_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-cast6-avx",
- .cra_driver_name = "__driver-lrw-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = lrw_cast6_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-cast6-avx",
- .cra_driver_name = "__driver-xts-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE * 2,
- .max_keysize = CAST6_MAX_KEY_SIZE * 2,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = xts_cast6_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(cast6)",
- .cra_driver_name = "ecb-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(cast6)",
- .cra_driver_name = "cbc-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(cast6)",
- .cra_driver_name = "ctr-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(cast6)",
- .cra_driver_name = "lrw-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(cast6)",
- .cra_driver_name = "xts-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE * 2,
- .max_keysize = CAST6_MAX_KEY_SIZE * 2,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg cast6_algs[] = {
+ {
+ .base.cra_name = "__ecb(cast6)",
+ .base.cra_driver_name = "__ecb-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST6_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast6_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST6_MIN_KEY_SIZE,
+ .max_keysize = CAST6_MAX_KEY_SIZE,
+ .setkey = cast6_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(cast6)",
+ .base.cra_driver_name = "__cbc-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST6_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast6_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST6_MIN_KEY_SIZE,
+ .max_keysize = CAST6_MAX_KEY_SIZE,
+ .ivsize = CAST6_BLOCK_SIZE,
+ .setkey = cast6_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(cast6)",
+ .base.cra_driver_name = "__ctr-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct cast6_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST6_MIN_KEY_SIZE,
+ .max_keysize = CAST6_MAX_KEY_SIZE,
+ .ivsize = CAST6_BLOCK_SIZE,
+ .chunksize = CAST6_BLOCK_SIZE,
+ .setkey = cast6_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(cast6)",
+ .base.cra_driver_name = "__xts-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST6_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast6_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * CAST6_MIN_KEY_SIZE,
+ .max_keysize = 2 * CAST6_MAX_KEY_SIZE,
+ .ivsize = CAST6_BLOCK_SIZE,
+ .setkey = xts_cast6_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
static int __init cast6_init(void)
{
@@ -593,12 +301,15 @@ static int __init cast6_init(void)
return -ENODEV;
}
- return crypto_register_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
+ return simd_register_skciphers_compat(cast6_algs,
+ ARRAY_SIZE(cast6_algs),
+ cast6_simd_algs);
}
static void __exit cast6_exit(void)
{
- crypto_unregister_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
+ simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
+ cast6_simd_algs);
}
module_init(cast6_init);
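
A small adapter recurs in each of these files: the skcipher .setkey hook receives a struct crypto_skcipher, while the unchanged key-schedule code still takes the embedded struct crypto_tfm, so the glue simply unwraps tfm->base. As a sketch of that shim, with my_cipher_setkey() standing in for an algorithm's existing legacy setkey:

	static int my_setkey_skcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
	{
		/* the legacy setkey operates on the underlying crypto_tfm */
		return my_cipher_setkey(&tfm->base, key, keylen);
	}
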
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 30c0a37f4882..5c610d4ef9fc 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -20,13 +20,13 @@
*
*/
-#include <asm/processor.h>
+#include <crypto/algapi.h>
#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <crypto/algapi.h>
struct des3_ede_x86_ctx {
u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
@@ -83,18 +83,18 @@ static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- const u32 *expkey)
+static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
{
- unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+ const unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+ struct skcipher_walk walk;
unsigned int nbytes;
int err;
- err = blkcipher_walk_virt(desc, walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ u8 *wsrc = walk.src.virt.addr;
+ u8 *wdst = walk.dst.virt.addr;
/* Process three block batch */
if (nbytes >= bsize * 3) {
@@ -121,36 +121,31 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, ctx->enc_expkey);
+ return ecb_crypt(req, ctx->enc_expkey);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, ctx->dec_expkey);
+ return ecb_crypt(req, ctx->dec_expkey);
}
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -171,27 +166,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
return nbytes;
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_encrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_encrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -250,25 +245,26 @@ done:
return nbytes;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_decrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_decrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
- struct blkcipher_walk *walk)
+ struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[DES3_EDE_BLOCK_SIZE];
@@ -282,10 +278,9 @@ static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
}
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __ctr_crypt(struct des3_ede_x86_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
__be64 *src = (__be64 *)walk->src.virt.addr;
@@ -333,23 +328,24 @@ done:
return nbytes;
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
- nbytes = __ctr_crypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __ctr_crypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
- if (walk.nbytes) {
- ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
@@ -381,7 +377,14 @@ static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
-static struct crypto_alg des3_ede_algs[4] = { {
+static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return des3_ede_x86_setkey(&tfm->base, key, keylen);
+}
+
+static struct crypto_alg des3_ede_cipher = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-asm",
.cra_priority = 200,
@@ -399,66 +402,50 @@ static struct crypto_alg des3_ede_algs[4] = { {
.cia_decrypt = des3_ede_x86_decrypt,
}
}
-}, {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ede_x86_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_x86_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(des3_ede)",
- .cra_driver_name = "ctr-des3_ede-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_x86_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-} };
+};
+
+static struct skcipher_alg des3_ede_skciphers[] = {
+ {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ede_x86_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_x86_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(des3_ede)",
+ .base.cra_driver_name = "ctr-des3_ede-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .chunksize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_x86_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }
+};
static bool is_blacklisted_cpu(void)
{
@@ -483,17 +470,30 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init des3_ede_x86_init(void)
{
+ int err;
+
if (!force && is_blacklisted_cpu()) {
pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
return -ENODEV;
}
- return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+ err = crypto_register_alg(&des3_ede_cipher);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(des3_ede_skciphers,
+ ARRAY_SIZE(des3_ede_skciphers));
+ if (err)
+ crypto_unregister_alg(&des3_ede_cipher);
+
+ return err;
}
static void __exit des3_ede_x86_fini(void)
{
- crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+ crypto_unregister_alg(&des3_ede_cipher);
+ crypto_unregister_skciphers(des3_ede_skciphers,
+ ARRAY_SIZE(des3_ede_skciphers));
}
module_init(des3_ede_x86_init);
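
des3_ede is the one module here that keeps a plain single-block cipher next to its skciphers, so a single crypto_register_algs() call no longer fits: the init path registers the cipher first and rolls it back if skcipher registration fails. The same two-step shape, sketched with placeholder names (my_cipher, my_skciphers):

	static int __init my_mod_init(void)
	{
		int err;

		err = crypto_register_alg(&my_cipher);
		if (err)
			return err;

		err = crypto_register_skciphers(my_skciphers,
						ARRAY_SIZE(my_skciphers));
		if (err)
			crypto_unregister_alg(&my_cipher);	/* roll back */

		return err;
	}
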
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index d61e57960fe0..a78ef99a9981 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -29,313 +29,212 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
-static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req)
{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
- unsigned int nbytes, i, func_bytes;
+ struct skcipher_walk walk;
bool fpu_enabled = false;
+ unsigned int nbytes;
int err;
- err = blkcipher_walk_virt(desc, walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int func_bytes;
+ unsigned int i;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
-
+ &walk, fpu_enabled, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
- /* Process multi-block batch */
- if (nbytes >= func_bytes) {
- do {
- gctx->funcs[i].fn_u.ecb(ctx, wdst,
- wsrc);
+ if (nbytes < func_bytes)
+ continue;
- wsrc += func_bytes;
- wdst += func_bytes;
- nbytes -= func_bytes;
- } while (nbytes >= func_bytes);
+ /* Process multi-block batch */
+ do {
+ gctx->funcs[i].fn_u.ecb(ctx, dst, src);
+ src += func_bytes;
+ dst += func_bytes;
+ nbytes -= func_bytes;
+ } while (nbytes >= func_bytes);
- if (nbytes < bsize)
- goto done;
- }
+ if (nbytes < bsize)
+ break;
}
-
-done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
glue_fpu_end(fpu_enabled);
return err;
}
+EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
-int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
+ struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return __glue_ecb_crypt_128bit(gctx, desc, &walk);
-}
-EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
-
-static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- u128 *iv = (u128 *)walk->iv;
-
- do {
- u128_xor(dst, src, iv);
- fn(ctx, (u8 *)dst, (u8 *)dst);
- iv = dst;
-
- src += 1;
- dst += 1;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
- *(u128 *)walk->iv = *iv;
- return nbytes;
-}
-
-int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ const u128 *src = (u128 *)walk.src.virt.addr;
+ u128 *dst = (u128 *)walk.dst.virt.addr;
+ u128 *iv = (u128 *)walk.iv;
+
+ do {
+ u128_xor(dst, src, iv);
+ fn(ctx, (u8 *)dst, (u8 *)dst);
+ iv = dst;
+ src++;
+ dst++;
+ nbytes -= bsize;
+ } while (nbytes >= bsize);
+
+ *(u128 *)walk.iv = *iv;
+ err = skcipher_walk_done(&walk, nbytes);
}
-
return err;
}
-EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
+EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
-static unsigned int
-__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req)
{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- u128 last_iv;
- unsigned int num_blocks, func_bytes;
- unsigned int i;
+ struct skcipher_walk walk;
+ bool fpu_enabled = false;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
- /* Start of the last block. */
- src += nbytes / bsize - 1;
- dst += nbytes / bsize - 1;
+ while ((nbytes = walk.nbytes)) {
+ const u128 *src = walk.src.virt.addr;
+ u128 *dst = walk.dst.virt.addr;
+ unsigned int func_bytes, num_blocks;
+ unsigned int i;
+ u128 last_iv;
- last_iv = *src;
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled, nbytes);
+ /* Start of the last block. */
+ src += nbytes / bsize - 1;
+ dst += nbytes / bsize - 1;
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
+ last_iv = *src;
- /* Process multi-block batch */
- if (nbytes >= func_bytes) {
+ for (i = 0; i < gctx->num_funcs; i++) {
+ num_blocks = gctx->funcs[i].num_blocks;
+ func_bytes = bsize * num_blocks;
+
+ if (nbytes < func_bytes)
+ continue;
+
+ /* Process multi-block batch */
do {
- nbytes -= func_bytes - bsize;
src -= num_blocks - 1;
dst -= num_blocks - 1;
gctx->funcs[i].fn_u.cbc(ctx, dst, src);
- nbytes -= bsize;
+ nbytes -= func_bytes;
if (nbytes < bsize)
goto done;
- u128_xor(dst, dst, src - 1);
- src -= 1;
- dst -= 1;
+ u128_xor(dst, dst, --src);
+ dst--;
} while (nbytes >= func_bytes);
}
- }
-
done:
- u128_xor(dst, dst, (u128 *)walk->iv);
- *(u128 *)walk->iv = last_iv;
-
- return nbytes;
-}
-
-int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
- struct blkcipher_walk walk;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
- nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ u128_xor(dst, dst, (u128 *)walk.iv);
+ *(u128 *)walk.iv = last_iv;
+ err = skcipher_walk_done(&walk, nbytes);
}
glue_fpu_end(fpu_enabled);
return err;
}
-EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
-static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req)
{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
- u8 *src = (u8 *)walk->src.virt.addr;
- u8 *dst = (u8 *)walk->dst.virt.addr;
- unsigned int nbytes = walk->nbytes;
- le128 ctrblk;
- u128 tmp;
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ const unsigned int bsize = 128 / 8;
+ struct skcipher_walk walk;
+ bool fpu_enabled = false;
+ unsigned int nbytes;
+ int err;
- be128_to_le128(&ctrblk, (be128 *)walk->iv);
+ err = skcipher_walk_virt(&walk, req, false);
- memcpy(&tmp, src, nbytes);
- fn_ctr(ctx, &tmp, &tmp, &ctrblk);
- memcpy(dst, &tmp, nbytes);
+ while ((nbytes = walk.nbytes) >= bsize) {
+ const u128 *src = walk.src.virt.addr;
+ u128 *dst = walk.dst.virt.addr;
+ unsigned int func_bytes, num_blocks;
+ unsigned int i;
+ le128 ctrblk;
- le128_to_be128((be128 *)walk->iv, &ctrblk);
-}
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled, nbytes);
-static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- const unsigned int bsize = 128 / 8;
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- le128 ctrblk;
- unsigned int num_blocks, func_bytes;
- unsigned int i;
+ be128_to_le128(&ctrblk, (be128 *)walk.iv);
- be128_to_le128(&ctrblk, (be128 *)walk->iv);
+ for (i = 0; i < gctx->num_funcs; i++) {
+ num_blocks = gctx->funcs[i].num_blocks;
+ func_bytes = bsize * num_blocks;
- /* Process multi-block batch */
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
+ if (nbytes < func_bytes)
+ continue;
- if (nbytes >= func_bytes) {
+ /* Process multi-block batch */
do {
gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
-
src += num_blocks;
dst += num_blocks;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);
if (nbytes < bsize)
- goto done;
+ break;
}
- }
-
-done:
- le128_to_be128((be128 *)walk->iv, &ctrblk);
- return nbytes;
-}
-
-int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
- struct blkcipher_walk walk;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, bsize);
- while ((nbytes = walk.nbytes) >= bsize) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
- nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ le128_to_be128((be128 *)walk.iv, &ctrblk);
+ err = skcipher_walk_done(&walk, nbytes);
}
glue_fpu_end(fpu_enabled);
- if (walk.nbytes) {
- glue_ctr_crypt_final_128bit(
- gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
-
-static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- void *ctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- const unsigned int bsize = 128 / 8;
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- unsigned int num_blocks, func_bytes;
- unsigned int i;
-
- /* Process multi-block batch */
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
-
- if (nbytes >= func_bytes) {
- do {
- gctx->funcs[i].fn_u.xts(ctx, dst, src,
- (le128 *)walk->iv);
+ if (nbytes) {
+ le128 ctrblk;
+ u128 tmp;
- src += num_blocks;
- dst += num_blocks;
- nbytes -= func_bytes;
- } while (nbytes >= func_bytes);
+ be128_to_le128(&ctrblk, (be128 *)walk.iv);
+ memcpy(&tmp, walk.src.virt.addr, nbytes);
+ gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
+ &ctrblk);
+ memcpy(walk.dst.virt.addr, &tmp, nbytes);
+ le128_to_be128((be128 *)walk.iv, &ctrblk);
- if (nbytes < bsize)
- goto done;
- }
+ err = skcipher_walk_done(&walk, 0);
}
-done:
- return nbytes;
+ return err;
}
+EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
void *ctx,
@@ -372,46 +271,6 @@ done:
return nbytes;
}
-/* for implementations implementing faster XTS IV generator */
-int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
- void *tweak_ctx, void *crypt_ctx)
-{
- const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
- struct blkcipher_walk walk;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
-
- err = blkcipher_walk_virt(desc, &walk);
- nbytes = walk.nbytes;
- if (!nbytes)
- return err;
-
- /* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled,
- nbytes < bsize ? bsize : nbytes);
-
- /* calculate first value of T */
- tweak_fn(tweak_ctx, walk.iv, walk.iv);
-
- while (nbytes) {
- nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
-
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- }
-
- glue_fpu_end(fpu_enabled);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req,
common_glue_func_t tweak_fn, void *tweak_ctx,
@@ -429,9 +288,9 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
return err;
/* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled,
- nbytes < bsize ? bsize : nbytes);
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled,
+ nbytes < bsize ? bsize : nbytes);
/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);
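
Every glue_*_req_128bit() helper above follows one control flow: open a virtual-address walk over the request, consume as many whole blocks as each pass hands out, and report the unprocessed remainder to skcipher_walk_done(). Stripped of the per-cipher batching, the skeleton is roughly the following sketch; my_ctx and process_blocks() are placeholders, and the final false argument to skcipher_walk_virt() declines to force atomic operation.

	static int my_crypt(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
		struct skcipher_walk walk;
		unsigned int nbytes;
		int err;

		err = skcipher_walk_virt(&walk, req, false);

		while ((nbytes = walk.nbytes)) {
			/* crypt whole blocks between walk.src.virt.addr and
			 * walk.dst.virt.addr; return the leftover byte count */
			nbytes = process_blocks(ctx, &walk);
			err = skcipher_walk_done(&walk, nbytes);
		}

		return err;
	}

Compared with the old interface, this one call pair replaces blkcipher_walk_init() plus blkcipher_walk_virt(), and no struct blkcipher_desc needs to be threaded through the per-mode helpers.
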
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 870f6d812a2d..03347b16ac9d 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -14,15 +14,12 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
+#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/serpent-avx.h>
+#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
+#include <asm/crypto/serpent-avx.h>
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
@@ -40,6 +37,12 @@ asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
+static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
+}
+
static const struct common_glue_ctx serpent_enc = {
.num_funcs = 3,
.fpu_blocks_limit = 8,
@@ -136,403 +139,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_enc, req);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_dec, req);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
- dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
+ return glue_ctr_req_128bit(&serpent_ctr, req);
}
-static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- /* since reusing AVX functions, starts using FPU at 8 parallel blocks */
- return glue_fpu_begin(SERPENT_BLOCK_SIZE, 8, NULL, fpu_enabled, nbytes);
-}
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-static inline void serpent_fpu_end(bool fpu_enabled)
-{
- glue_fpu_end(fpu_enabled);
+ return glue_xts_req_128bit(&serpent_enc_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-struct crypt_priv {
- struct serpent_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_encrypt(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&serpent_dec_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg srp_algs[10] = { {
- .cra_name = "__ecb-serpent-avx2",
- .cra_driver_name = "__driver-ecb-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[0].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-serpent-avx2",
- .cra_driver_name = "__driver-cbc-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[1].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-serpent-avx2",
- .cra_driver_name = "__driver-ctr-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[2].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-serpent-avx2",
- .cra_driver_name = "__driver-lrw-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[3].cra_list),
- .cra_exit = lrw_serpent_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = lrw_serpent_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-serpent-avx2",
- .cra_driver_name = "__driver-xts-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[4].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(serpent)",
- .cra_driver_name = "ecb-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[5].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(serpent)",
- .cra_driver_name = "cbc-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[6].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(serpent)",
- .cra_driver_name = "ctr-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[7].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(serpent)",
- .cra_driver_name = "lrw-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[8].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(serpent)",
- .cra_driver_name = "xts-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[9].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg serpent_algs[] = {
+ {
+ .base.cra_name = "__ecb(serpent)",
+ .base.cra_driver_name = "__ecb-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(serpent)",
+ .base.cra_driver_name = "__cbc-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(serpent)",
+ .base.cra_driver_name = "__ctr-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .chunksize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(serpent)",
+ .base.cra_driver_name = "__xts-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
+ .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = xts_serpent_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init init(void)
{
@@ -548,12 +261,15 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(srp_algs, ARRAY_SIZE(srp_algs));
+ return simd_register_skciphers_compat(serpent_algs,
+ ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
static void __exit fini(void)
{
- crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
+ simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
module_init(init);
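
The registration change above replaces ten hand-rolled crypto_alg shells with four internal skciphers plus one helper call. Roughly, simd_register_skciphers_compat() registers the "__"-prefixed internal algorithms and then creates a simd wrapper for each that re-exposes it under the plain name, deferring to cryptd whenever the FPU is unavailable. The sketch below follows the general shape of the crypto/simd.c helper introduced alongside this series; it is illustrative, with error unwinding trimmed, not a verbatim copy:

/* Shape of the compat registration helper (illustrative sketch):
 * register the internal algs, then pair each with a simd wrapper
 * that strips the "__" prefix from the exposed names. */
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
                                   struct simd_skcipher_alg **simd_algs)
{
        int err, i;

        err = crypto_register_skciphers(algs, count);
        if (err)
                return err;

        for (i = 0; i < count; i++) {
                const char *algname = algs[i].base.cra_name + 2;
                const char *drvname = algs[i].base.cra_driver_name + 2;
                const char *basename = algs[i].base.cra_driver_name;
                struct simd_skcipher_alg *simd;

                simd = simd_skcipher_create_compat(algname, drvname, basename);
                if (IS_ERR(simd))
                        return PTR_ERR(simd);   /* the real helper unwinds */
                simd_algs[i] = simd;
        }
        return 0;
}
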
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 6f778d3daa22..458567ecf76c 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -24,21 +24,15 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <crypto/cryptd.h>
-#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
#include <crypto/xts.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/serpent-avx.h>
#include <asm/crypto/glue_helper.h>
+#include <asm/crypto/serpent-avx.h>
/* 8-way parallel cipher functions */
asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
@@ -91,6 +85,31 @@ void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
}
EXPORT_SYMBOL_GPL(serpent_xts_dec);
+static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
+}
+
+int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ /* first half of xts-key is for crypt */
+ err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
+}
+EXPORT_SYMBOL_GPL(xts_serpent_setkey);
static const struct common_glue_ctx serpent_enc = {
.num_funcs = 2,
@@ -170,423 +189,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_enc, req);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_dec, req);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
- dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
+ return glue_ctr_req_128bit(&serpent_ctr, req);
}
-static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
-}
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-static inline void serpent_fpu_end(bool fpu_enabled)
-{
- glue_fpu_end(fpu_enabled);
+ return glue_xts_req_128bit(&serpent_enc_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-struct crypt_priv {
- struct serpent_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_encrypt(ctx->ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
- SERPENT_BLOCK_SIZE);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen -
- SERPENT_BLOCK_SIZE);
-}
-EXPORT_SYMBOL_GPL(lrw_serpent_setkey);
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
+ return glue_xts_req_128bit(&serpent_dec_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-void lrw_serpent_exit_tfm(struct crypto_tfm *tfm)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
-}
-EXPORT_SYMBOL_GPL(lrw_serpent_exit_tfm);
-
-int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-EXPORT_SYMBOL_GPL(xts_serpent_setkey);
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg serpent_algs[10] = { {
- .cra_name = "__ecb-serpent-avx",
- .cra_driver_name = "__driver-ecb-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-serpent-avx",
- .cra_driver_name = "__driver-cbc-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-serpent-avx",
- .cra_driver_name = "__driver-ctr-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-serpent-avx",
- .cra_driver_name = "__driver-lrw-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_serpent_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = lrw_serpent_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-serpent-avx",
- .cra_driver_name = "__driver-xts-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(serpent)",
- .cra_driver_name = "ecb-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(serpent)",
- .cra_driver_name = "cbc-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(serpent)",
- .cra_driver_name = "ctr-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(serpent)",
- .cra_driver_name = "lrw-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(serpent)",
- .cra_driver_name = "xts-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg serpent_algs[] = {
+ {
+ .base.cra_name = "__ecb(serpent)",
+ .base.cra_driver_name = "__ecb-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(serpent)",
+ .base.cra_driver_name = "__cbc-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(serpent)",
+ .base.cra_driver_name = "__ctr-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .chunksize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(serpent)",
+ .base.cra_driver_name = "__xts-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
+ .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = xts_serpent_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init serpent_init(void)
{
@@ -598,12 +307,15 @@ static int __init serpent_init(void)
return -ENODEV;
}
- return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ return simd_register_skciphers_compat(serpent_algs,
+ ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
static void __exit serpent_exit(void)
{
- crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
module_init(serpent_init);
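
xts_serpent_setkey() above keeps the standard XTS key convention: xts_verify_key() rejects odd key lengths (and, in FIPS mode, keys whose two halves are identical), after which each half becomes an independent Serpent schedule. For readers unfamiliar with the layout, a standalone, userspace-runnable sketch of the split (struct and function names here are hypothetical):

#include <stdint.h>
#include <string.h>

struct xts_halves {                     /* illustrative only */
        uint8_t crypt[32];              /* first half: encrypts the data */
        uint8_t tweak[32];              /* second half: encrypts the tweak */
};

static int xts_split_key(struct xts_halves *out,
                         const uint8_t *key, size_t keylen)
{
        if ((keylen & 1) || keylen / 2 > sizeof(out->crypt))
                return -1;              /* xts_verify_key() also fails here */
        memcpy(out->crypt, key, keylen / 2);
        memcpy(out->tweak, key + keylen / 2, keylen / 2);
        return 0;
}
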
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index ac0e831943f5..3dafe137596a 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -30,21 +30,22 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
-#include <crypto/serpent.h>
-#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
+#include <crypto/internal/simd.h>
+#include <crypto/serpent.h>
#include <asm/crypto/serpent-sse2.h>
#include <asm/crypto/glue_helper.h>
+static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
+}
+
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
@@ -139,464 +140,79 @@ static const struct common_glue_ctx serpent_dec_cbc = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
- dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_enc, req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_ecb_req_128bit(&serpent_dec, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ req);
}
-static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
+ return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
}
-static inline void serpent_fpu_end(bool fpu_enabled)
+static int ctr_crypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_ctr_req_128bit(&serpent_ctr, req);
}
-struct crypt_priv {
- struct serpent_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_encrypt(ctx->ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-struct serpent_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct serpent_ctx serpent_ctx;
-};
-
-static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
- SERPENT_BLOCK_SIZE);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen -
- SERPENT_BLOCK_SIZE);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static void lrw_exit_tfm(struct crypto_tfm *tfm)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
-}
-
-struct serpent_xts_ctx {
- struct serpent_ctx tweak_ctx;
- struct serpent_ctx crypt_ctx;
+static struct skcipher_alg serpent_algs[] = {
+ {
+ .base.cra_name = "__ecb(serpent)",
+ .base.cra_driver_name = "__ecb-serpent-sse2",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(serpent)",
+ .base.cra_driver_name = "__cbc-serpent-sse2",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(serpent)",
+ .base.cra_driver_name = "__ctr-serpent-sse2",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .chunksize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ },
};
-static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->crypt_ctx,
- .fpu_enabled = false,
- };
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = xts_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->crypt_ctx,
- .fpu_enabled = false,
- };
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = xts_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static struct crypto_alg serpent_algs[10] = { {
- .cra_name = "__ecb-serpent-sse2",
- .cra_driver_name = "__driver-ecb-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-serpent-sse2",
- .cra_driver_name = "__driver-cbc-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-serpent-sse2",
- .cra_driver_name = "__driver-ctr-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-serpent-sse2",
- .cra_driver_name = "__driver-lrw-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = lrw_serpent_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-serpent-sse2",
- .cra_driver_name = "__driver-xts-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(serpent)",
- .cra_driver_name = "ecb-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(serpent)",
- .cra_driver_name = "cbc-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(serpent)",
- .cra_driver_name = "ctr-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(serpent)",
- .cra_driver_name = "lrw-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(serpent)",
- .cra_driver_name = "xts-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-} };
+static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init serpent_sse2_init(void)
{
@@ -605,12 +221,15 @@ static int __init serpent_sse2_init(void)
return -ENODEV;
}
- return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ return simd_register_skciphers_compat(serpent_algs,
+ ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
static void __exit serpent_sse2_exit(void)
{
- crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
module_init(serpent_sse2_init);
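
A detail shared by all of the converted CTR entries: .base.cra_blocksize is 1, so callers may submit any length just as with a stream cipher, while .chunksize = SERPENT_BLOCK_SIZE tells the skcipher walk code the keystream granularity so the glue still receives full blocks where possible. Any final partial block is then finished by generating one more keystream block and XORing only the remaining bytes. A minimal sketch of that tail step (the idea behind the glue helper's CTR path, not its actual code):

#include <stddef.h>
#include <stdint.h>

/* Encrypt the counter once more and XOR the leftover bytes in place;
 * encrypt_block() stands in for the underlying cipher primitive. */
static void ctr_tail(void (*encrypt_block)(const void *ctx, uint8_t *dst,
                                           const uint8_t *src),
                     const void *ctx, const uint8_t *counter,
                     uint8_t *data, size_t nbytes)      /* nbytes < block */
{
        uint8_t keystream[16];
        size_t i;

        encrypt_block(ctx, keystream, counter);
        for (i = 0; i < nbytes; i++)
                data[i] ^= keystream[i];
}
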
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index acf9fdf01671..e17655ffde79 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -106,13 +106,6 @@ static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
(struct sha1_mb_mgr *state);
-static inline void sha1_init_digest(uint32_t *digest)
-{
- static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
- SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
- memcpy(digest, initial_digest, sizeof(initial_digest));
-}
-
static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
uint64_t total_len)
{
@@ -244,11 +237,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
uint32_t len,
int flags)
{
- if (flags & (~HASH_ENTIRE)) {
- /*
- * User should not pass anything other than FIRST, UPDATE, or
- * LAST
- */
+ if (flags & ~(HASH_UPDATE | HASH_LAST)) {
+ /* User should not pass anything other than UPDATE or LAST */
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
return ctx;
}
@@ -259,24 +249,12 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
return ctx;
}
- if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
/* Cannot update a finished job. */
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
return ctx;
}
-
- if (flags & HASH_FIRST) {
- /* Init digest */
- sha1_init_digest(ctx->job.result_digest);
-
- /* Reset byte counter */
- ctx->total_length = 0;
-
- /* Clear extra blocks */
- ctx->partial_block_buffer_length = 0;
- }
-
/*
* If we made it here, there were no errors during this call to
* submit
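
With HASH_FIRST gone, the submit-path check in all three multibuffer drivers reduces to a single-bit mask: HASH_UPDATE is 0x00, so flags & ~(HASH_UPDATE | HASH_LAST) is nonzero exactly when any bit other than bit 0 is set. A small runnable demonstration of the test:

#include <stdio.h>

#define HASH_UPDATE 0x00
#define HASH_LAST   0x01
#define HASH_DONE   0x02        /* driver-internal; callers must not pass it */

static int flags_valid(int flags)
{
        /* Same expression as the submit paths above. */
        return !(flags & ~(HASH_UPDATE | HASH_LAST));
}

int main(void)
{
        printf("%d %d %d\n",
               flags_valid(HASH_UPDATE),        /* 1 */
               flags_valid(HASH_LAST),          /* 1 */
               flags_valid(HASH_DONE));         /* 0: rejected */
        return 0;
}
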
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
index 13590ccf965c..9454bd16f9f8 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
@@ -57,11 +57,9 @@
#include "sha1_mb_mgr.h"
#define HASH_UPDATE 0x00
-#define HASH_FIRST 0x01
-#define HASH_LAST 0x02
-#define HASH_ENTIRE 0x03
-#define HASH_DONE 0x04
-#define HASH_FINAL 0x08
+#define HASH_LAST 0x01
+#define HASH_DONE 0x02
+#define HASH_FINAL 0x04
#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
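
The renumbering above (repeated in the sha256 and sha512 headers below) keeps each surviving flag a distinct power of two. That property matters because the drivers OR several flags into one word and test them independently, e.g.:

/* Sketch: distinct bits remain independently testable after OR-ing. */
int flag = HASH_LAST | HASH_DONE;       /* 0x01 | 0x02 == 0x03 */
/* (flag & HASH_LAST) and (flag & HASH_DONE) are both nonzero,
 * while (flag & HASH_FINAL) is still zero (0x04 untouched). */
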
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 7926a226b120..4c46ac1b6653 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -106,14 +106,6 @@ static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
(struct sha256_mb_mgr *state);
-inline void sha256_init_digest(uint32_t *digest)
-{
- static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
- memcpy(digest, initial_digest, sizeof(initial_digest));
-}
-
inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
uint64_t total_len)
{
@@ -245,10 +237,8 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
uint32_t len,
int flags)
{
- if (flags & (~HASH_ENTIRE)) {
- /* User should not pass anything other than FIRST, UPDATE
- * or LAST
- */
+ if (flags & ~(HASH_UPDATE | HASH_LAST)) {
+ /* User should not pass anything other than UPDATE or LAST */
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
return ctx;
}
@@ -259,23 +249,12 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
return ctx;
}
- if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
/* Cannot update a finished job. */
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
return ctx;
}
- if (flags & HASH_FIRST) {
- /* Init digest */
- sha256_init_digest(ctx->job.result_digest);
-
- /* Reset byte counter */
- ctx->total_length = 0;
-
- /* Clear extra blocks */
- ctx->partial_block_buffer_length = 0;
- }
-
/* If we made it here, there was no error during this call to submit */
ctx->error = HASH_CTX_ERROR_NONE;
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
index aabb30320af0..7c432543dc7f 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
@@ -57,11 +57,9 @@
#include "sha256_mb_mgr.h"
#define HASH_UPDATE 0x00
-#define HASH_FIRST 0x01
-#define HASH_LAST 0x02
-#define HASH_ENTIRE 0x03
-#define HASH_DONE 0x04
-#define HASH_FINAL 0x08
+#define HASH_LAST 0x01
+#define HASH_DONE 0x02
+#define HASH_FINAL 0x04
#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 458409b7568d..39e2bbdc1836 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -107,15 +107,6 @@ static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
(struct sha512_mb_mgr *state);
-inline void sha512_init_digest(uint64_t *digest)
-{
- static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
- SHA512_H0, SHA512_H1, SHA512_H2,
- SHA512_H3, SHA512_H4, SHA512_H5,
- SHA512_H6, SHA512_H7 };
- memcpy(digest, initial_digest, sizeof(initial_digest));
-}
-
inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
uint64_t total_len)
{
@@ -263,11 +254,8 @@ static struct sha512_hash_ctx
mgr = cstate->mgr;
spin_lock_irqsave(&cstate->work_lock, irqflags);
- if (flags & (~HASH_ENTIRE)) {
- /*
- * User should not pass anything other than FIRST, UPDATE, or
- * LAST
- */
+ if (flags & ~(HASH_UPDATE | HASH_LAST)) {
+ /* User should not pass anything other than UPDATE or LAST */
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
goto unlock;
}
@@ -278,24 +266,12 @@ static struct sha512_hash_ctx
goto unlock;
}
- if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
/* Cannot update a finished job. */
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
goto unlock;
}
-
- if (flags & HASH_FIRST) {
- /* Init digest */
- sha512_init_digest(ctx->job.result_digest);
-
- /* Reset byte counter */
- ctx->total_length = 0;
-
- /* Clear extra blocks */
- ctx->partial_block_buffer_length = 0;
- }
-
/*
* If we made it here, there were no errors during this call to
* submit
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
index e4653f5eec3f..e5c465bd821e 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
@@ -57,11 +57,9 @@
#include "sha512_mb_mgr.h"
#define HASH_UPDATE 0x00
-#define HASH_FIRST 0x01
-#define HASH_LAST 0x02
-#define HASH_ENTIRE 0x03
-#define HASH_DONE 0x04
-#define HASH_FINAL 0x08
+#define HASH_LAST 0x01
+#define HASH_DONE 0x02
+#define HASH_FINAL 0x04
#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index b7a3904b953c..66d989230d10 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -24,24 +24,15 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
#include <crypto/twofish.h>
-#include <crypto/cryptd.h>
-#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
#include <crypto/xts.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/twofish.h>
#include <asm/crypto/glue_helper.h>
-#include <crypto/scatterwalk.h>
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
+#include <asm/crypto/twofish.h>
#define TWOFISH_PARALLEL_BLOCKS 8
@@ -61,6 +52,12 @@ asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
+static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return twofish_setkey(&tfm->base, key, keylen);
+}
+
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
@@ -79,6 +76,31 @@ static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
GLUE_FUNC_CAST(twofish_dec_blk));
}
+struct twofish_xts_ctx {
+ struct twofish_ctx tweak_ctx;
+ struct twofish_ctx crypt_ctx;
+};
+
+static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 *flags = &tfm->base.crt_flags;
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ /* first half of xts-key is for crypt */
+ err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
+ flags);
+}
static const struct common_glue_ctx twofish_enc = {
.num_funcs = 3,
@@ -170,389 +192,113 @@ static const struct common_glue_ctx twofish_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
- dst, src, nbytes);
+ return glue_ecb_req_128bit(&twofish_enc, req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_ecb_req_128bit(&twofish_dec, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
+ req);
}
-static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL,
- fpu_enabled, nbytes);
+ return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
}
-static inline void twofish_fpu_end(bool fpu_enabled)
+static int ctr_crypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_ctr_req_128bit(&twofish_ctr, req);
}
-struct crypt_priv {
- struct twofish_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
- twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- nbytes %= bsize * 3;
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_enc_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&twofish_enc_xts, req,
+ XTS_TWEAK_CAST(twofish_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
- twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
-
- nbytes %= bsize * 3;
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_dec_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&twofish_dec_xts, req,
+ XTS_TWEAK_CAST(twofish_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[TWOFISH_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->twofish_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- twofish_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[TWOFISH_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->twofish_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- twofish_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&twofish_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(twofish_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&twofish_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(twofish_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg twofish_algs[10] = { {
- .cra_name = "__ecb-twofish-avx",
- .cra_driver_name = "__driver-ecb-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-twofish-avx",
- .cra_driver_name = "__driver-cbc-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = twofish_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-twofish-avx",
- .cra_driver_name = "__driver-ctr-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-twofish-avx",
- .cra_driver_name = "__driver-lrw-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_twofish_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE +
- TF_BLOCK_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE +
- TF_BLOCK_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = lrw_twofish_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-twofish-avx",
- .cra_driver_name = "__driver-xts-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE * 2,
- .max_keysize = TF_MAX_KEY_SIZE * 2,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = xts_twofish_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(twofish)",
- .cra_driver_name = "ecb-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(twofish)",
- .cra_driver_name = "cbc-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(twofish)",
- .cra_driver_name = "ctr-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(twofish)",
- .cra_driver_name = "lrw-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE +
- TF_BLOCK_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE +
- TF_BLOCK_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(twofish)",
- .cra_driver_name = "xts-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE * 2,
- .max_keysize = TF_MAX_KEY_SIZE * 2,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg twofish_algs[] = {
+ {
+ .base.cra_name = "__ecb(twofish)",
+ .base.cra_driver_name = "__ecb-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(twofish)",
+ .base.cra_driver_name = "__cbc-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(twofish)",
+ .base.cra_driver_name = "__ctr-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .chunksize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(twofish)",
+ .base.cra_driver_name = "__xts-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * TF_MIN_KEY_SIZE,
+ .max_keysize = 2 * TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .setkey = xts_twofish_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];
static int __init twofish_init(void)
{
@@ -563,12 +309,15 @@ static int __init twofish_init(void)
return -ENODEV;
}
- return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
+ return simd_register_skciphers_compat(twofish_algs,
+ ARRAY_SIZE(twofish_algs),
+ twofish_simd_algs);
}
static void __exit twofish_exit(void)
{
- crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
+ simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
+ twofish_simd_algs);
}
module_init(twofish_init);
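
For reference, a minimal usage sketch of one of the skcipher algorithms this
file now registers through the SIMD wrapper. This sketch is not part of the
patch: example_xts_twofish() is hypothetical, and the 64-byte key length
(two TF_MAX_KEY_SIZE halves, crypt plus tweak) and in-place buffer handling
are illustrative assumptions.

    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>

    static int example_xts_twofish(u8 *buf, unsigned int len, const u8 *key)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            u8 iv[16] = {};         /* XTS tweak, illustrative */
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_skcipher("xts(twofish)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, 64);
            if (err)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            sg_init_one(&sg, buf, len);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }

Such a request resolves to the "__xts-twofish-avx" internal implementation
through the cryptd-backed SIMD wrapper when the FPU is usable, so callers
never see the CRYPTO_ALG_INTERNAL algorithms directly.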
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 243e90a4b5d9..571485502ec8 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -20,22 +20,26 @@
*
*/
-#include <asm/processor.h>
+#include <asm/crypto/glue_helper.h>
+#include <asm/crypto/twofish.h>
+#include <crypto/algapi.h>
+#include <crypto/b128ops.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <crypto/algapi.h>
-#include <crypto/twofish.h>
-#include <crypto/b128ops.h>
-#include <asm/crypto/twofish.h>
-#include <asm/crypto/glue_helper.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way);
EXPORT_SYMBOL_GPL(twofish_dec_blk_3way);
+static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return twofish_setkey(&tfm->base, key, keylen);
+}
+
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
@@ -151,284 +155,74 @@ static const struct common_glue_ctx twofish_dec_cbc = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
-}
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct twofish_ctx *ctx = priv;
- int i;
-
- if (nbytes == 3 * bsize) {
- twofish_enc_blk_3way(ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_enc_blk(ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct twofish_ctx *ctx = priv;
- int i;
-
- if (nbytes == 3 * bsize) {
- twofish_dec_blk_3way(ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_dec_blk(ctx, srcdst, srcdst);
-}
-
-int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
- &tfm->crt_flags);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
+ return glue_ecb_req_128bit(&twofish_enc, req);
}
-EXPORT_SYMBOL_GPL(lrw_twofish_setkey);
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[3];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->twofish_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
+ return glue_ecb_req_128bit(&twofish_dec, req);
}
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[3];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->twofish_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
+ req);
}
-void lrw_twofish_exit_tfm(struct crypto_tfm *tfm)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
-}
-EXPORT_SYMBOL_GPL(lrw_twofish_exit_tfm);
-
-int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
- flags);
-}
-EXPORT_SYMBOL_GPL(xts_twofish_setkey);
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[3];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[3];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_ctr_req_128bit(&twofish_ctr, req);
}
-static struct crypto_alg tf_algs[5] = { {
- .cra_name = "ecb(twofish)",
- .cra_driver_name = "ecb-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(twofish)",
- .cra_driver_name = "cbc-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(twofish)",
- .cra_driver_name = "ctr-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "lrw(twofish)",
- .cra_driver_name = "lrw-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_twofish_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = lrw_twofish_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "xts(twofish)",
- .cra_driver_name = "xts-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE * 2,
- .max_keysize = TF_MAX_KEY_SIZE * 2,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = xts_twofish_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
+static struct skcipher_alg tf_skciphers[] = {
+ {
+ .base.cra_name = "ecb(twofish)",
+ .base.cra_driver_name = "ecb-twofish-3way",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(twofish)",
+ .base.cra_driver_name = "cbc-twofish-3way",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(twofish)",
+ .base.cra_driver_name = "ctr-twofish-3way",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .chunksize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
},
-} };
+};
static bool is_blacklisted_cpu(void)
{
@@ -478,12 +272,13 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(tf_algs, ARRAY_SIZE(tf_algs));
+ return crypto_register_skciphers(tf_skciphers,
+ ARRAY_SIZE(tf_skciphers));
}
static void __exit fini(void)
{
- crypto_unregister_algs(tf_algs, ARRAY_SIZE(tf_algs));
+ crypto_unregister_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers));
}
module_init(init);
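
The encrypt_callback()/decrypt_callback() helpers removed above encoded the
dispatch idea this 3-way glue is built around. A hedged restatement of that
pattern as a self-contained helper (the wrapper itself is hypothetical; the
block functions are the ones declared in this file):

    /*
     * Push bundles of three blocks through the parallel path, then
     * fall back to one block at a time for the tail.
     */
    static void nway_encrypt_sketch(struct twofish_ctx *ctx, u8 *buf,
                                    unsigned int nbytes)
    {
            const unsigned int bsize = TF_BLOCK_SIZE;

            while (nbytes >= 3 * bsize) {
                    twofish_enc_blk_3way(ctx, buf, buf);
                    buf += 3 * bsize;
                    nbytes -= 3 * bsize;
            }
            while (nbytes >= bsize) {
                    twofish_enc_blk(ctx, buf, buf);
                    buf += bsize;
                    nbytes -= bsize;
            }
    }

After this conversion the same dispatch lives in the common_glue_ctx tables
(twofish_enc, twofish_dec, twofish_ctr) consumed by the glue_helper routines.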
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
index 10f8d590bcfe..a5d86fc0593f 100644
--- a/arch/x86/include/asm/crypto/camellia.h
+++ b/arch/x86/include/asm/crypto/camellia.h
@@ -2,8 +2,9 @@
#ifndef ASM_X86_CAMELLIA_H
#define ASM_X86_CAMELLIA_H
-#include <linux/kernel.h>
+#include <crypto/b128ops.h>
#include <linux/crypto.h>
+#include <linux/kernel.h>
#define CAMELLIA_MIN_KEY_SIZE 16
#define CAMELLIA_MAX_KEY_SIZE 32
@@ -11,16 +12,13 @@
#define CAMELLIA_TABLE_BYTE_LEN 272
#define CAMELLIA_PARALLEL_BLOCKS 2
+struct crypto_skcipher;
+
struct camellia_ctx {
u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
u32 key_length;
};
-struct camellia_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct camellia_ctx camellia_ctx;
-};
-
struct camellia_xts_ctx {
struct camellia_ctx tweak_ctx;
struct camellia_ctx crypt_ctx;
@@ -30,11 +28,7 @@ extern int __camellia_setkey(struct camellia_ctx *cctx,
const unsigned char *key,
unsigned int key_len, u32 *flags);
-extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
-
-extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
/* regular block cipher functions */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 553a03de55c3..d1818634ae7e 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -45,7 +45,7 @@ struct common_glue_ctx {
};
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
- struct blkcipher_desc *desc,
+ struct skcipher_walk *walk,
bool fpu_enabled, unsigned int nbytes)
{
if (likely(fpu_blocks_limit < 0))
@@ -61,33 +61,6 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
return false;
- if (desc) {
- /* prevent sleeping if FPU is in use */
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- }
-
- kernel_fpu_begin();
- return true;
-}
-
-static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
- int fpu_blocks_limit,
- struct skcipher_walk *walk,
- bool fpu_enabled, unsigned int nbytes)
-{
- if (likely(fpu_blocks_limit < 0))
- return false;
-
- if (fpu_enabled)
- return true;
-
- /*
- * Vector-registers are only used when chunk to be processed is large
- * enough, so do not enable FPU until it is necessary.
- */
- if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
- return false;
-
/* prevent sleeping if FPU is in use */
skcipher_walk_atomise(walk);
@@ -126,41 +99,17 @@ static inline void le128_inc(le128 *i)
i->b = cpu_to_le64(b);
}
-extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-
-extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
-
-extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
-
-extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-
-extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- common_glue_func_t tweak_fn, void *tweak_ctx,
- void *crypt_ctx);
-
-extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- common_glue_func_t tweak_fn, void *tweak_ctx,
- void *crypt_ctx);
+extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req);
+
+extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
+ struct skcipher_request *req);
+
+extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req);
+
+extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req);
extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req,
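
With blkcipher_desc gone, glue_fpu_begin() now keys off the skcipher walk
directly. A hedged sketch of the calling convention this implies; the wrapper
and its process_blocks() step are hypothetical placeholders:

    #include <asm/crypto/glue_helper.h>
    #include <crypto/internal/skcipher.h>

    static int example_walk(struct skcipher_request *req,
                            unsigned int bsize, int fpu_blocks_limit)
    {
            struct skcipher_walk walk;
            bool fpu_enabled = false;
            int err;

            err = skcipher_walk_virt(&walk, req, false);
            while (walk.nbytes) {
                    /*
                     * Claims the FPU only once a walk step is large
                     * enough to amortize the save/restore cost.
                     */
                    fpu_enabled = glue_fpu_begin(bsize, fpu_blocks_limit,
                                                 &walk, fpu_enabled,
                                                 walk.nbytes);
                    /* process_blocks(&walk): SIMD path iff fpu_enabled */
                    err = skcipher_walk_done(&walk, 0);
            }
            glue_fpu_end(fpu_enabled);
            return err;
    }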
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
index c958b7bd0fcb..db7c9cc32234 100644
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -2,15 +2,13 @@
#ifndef ASM_X86_SERPENT_AVX_H
#define ASM_X86_SERPENT_AVX_H
-#include <linux/crypto.h>
+#include <crypto/b128ops.h>
#include <crypto/serpent.h>
+#include <linux/types.h>
-#define SERPENT_PARALLEL_BLOCKS 8
+struct crypto_skcipher;
-struct serpent_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct serpent_ctx serpent_ctx;
-};
+#define SERPENT_PARALLEL_BLOCKS 8
struct serpent_xts_ctx {
struct serpent_ctx tweak_ctx;
@@ -38,12 +36,7 @@ extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
-extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-
-extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm);
-
-extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
#endif
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
index 65bb80adba3e..f618bf272b90 100644
--- a/arch/x86/include/asm/crypto/twofish.h
+++ b/arch/x86/include/asm/crypto/twofish.h
@@ -4,19 +4,8 @@
#include <linux/crypto.h>
#include <crypto/twofish.h>
-#include <crypto/lrw.h>
#include <crypto/b128ops.h>
-struct twofish_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct twofish_ctx twofish_ctx;
-};
-
-struct twofish_xts_ctx {
- struct twofish_ctx tweak_ctx;
- struct twofish_ctx crypt_ctx;
-};
-
/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
@@ -36,12 +25,4 @@ extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
-extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-
-extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
-
-extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-
#endif /* ASM_X86_TWOFISH_H */
diff --git a/crypto/Kconfig b/crypto/Kconfig
index b75264b09a46..c0dabed5122e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -245,10 +245,6 @@ config CRYPTO_TEST
help
Quick & dirty crypto test module.
-config CRYPTO_ABLK_HELPER
- tristate
- select CRYPTO_CRYPTD
-
config CRYPTO_SIMD
tristate
select CRYPTO_CRYPTD
@@ -324,6 +320,14 @@ config CRYPTO_CBC
CBC: Cipher Block Chaining mode
This block cipher algorithm is required for IPSec.
+config CRYPTO_CFB
+ tristate "CFB support"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ help
+ CFB: Cipher FeedBack mode
+ This block cipher algorithm is required for TPM2 cryptography.
+
config CRYPTO_CTR
tristate "CTR support"
select CRYPTO_BLKCIPHER
@@ -1114,7 +1118,7 @@ config CRYPTO_BLOWFISH_COMMON
config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_BLOWFISH_COMMON
help
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
@@ -1145,10 +1149,8 @@ config CRYPTO_CAMELLIA_X86_64
tristate "Camellia cipher algorithm (x86_64)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Camellia cipher algorithm module (x86_64).
@@ -1164,12 +1166,10 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_BLKCIPHER
select CRYPTO_CAMELLIA_X86_64
- select CRYPTO_LRW
+ select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_SIMD
select CRYPTO_XTS
help
Camellia cipher algorithm module (x86_64/AES-NI/AVX).
@@ -1186,14 +1186,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_CAMELLIA_X86_64
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
@@ -1238,11 +1231,10 @@ config CRYPTO_CAST5
config CRYPTO_CAST5_AVX_X86_64
tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_CAST_COMMON
+ select CRYPTO_BLKCIPHER
select CRYPTO_CAST5
+ select CRYPTO_CAST_COMMON
+ select CRYPTO_SIMD
help
The CAST5 encryption algorithm (synonymous with CAST-128) is
described in RFC2144.
@@ -1261,13 +1253,11 @@ config CRYPTO_CAST6
config CRYPTO_CAST6_AVX_X86_64
tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_CAST_COMMON
+ select CRYPTO_BLKCIPHER
select CRYPTO_CAST6
- select CRYPTO_LRW
+ select CRYPTO_CAST_COMMON
+ select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_SIMD
select CRYPTO_XTS
help
The CAST6 encryption algorithm (synonymous with CAST-256) is
@@ -1294,7 +1284,7 @@ config CRYPTO_DES_SPARC64
config CRYPTO_DES3_EDE_X86_64
tristate "Triple DES EDE cipher algorithm (x86-64)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_DES
help
Triple DES EDE (FIPS 46-3) algorithm.
@@ -1422,13 +1412,10 @@ config CRYPTO_SERPENT
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Serpent cipher algorithm (x86_64/SSE2)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
- select CRYPTO_LRW
- select CRYPTO_XTS
+ select CRYPTO_SIMD
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1444,13 +1431,10 @@ config CRYPTO_SERPENT_SSE2_X86_64
config CRYPTO_SERPENT_SSE2_586
tristate "Serpent cipher algorithm (i586/SSE2)"
depends on X86 && !64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
- select CRYPTO_LRW
- select CRYPTO_XTS
+ select CRYPTO_SIMD
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1466,12 +1450,10 @@ config CRYPTO_SERPENT_SSE2_586
config CRYPTO_SERPENT_AVX_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
- select CRYPTO_LRW
+ select CRYPTO_SIMD
select CRYPTO_XTS
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1488,14 +1470,7 @@ config CRYPTO_SERPENT_AVX_X86_64
config CRYPTO_SERPENT_AVX2_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX2)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_SERPENT
select CRYPTO_SERPENT_AVX_X86_64
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1508,6 +1483,45 @@ config CRYPTO_SERPENT_AVX2_X86_64
See also:
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
+config CRYPTO_SM4
+ tristate "SM4 cipher algorithm"
+ select CRYPTO_ALGAPI
+ help
+ SM4 cipher algorithm (OSCCA GB/T 32907-2016).
+
+ SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+ Organization of State Commercial Administration of China (OSCCA)
+ as an authorized cryptographic algorithm for use within China.
+
+ SMS4 was originally created for use in protecting wireless
+ networks, and is mandated in the Chinese National Standard for
+ Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure)
+ (GB.15629.11-2003).
+
+ The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and
+ standardized through TC 260 of the Standardization Administration
+ of the People's Republic of China (SAC).
+
+ The input, output, and key of SMS4 are each 128 bits.
+
+ See also: <https://eprint.iacr.org/2008/329.pdf>
+
+ If unsure, say N.
+
+config CRYPTO_SPECK
+ tristate "Speck cipher algorithm"
+ select CRYPTO_ALGAPI
+ help
+ Speck is a lightweight block cipher that is tuned for optimal
+ performance in software (rather than hardware).
+
+ Speck may not be as secure as AES, and should only be used on systems
+ where AES is not fast enough.
+
+ See also: <https://eprint.iacr.org/2013/404.pdf>
+
+ If unsure, say N.
+
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
select CRYPTO_ALGAPI
@@ -1581,12 +1595,10 @@ config CRYPTO_TWOFISH_X86_64
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Twofish cipher algorithm (x86_64, 3-way parallel).
@@ -1604,15 +1616,12 @@ config CRYPTO_TWOFISH_X86_64_3WAY
config CRYPTO_TWOFISH_AVX_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_SIMD
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_TWOFISH_X86_64_3WAY
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Twofish cipher algorithm (x86_64/AVX).
diff --git a/crypto/Makefile b/crypto/Makefile
index cdbc03b35510..4fc69fe94e6a 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_CFB) += cfb.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_CTS) += cts.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
@@ -100,6 +101,7 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
+obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
@@ -110,6 +112,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
+obj-$(CONFIG_CRYPTO_SPECK) += speck.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
@@ -149,6 +152,5 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
-obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
crypto_simd-y := simd.o
obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
deleted file mode 100644
index 09776bb1360e..000000000000
--- a/crypto/ablk_helper.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Shared async block cipher helpers
- *
- * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
- *
- * Based on aesni-intel_glue.c by:
- * Copyright (C) 2008, Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <crypto/algapi.h>
-#include <crypto/cryptd.h>
-#include <crypto/ablk_helper.h>
-#include <asm/simd.h>
-
-int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
- int err;
-
- crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
- & CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(child, key, key_len);
- crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
- & CRYPTO_TFM_RES_MASK);
- return err;
-}
-EXPORT_SYMBOL_GPL(ablk_set_key);
-
-int __ablk_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct blkcipher_desc desc;
-
- desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
- desc.info = req->info;
- desc.flags = 0;
-
- return crypto_blkcipher_crt(desc.tfm)->encrypt(
- &desc, req->dst, req->src, req->nbytes);
-}
-EXPORT_SYMBOL_GPL(__ablk_encrypt);
-
-int ablk_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!may_use_simd() ||
- (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
- struct ablkcipher_request *cryptd_req =
- ablkcipher_request_ctx(req);
-
- *cryptd_req = *req;
- ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
-
- return crypto_ablkcipher_encrypt(cryptd_req);
- } else {
- return __ablk_encrypt(req);
- }
-}
-EXPORT_SYMBOL_GPL(ablk_encrypt);
-
-int ablk_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!may_use_simd() ||
- (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
- struct ablkcipher_request *cryptd_req =
- ablkcipher_request_ctx(req);
-
- *cryptd_req = *req;
- ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
-
- return crypto_ablkcipher_decrypt(cryptd_req);
- } else {
- struct blkcipher_desc desc;
-
- desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
- desc.info = req->info;
- desc.flags = 0;
-
- return crypto_blkcipher_crt(desc.tfm)->decrypt(
- &desc, req->dst, req->src, req->nbytes);
- }
-}
-EXPORT_SYMBOL_GPL(ablk_decrypt);
-
-void ablk_exit(struct crypto_tfm *tfm)
-{
- struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cryptd_free_ablkcipher(ctx->cryptd_tfm);
-}
-EXPORT_SYMBOL_GPL(ablk_exit);
-
-int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
-{
- struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
- struct cryptd_ablkcipher *cryptd_tfm;
-
- cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- ctx->cryptd_tfm = cryptd_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(&cryptd_tfm->base);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ablk_init_common);
-
-int ablk_init(struct crypto_tfm *tfm)
-{
- char drv_name[CRYPTO_MAX_ALG_NAME];
-
- snprintf(drv_name, sizeof(drv_name), "__driver-%s",
- crypto_tfm_alg_driver_name(tfm));
-
- return ablk_init_common(tfm, drv_name);
-}
-EXPORT_SYMBOL_GPL(ablk_init);
-
-MODULE_LICENSE("GPL");
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 266fc1d64f61..a64c143165b1 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -92,13 +92,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (nbytes && walk->offset & alignmask && !err) {
walk->offset = ALIGN(walk->offset, alignmask + 1);
- walk->data += walk->offset;
-
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
- return nbytes;
+ if (nbytes) {
+ walk->data += walk->offset;
+ return nbytes;
+ }
}
if (walk->flags & CRYPTO_ALG_ASYNC)
@@ -446,24 +447,12 @@ static int ahash_def_finup(struct ahash_request *req)
return ahash_def_finup_finish1(req, err);
}
-static int ahash_no_export(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int ahash_no_import(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
- hash->export = ahash_no_export;
- hash->import = ahash_no_import;
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_init_shash_ops_async(tfm);
@@ -473,16 +462,14 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
hash->final = alg->final;
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
+ hash->export = alg->export;
+ hash->import = alg->import;
if (alg->setkey) {
hash->setkey = alg->setkey;
if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
}
- if (alg->export)
- hash->export = alg->export;
- if (alg->import)
- hash->import = alg->import;
return 0;
}
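
With the ahash_no_export()/ahash_no_import() stubs removed, an ahash
implementation is now expected to wire up both hooks itself. A hedged sketch
of what that looks like; the state layout and all example_* names are
illustrative assumptions, not taken from this patch:

    #include <crypto/internal/hash.h>
    #include <linux/string.h>

    /* illustrative partial-hash state; must match .halg.statesize */
    struct example_state {
            u64 count;
            u8 buffer[64];
    };

    static int example_export(struct ahash_request *req, void *out)
    {
            memcpy(out, ahash_request_ctx(req), sizeof(struct example_state));
            return 0;
    }

    static int example_import(struct ahash_request *req, const void *in)
    {
            memcpy(ahash_request_ctx(req), in, sizeof(struct example_state));
            return 0;
    }

    static struct ahash_alg example_alg = {
            /* .init/.update/.final/.digest elided for brevity */
            .export         = example_export,
            .import         = example_import,
            .halg.statesize = sizeof(struct example_state),
    };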
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 395b082d03a9..2a0271b5f62a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -543,9 +543,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
- if (unlikely(!crypto_mod_get(&inst->alg)))
- return -EAGAIN;
-
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(&inst->alg);
@@ -563,14 +560,9 @@ unlock:
goto err;
crypto_wait_for_test(larval);
-
- /* Remove instance if test failed */
- if (!(inst->alg.cra_flags & CRYPTO_ALG_TESTED))
- crypto_unregister_instance(inst);
err = 0;
err:
- crypto_mod_put(&inst->alg);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
diff --git a/crypto/api.c b/crypto/api.c
index 70a894e52ff3..1d5290c67108 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -193,17 +193,24 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
return alg;
}
-struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
+static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
+ u32 mask)
{
struct crypto_alg *alg;
+ u32 test = 0;
+
+ if (!((type | mask) & CRYPTO_ALG_TESTED))
+ test |= CRYPTO_ALG_TESTED;
down_read(&crypto_alg_sem);
- alg = __crypto_alg_lookup(name, type, mask);
+ alg = __crypto_alg_lookup(name, type | test, mask | test);
+ if (!alg && test)
+ alg = __crypto_alg_lookup(name, type, mask) ?
+ ERR_PTR(-ELIBBAD) : NULL;
up_read(&crypto_alg_sem);
return alg;
}
-EXPORT_SYMBOL_GPL(crypto_alg_lookup);
static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
u32 mask)
@@ -227,10 +234,12 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
alg = crypto_alg_lookup(name, type, mask);
}
- if (alg)
- return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
+ if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
+ alg = crypto_larval_wait(alg);
+ else if (!alg)
+ alg = crypto_larval_add(name, type, mask);
- return crypto_larval_add(name, type, mask);
+ return alg;
}
int crypto_probing_notify(unsigned long val, void *v)
@@ -253,11 +262,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
struct crypto_alg *larval;
int ok;
- if (!((type | mask) & CRYPTO_ALG_TESTED)) {
- type |= CRYPTO_ALG_TESTED;
- mask |= CRYPTO_ALG_TESTED;
- }
-
/*
* If the internal flag is set for a cipher, require a caller to
* invoke the cipher with the internal flag to use that cipher.
@@ -485,20 +489,14 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
{
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
- crypto_alg_mod_lookup;
-
if (frontend) {
type &= frontend->maskclear;
mask &= frontend->maskclear;
type |= frontend->type;
mask |= frontend->maskset;
-
- if (frontend->lookup)
- lookup = frontend->lookup;
}
- return lookup(alg_name, type, mask);
+ return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);
diff --git a/crypto/cfb.c b/crypto/cfb.c
new file mode 100644
index 000000000000..94ee39bed758
--- /dev/null
+++ b/crypto/cfb.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CFB: Cipher FeedBack mode
+ *
+ * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
+ *
+ * CFB is a stream cipher mode which is layered on top of a block
+ * encryption scheme. It works very much like a one-time pad where
+ * the pad is generated initially from the encrypted IV and then
+ * subsequently from the encrypted previous block of ciphertext. The
+ * pad is XOR'd into the plaintext to get the final ciphertext.
+ *
+ * The scheme of CFB is best described by Wikipedia:
+ *
+ * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
+ *
+ * Note that since the pad for both encryption and decryption is
+ * generated by an encryption operation, CFB never uses the block
+ * decryption function.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+struct crypto_cfb_ctx {
+ struct crypto_cipher *child;
+};
+
+static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *child = ctx->child;
+
+ return crypto_cipher_blocksize(child);
+}
+
+static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_cipher_encrypt_one(ctx->child, dst, src);
+}
+
+/* final encrypt and decrypt are the same */
+static void crypto_cfb_final(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+ u8 tmp[bsize + alignmask];
+ u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+ unsigned int nbytes = walk->nbytes;
+
+ crypto_cfb_encrypt_one(tfm, iv, stream);
+ crypto_xor_cpy(dst, stream, src, nbytes);
+}
+
+static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, dst);
+ crypto_xor(dst, src, bsize);
+ memcpy(iv, dst, bsize);
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *iv = walk->iv;
+ u8 tmp[bsize];
+
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, tmp);
+ crypto_xor(src, tmp, bsize);
+ iv = src;
+
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
+ unsigned int bsize = crypto_cfb_bsize(tfm);
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes >= bsize) {
+ if (walk.src.virt.addr == walk.dst.virt.addr)
+ err = crypto_cfb_encrypt_inplace(&walk, tfm);
+ else
+ err = crypto_cfb_encrypt_segment(&walk, tfm);
+ err = skcipher_walk_done(&walk, err);
+ }
+
+ if (walk.nbytes) {
+ crypto_cfb_final(&walk, tfm);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ return err;
+}
+
+static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, dst);
+ crypto_xor(dst, iv, bsize);
+ iv = src;
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *iv = walk->iv;
+ u8 tmp[bsize];
+
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, tmp);
+ memcpy(iv, src, bsize);
+ crypto_xor(src, tmp, bsize);
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ if (walk->src.virt.addr == walk->dst.virt.addr)
+ return crypto_cfb_decrypt_inplace(walk, tfm);
+ else
+ return crypto_cfb_decrypt_segment(walk, tfm);
+}
+
+static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_cipher *child = ctx->child;
+ int err;
+
+ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_cipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int crypto_cfb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes >= bsize) {
+ err = crypto_cfb_decrypt_blocks(&walk, tfm);
+ err = skcipher_walk_done(&walk, err);
+ }
+
+ if (walk.nbytes) {
+ crypto_cfb_final(&walk, tfm);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ return err;
+}
+
+static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *cipher;
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_cipher(ctx->child);
+}
+
+static void crypto_cfb_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct crypto_spawn *spawn;
+ struct crypto_alg *alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
+ if (err)
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ goto err_free_inst;
+
+ mask = CRYPTO_ALG_TYPE_MASK |
+ crypto_requires_off(algt->type, algt->mask,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
+ err = PTR_ERR(alg);
+ if (IS_ERR(alg))
+ goto err_free_inst;
+
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
+ if (err)
+ goto err_drop_spawn;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ /* we're a stream cipher independent of the crypto cra_blocksize */
+ inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);
+
+ inst->alg.init = crypto_cfb_init_tfm;
+ inst->alg.exit = crypto_cfb_exit_tfm;
+
+ inst->alg.setkey = crypto_cfb_setkey;
+ inst->alg.encrypt = crypto_cfb_encrypt;
+ inst->alg.decrypt = crypto_cfb_decrypt;
+
+ inst->free = crypto_cfb_free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
+ kfree(inst);
+ goto out;
+}
+
+static struct crypto_template crypto_cfb_tmpl = {
+ .name = "cfb",
+ .create = crypto_cfb_create,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_cfb_module_init(void)
+{
+ return crypto_register_template(&crypto_cfb_tmpl);
+}
+
+static void __exit crypto_cfb_module_exit(void)
+{
+ crypto_unregister_template(&crypto_cfb_tmpl);
+}
+
+module_init(crypto_cfb_module_init);
+module_exit(crypto_cfb_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CFB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("cfb");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 61e7c4e02fd2..992e8d8dcdd9 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -15,13 +15,50 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
-#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
/**
+ * crypto_finalize_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request that needs to be finalized
+ * @err: error number
+ */
+static void crypto_finalize_request(struct crypto_engine *engine,
+ struct crypto_async_request *req, int err)
+{
+ unsigned long flags;
+ bool finalize_cur_req = false;
+ int ret;
+ struct crypto_engine_ctx *enginectx;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ if (engine->cur_req == req)
+ finalize_cur_req = true;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ if (finalize_cur_req) {
+ enginectx = crypto_tfm_ctx(req->tfm);
+ if (engine->cur_req_prepared &&
+ enginectx->op.unprepare_request) {
+ ret = enginectx->op.unprepare_request(engine, req);
+ if (ret)
+ dev_err(engine->dev, "failed to unprepare request\n");
+ }
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ engine->cur_req = NULL;
+ engine->cur_req_prepared = false;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ }
+
+ req->complete(req, err);
+
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
+}
+
+/**
* crypto_pump_requests - dequeue one request from engine queue to process
* @engine: the hardware engine
* @in_kthread: true if we are in the context of the request pump thread
@@ -34,11 +71,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
- struct ahash_request *hreq;
- struct ablkcipher_request *breq;
unsigned long flags;
bool was_busy = false;
- int ret, rtype;
+ int ret;
+ struct crypto_engine_ctx *enginectx;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -94,7 +130,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
- rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
/* At this point we have successfully dequeued a request to process */
if (!was_busy && engine->prepare_crypt_hardware) {
ret = engine->prepare_crypt_hardware(engine);
@@ -104,57 +139,31 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
}
- switch (rtype) {
- case CRYPTO_ALG_TYPE_AHASH:
- hreq = ahash_request_cast(engine->cur_req);
- if (engine->prepare_hash_request) {
- ret = engine->prepare_hash_request(engine, hreq);
- if (ret) {
- dev_err(engine->dev, "failed to prepare request: %d\n",
- ret);
- goto req_err;
- }
- engine->cur_req_prepared = true;
- }
- ret = engine->hash_one_request(engine, hreq);
- if (ret) {
- dev_err(engine->dev, "failed to hash one request from queue\n");
- goto req_err;
- }
- return;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- breq = ablkcipher_request_cast(engine->cur_req);
- if (engine->prepare_cipher_request) {
- ret = engine->prepare_cipher_request(engine, breq);
- if (ret) {
- dev_err(engine->dev, "failed to prepare request: %d\n",
- ret);
- goto req_err;
- }
- engine->cur_req_prepared = true;
- }
- ret = engine->cipher_one_request(engine, breq);
+ enginectx = crypto_tfm_ctx(async_req->tfm);
+
+ if (enginectx->op.prepare_request) {
+ ret = enginectx->op.prepare_request(engine, async_req);
if (ret) {
- dev_err(engine->dev, "failed to cipher one request from queue\n");
+ dev_err(engine->dev, "failed to prepare request: %d\n",
+ ret);
goto req_err;
}
- return;
- default:
- dev_err(engine->dev, "failed to prepare request of unknown type\n");
- return;
+ engine->cur_req_prepared = true;
+ }
+ if (!enginectx->op.do_one_request) {
+ dev_err(engine->dev, "failed to do request\n");
+ ret = -EINVAL;
+ goto req_err;
}
+ ret = enginectx->op.do_one_request(engine, async_req);
+ if (ret) {
+ dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
+ goto req_err;
+ }
+ return;
req_err:
- switch (rtype) {
- case CRYPTO_ALG_TYPE_AHASH:
- hreq = ahash_request_cast(engine->cur_req);
- crypto_finalize_hash_request(engine, hreq, ret);
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- breq = ablkcipher_request_cast(engine->cur_req);
- crypto_finalize_cipher_request(engine, breq, ret);
- break;
- }
+ crypto_finalize_request(engine, async_req, ret);
return;
out:
@@ -170,13 +179,12 @@ static void crypto_pump_work(struct kthread_work *work)
}
/**
- * crypto_transfer_cipher_request - transfer the new request into the
- * enginequeue
+ * crypto_transfer_request - transfer the new request into the engine queue
* @engine: the hardware engine
* @req: the request that needs to be added to the engine queue
*/
-int crypto_transfer_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req,
+static int crypto_transfer_request(struct crypto_engine *engine,
+ struct crypto_async_request *req,
bool need_pump)
{
unsigned long flags;
@@ -189,7 +197,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
return -ESHUTDOWN;
}
- ret = ablkcipher_enqueue_request(&engine->queue, req);
+ ret = crypto_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
kthread_queue_work(engine->kworker, &engine->pump_requests);
@@ -197,102 +205,131 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
/**
- * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * crypto_transfer_request_to_engine - transfer one request to list
* into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
-int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+ struct crypto_async_request *req)
{
- return crypto_transfer_cipher_request(engine, req, true);
+ return crypto_transfer_request(engine, req, true);
}
-EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
/**
- * crypto_transfer_hash_request - transfer the new request into the
- * enginequeue
+ * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
+ * into the engine queue
* @engine: the hardware engine
* @req: the request that needs to be added to the engine queue
+ * TODO: Remove this function when skcipher conversion is finished
*/
-int crypto_transfer_hash_request(struct crypto_engine *engine,
- struct ahash_request *req, bool need_pump)
+int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&engine->queue_lock, flags);
-
- if (!engine->running) {
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- return -ESHUTDOWN;
- }
-
- ret = ahash_enqueue_request(&engine->queue, req);
+ return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
- if (!engine->busy && need_pump)
- kthread_queue_work(engine->kworker, &engine->pump_requests);
+/**
+ * crypto_transfer_aead_request_to_engine - transfer one aead_request
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request that needs to be added to the engine queue
+ */
+int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
+ struct aead_request *req)
+{
+ return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- return ret;
+/**
+ * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request that needs to be added to the engine queue
+ */
+int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
+ struct akcipher_request *req)
+{
+ return crypto_transfer_request_to_engine(engine, &req->base);
}
-EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
- * crypto_transfer_hash_request_to_engine - transfer one request to list
- * into the engine queue
+ * crypto_transfer_hash_request_to_engine - transfer one ahash_request
+ * into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req)
{
- return crypto_transfer_hash_request(engine, req, true);
+ return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
- * crypto_finalize_cipher_request - finalize one request if the request is done
+ * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request to be listed into the engine queue
+ */
+int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
+ struct skcipher_request *req)
+{
+ return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
+
+/**
+ * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
+ * the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
+ * TODO: Remove this function when skcipher conversion is finished
*/
-void crypto_finalize_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err)
+void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err)
{
- unsigned long flags;
- bool finalize_cur_req = false;
- int ret;
-
- spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == &req->base)
- finalize_cur_req = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
-
- if (finalize_cur_req) {
- if (engine->cur_req_prepared &&
- engine->unprepare_cipher_request) {
- ret = engine->unprepare_cipher_request(engine, req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->cur_req = NULL;
- engine->cur_req_prepared = false;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- }
+ return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
- req->base.complete(&req->base, err);
+/**
+ * crypto_finalize_aead_request - finalize one aead_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request to be finalized
+ * @err: error number
+ */
+void crypto_finalize_aead_request(struct crypto_engine *engine,
+ struct aead_request *req, int err)
+{
+ return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
- kthread_queue_work(engine->kworker, &engine->pump_requests);
+/**
+ * crypto_finalize_akcipher_request - finalize one akcipher_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request to be finalized
+ * @err: error number
+ */
+void crypto_finalize_akcipher_request(struct crypto_engine *engine,
+ struct akcipher_request *req, int err)
+{
+ return crypto_finalize_request(engine, &req->base, err);
}
-EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
- * crypto_finalize_hash_request - finalize one request if the request is done
+ * crypto_finalize_hash_request - finalize one ahash_request if
+ * the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
@@ -300,35 +337,25 @@ EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err)
{
- unsigned long flags;
- bool finalize_cur_req = false;
- int ret;
-
- spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == &req->base)
- finalize_cur_req = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
-
- if (finalize_cur_req) {
- if (engine->cur_req_prepared &&
- engine->unprepare_hash_request) {
- ret = engine->unprepare_hash_request(engine, req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->cur_req = NULL;
- engine->cur_req_prepared = false;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- }
-
- req->base.complete(&req->base, err);
-
- kthread_queue_work(engine->kworker, &engine->pump_requests);
+ return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
+ * crypto_finalize_skcipher_request - finalize one skcipher_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request to be finalized
+ * @err: error number
+ */
+void crypto_finalize_skcipher_request(struct crypto_engine *engine,
+ struct skcipher_request *req, int err)
+{
+ return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
+
+/**
* crypto_engine_start - start the hardware engine
* @engine: the hardware engine need to be started
*
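
The per-type wrappers above all funnel into one generic queue, so a driver only picks the transfer/finalize pair matching its request type. A minimal sketch of the resulting driver pattern, assuming a hypothetical device whose engine came from crypto_engine_alloc_init(); the my_* names are illustrations, not part of this patch:

	#include <crypto/engine.h>
	#include <crypto/internal/skcipher.h>

	struct my_dev {
		struct crypto_engine *engine;
	};

	struct my_tfm_ctx {
		struct crypto_engine_ctx enginectx;	/* must come first */
		struct my_dev *dev;
	};

	/* Run by the engine kworker for each dequeued request. */
	static int my_do_one_request(struct crypto_engine *engine, void *areq)
	{
		struct skcipher_request *req =
			container_of(areq, struct skcipher_request, base);

		/* ... program the hardware from req->src/req->dst here ... */

		/* hand the finished request back to the engine */
		crypto_finalize_skcipher_request(engine, req, 0);
		return 0;
	}

	/* The algorithm's .encrypt hook only queues the request. */
	static int my_encrypt(struct skcipher_request *req)
	{
		struct my_tfm_ctx *ctx =
			crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

		return crypto_transfer_skcipher_request_to_engine(ctx->dev->engine,
								  req);
	}
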
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 5c291eedaa70..0e89b5457cab 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -271,7 +271,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
return -ENOENT;
err = -ENOMEM;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
goto drop_alg;
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 18f32f2a5e1c..9c066b5ac12d 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1025,9 +1025,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *product, *pk;
- u64 priv[ndigits];
- u64 rand_z[ndigits];
- unsigned int nbytes;
+ u64 *priv, *rand_z;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
if (!private_key || !public_key || !curve) {
@@ -1035,14 +1033,22 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto out;
}
- nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+ priv = kmalloc_array(ndigits, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
- get_random_bytes(rand_z, nbytes);
+ rand_z = kmalloc_array(ndigits, sizeof(*rand_z), GFP_KERNEL);
+ if (!rand_z) {
+ ret = -ENOMEM;
+ goto kfree_out;
+ }
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
- goto out;
+ goto kfree_out;
}
product = ecc_alloc_point(ndigits);
@@ -1051,6 +1057,8 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto err_alloc_product;
}
+ get_random_bytes(rand_z, ndigits << ECC_DIGITS_TO_BYTES_SHIFT);
+
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ecc_swap_digits(private_key, priv, ndigits);
@@ -1065,6 +1073,9 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
ecc_free_point(product);
err_alloc_product:
ecc_free_point(pk);
+kfree_out:
+ kzfree(priv);
+ kzfree(rand_z);
out:
return ret;
}
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 3aca0933ec44..d2ec33f0e098 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -89,12 +89,19 @@ static int ecdh_compute_value(struct kpp_request *req)
if (!shared_secret)
goto free_pubkey;
- copied = sg_copy_to_buffer(req->src, 1, public_key,
- public_key_sz);
- if (copied != public_key_sz) {
- ret = -EINVAL;
+ /* from here on, any failure is due to invalid parameters */
+ ret = -EINVAL;
+
+ /* the public key must consist of exactly two curve coordinates */
+ if (public_key_sz != req->src_len)
+ goto free_all;
+
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ public_key_sz),
+ public_key, public_key_sz);
+ if (copied != public_key_sz)
goto free_all;
- }
ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
ctx->private_key, public_key,
@@ -111,7 +118,11 @@ static int ecdh_compute_value(struct kpp_request *req)
if (ret < 0)
goto free_all;
- copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
+ /* might want less than we've got */
+ nbytes = min_t(size_t, nbytes, req->dst_len);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
+ nbytes),
+ buf, nbytes);
if (copied != nbytes)
ret = -EINVAL;
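
Both sg_copy fixes above share one idea: never assume the scatterlist is a single entry, since callers may pass data split across several entries. sg_nents_for_len() reports how many entries cover a given length, or a negative errno if the list is too short. A self-contained sketch of the pattern (the helper name is an illustration, not a kernel API):

	#include <linux/errno.h>
	#include <linux/scatterlist.h>

	/* Copy exactly len bytes out of an SG list, however many entries it spans. */
	static int sg_copy_out_exact(struct scatterlist *sg, void *buf, size_t len)
	{
		int nents = sg_nents_for_len(sg, len);

		if (nents < 0)
			return nents;	/* the list holds fewer than len bytes */
		if (sg_copy_to_buffer(sg, nents, buf, len) != len)
			return -EINVAL;
		return 0;
	}
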
diff --git a/crypto/internal.h b/crypto/internal.h
index 5ac27fba10e8..9a3f39939fba 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -67,7 +67,6 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
}
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
-struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
diff --git a/crypto/lrw.c b/crypto/lrw.c
index cbbd7c50ad19..954a7064a179 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -28,13 +28,31 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-#include <crypto/lrw.h>
#define LRW_BUFFER_SIZE 128u
+#define LRW_BLOCK_SIZE 16
+
struct priv {
struct crypto_skcipher *child;
- struct lrw_table_ctx table;
+
+ /*
+ * optimizes multiplying a random (non-incrementing, as at the
+ * start of a new sector) value with key2; we could also have
+ * used 4k optimization tables or no optimization at all. In the
+ * latter case we would have to store key2 here.
+ */
+ struct gf128mul_64k *table;
+
+ /*
+ * stores:
+ * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+ * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+ * key2*{ 0,0,...1,1,1,1,1 }, etc.,
+ * as needed for optimized multiplication of incrementing values
+ * with key2.
+ */
+ be128 mulinc[128];
};
struct rctx {
@@ -65,11 +83,25 @@ static inline void setbit128_bbe(void *b, int bit)
), b);
}
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
{
+ struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
+ int err, bsize = LRW_BLOCK_SIZE;
+ const u8 *tweak = key + keylen - bsize;
be128 tmp = { 0 };
int i;
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen - bsize);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
+
if (ctx->table)
gf128mul_free_64k(ctx->table);
@@ -87,34 +119,6 @@ int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
return 0;
}
-EXPORT_SYMBOL_GPL(lrw_init_table);
-
-void lrw_free_table(struct lrw_table_ctx *ctx)
-{
- if (ctx->table)
- gf128mul_free_64k(ctx->table);
-}
-EXPORT_SYMBOL_GPL(lrw_free_table);
-
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
-{
- struct priv *ctx = crypto_skcipher_ctx(parent);
- struct crypto_skcipher *child = ctx->child;
- int err, bsize = LRW_BLOCK_SIZE;
- const u8 *tweak = key + keylen - bsize;
-
- crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen - bsize);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->table, tweak);
-}
static inline void inc(be128 *iv)
{
@@ -238,7 +242,7 @@ static int pre_crypt(struct skcipher_request *req)
/* T <- I*Key2, using the optimization
* discussed in the specification */
be128_xor(&rctx->t, &rctx->t,
- &ctx->table.mulinc[get_index128(iv)]);
+ &ctx->mulinc[get_index128(iv)]);
inc(iv);
} while ((avail -= bs) >= bs);
@@ -301,7 +305,7 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
memcpy(&rctx->t, req->iv, sizeof(rctx->t));
/* T <- I*Key2 */
- gf128mul_64k_bbe(&rctx->t, ctx->table.table);
+ gf128mul_64k_bbe(&rctx->t, ctx->table);
return 0;
}
@@ -313,7 +317,7 @@ static void exit_crypt(struct skcipher_request *req)
rctx->left = 0;
if (rctx->ext)
- kfree(rctx->ext);
+ kzfree(rctx->ext);
}
static int do_encrypt(struct skcipher_request *req, int err)
@@ -416,85 +420,6 @@ static int decrypt(struct skcipher_request *req)
return do_decrypt(req, init_crypt(req, decrypt_done));
}
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
- struct scatterlist *ssrc, unsigned int nbytes,
- struct lrw_crypt_req *req)
-{
- const unsigned int bsize = LRW_BLOCK_SIZE;
- const unsigned int max_blks = req->tbuflen / bsize;
- struct lrw_table_ctx *ctx = req->table_ctx;
- struct blkcipher_walk walk;
- unsigned int nblocks;
- be128 *iv, *src, *dst, *t;
- be128 *t_buf = req->tbuf;
- int err, i;
-
- BUG_ON(max_blks < 1);
-
- blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
-
- err = blkcipher_walk_virt(desc, &walk);
- nbytes = walk.nbytes;
- if (!nbytes)
- return err;
-
- nblocks = min(walk.nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
-
- /* calculate first value of T */
- iv = (be128 *)walk.iv;
- t_buf[0] = *iv;
-
- /* T <- I*Key2 */
- gf128mul_64k_bbe(&t_buf[0], ctx->table);
-
- i = 0;
- goto first;
-
- for (;;) {
- do {
- for (i = 0; i < nblocks; i++) {
- /* T <- I*Key2, using the optimization
- * discussed in the specification */
- be128_xor(&t_buf[i], t,
- &ctx->mulinc[get_index128(iv)]);
- inc(iv);
-first:
- t = &t_buf[i];
-
- /* PP <- T xor P */
- be128_xor(dst + i, t, src + i);
- }
-
- /* CC <- E(Key2,PP) */
- req->crypt_fn(req->crypt_ctx, (u8 *)dst,
- nblocks * bsize);
-
- /* C <- T xor CC */
- for (i = 0; i < nblocks; i++)
- be128_xor(dst + i, dst + i, &t_buf[i]);
-
- src += nblocks;
- dst += nblocks;
- nbytes -= nblocks * bsize;
- nblocks = min(nbytes / bsize, max_blks);
- } while (nblocks > 0);
-
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- if (!nbytes)
- break;
-
- nblocks = min(nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(lrw_crypt);
-
static int init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
@@ -518,7 +443,8 @@ static void exit_tfm(struct crypto_skcipher *tfm)
{
struct priv *ctx = crypto_skcipher_ctx(tfm);
- lrw_free_table(&ctx->table);
+ if (ctx->table)
+ gf128mul_free_64k(ctx->table);
crypto_free_skcipher(ctx->child);
}
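
Folding the table into struct priv changes nothing for users: an "lrw(aes)" key is still the block-cipher key with the 16-byte tweak key K2 appended, exactly as setkey() above splits it. A usage sketch (names illustrative only):

	#include <crypto/skcipher.h>
	#include <linux/random.h>

	static int lrw_setkey_example(void)
	{
		struct crypto_skcipher *tfm;
		u8 key[32 + 16];	/* AES-256 key followed by the tweak key K2 */
		int err;

		tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		get_random_bytes(key, sizeof(key));
		/* setkey() keys the child cipher with the first 32 bytes and
		 * builds the K2 multiplication tables from the trailing 16 */
		err = crypto_skcipher_setkey(tfm, key, sizeof(key));

		crypto_free_skcipher(tfm);
		return err;
	}
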
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index fe5129d6ff4e..f14152147ce8 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -367,7 +367,7 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
goto out;
rctx->out = req->result;
- err = ahash_mcryptd_update(&rctx->areq);
+ err = crypto_ahash_update(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
goto out;
@@ -394,7 +394,7 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
goto out;
rctx->out = req->result;
- err = ahash_mcryptd_final(&rctx->areq);
+ err = crypto_ahash_final(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
goto out;
@@ -420,7 +420,7 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
if (unlikely(err == -EINPROGRESS))
goto out;
rctx->out = req->result;
- err = ahash_mcryptd_finup(&rctx->areq);
+ err = crypto_ahash_finup(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
@@ -455,7 +455,7 @@ static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
rctx->complete, req_async);
rctx->out = req->result;
- err = ahash_mcryptd_digest(desc);
+ err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
out:
local_bh_disable();
@@ -612,32 +612,6 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
-int ahash_mcryptd_digest(struct ahash_request *desc)
-{
- return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
-}
-
-int ahash_mcryptd_update(struct ahash_request *desc)
-{
- /* alignment is to be done by multi-buffer crypto algorithm if needed */
-
- return crypto_ahash_update(desc);
-}
-
-int ahash_mcryptd_finup(struct ahash_request *desc)
-{
- /* alignment is to be done by multi-buffer crypto algorithm if needed */
-
- return crypto_ahash_finup(desc);
-}
-
-int ahash_mcryptd_final(struct ahash_request *desc)
-{
- /* alignment is to be done by multi-buffer crypto algorithm if needed */
-
- return crypto_ahash_final(desc);
-}
-
struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
diff --git a/crypto/md4.c b/crypto/md4.c
index 3515af425cc9..810fefb0a007 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -64,23 +64,6 @@ static inline u32 H(u32 x, u32 y, u32 z)
#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
-/* XXX: this stuff can be optimized */
-static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __le32_to_cpus(buf);
- buf++;
- }
-}
-
-static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __cpu_to_le32s(buf);
- buf++;
- }
-}
-
static void md4_transform(u32 *hash, u32 const *in)
{
u32 a, b, c, d;
diff --git a/crypto/md5.c b/crypto/md5.c
index f7ae1a48225b..f776ef43d621 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -32,23 +32,6 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
-/* XXX: this stuff can be optimized */
-static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __le32_to_cpus(buf);
- buf++;
- }
-}
-
-static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __cpu_to_le32s(buf);
- buf++;
- }
-}
-
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 2908f93c3e55..9893dbfc1af4 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -192,7 +192,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
if (likely(!pad_len))
goto out;
- out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
+ out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
err = -ENOMEM;
if (!out_buf)
goto out;
diff --git a/crypto/simd.c b/crypto/simd.c
index 208226d7f908..ea7240be3001 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -221,4 +221,54 @@ void simd_skcipher_free(struct simd_skcipher_alg *salg)
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);
+int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs)
+{
+ int err;
+ int i;
+ const char *algname;
+ const char *drvname;
+ const char *basename;
+ struct simd_skcipher_alg *simd;
+
+ err = crypto_register_skciphers(algs, count);
+ if (err)
+ return err;
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
+ WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
+ algname = algs[i].base.cra_name + 2;
+ drvname = algs[i].base.cra_driver_name + 2;
+ basename = algs[i].base.cra_driver_name;
+ simd = simd_skcipher_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto err_unregister;
+ simd_algs[i] = simd;
+ }
+ return 0;
+
+err_unregister:
+ simd_unregister_skciphers(algs, count, simd_algs);
+ return err;
+}
+EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
+
+void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs)
+{
+ int i;
+
+ crypto_unregister_skciphers(algs, count);
+
+ for (i = 0; i < count; i++) {
+ if (simd_algs[i]) {
+ simd_skcipher_free(simd_algs[i]);
+ simd_algs[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
+
MODULE_LICENSE("GPL");
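
The two new helpers shrink each arch's glue code to a register/unregister pair. A hypothetical arch module wrapping one internal ("__"-prefixed) skcipher might look like this sketch; the my_* names and the priority value are assumptions, and the alg hooks are elided:

	#include <crypto/aes.h>
	#include <crypto/internal/simd.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/module.h>

	/* the inner alg must be internal and carry "__"-prefixed names */
	static struct skcipher_alg my_algs[] = { {
		.base.cra_name		= "__ecb(aes)",
		.base.cra_driver_name	= "__ecb-aes-myarch",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		/* .setkey/.encrypt/.decrypt and sizes elided */
	} };

	static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];

	static int __init my_mod_init(void)
	{
		return simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
						      my_simd_algs);
	}

	static void __exit my_mod_exit(void)
	{
		simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs),
					  my_simd_algs);
	}
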
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
new file mode 100644
index 000000000000..f537a2766c55
--- /dev/null
+++ b/crypto/sm4_generic.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * SM4 Cipher Algorithm.
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * All rights reserved.
+ */
+
+#include <crypto/sm4.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+static const u32 fk[4] = {
+ 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
+};
+
+static const u8 sbox[256] = {
+ 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
+ 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
+ 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
+ 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+ 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
+ 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
+ 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
+ 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
+ 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
+ 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
+ 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
+ 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
+ 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
+ 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
+ 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
+ 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
+ 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
+ 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
+ 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
+ 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
+ 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
+ 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
+ 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
+ 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
+ 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
+ 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
+ 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
+ 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
+ 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
+ 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
+ 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
+ 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
+};
+
+static const u32 ck[] = {
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
+ 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
+ 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
+ 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
+ 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
+ 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
+};
+
+static u32 sm4_t_non_lin_sub(u32 x)
+{
+ int i;
+ u8 *b = (u8 *)&x;
+
+ for (i = 0; i < 4; ++i)
+ b[i] = sbox[b[i]];
+
+ return x;
+}
+
+static u32 sm4_key_lin_sub(u32 x)
+{
+ return x ^ rol32(x, 13) ^ rol32(x, 23);
+
+}
+
+static u32 sm4_enc_lin_sub(u32 x)
+{
+ return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
+}
+
+static u32 sm4_key_sub(u32 x)
+{
+ return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static u32 sm4_enc_sub(u32 x)
+{
+ return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static u32 sm4_round(const u32 *x, const u32 rk)
+{
+ return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk);
+}
+
+/**
+ * crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+ unsigned int key_len)
+{
+ u32 rk[4], t;
+ const u32 *key = (u32 *)in_key;
+ int i;
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < 4; ++i)
+ rk[i] = get_unaligned_be32(&key[i]) ^ fk[i];
+
+ for (i = 0; i < 32; ++i) {
+ t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
+ ctx->rkey_enc[i] = t;
+ rk[0] = rk[1];
+ rk[1] = rk[2];
+ rk[2] = rk[3];
+ rk[3] = t;
+ }
+
+ for (i = 0; i < 32; ++i)
+ ctx->rkey_dec[i] = ctx->rkey_enc[31 - i];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
+
+/**
+ * crypto_sm4_set_key - Set the SM4 key.
+ * @tfm: The %crypto_tfm that is used in the context.
+ * @in_key: The input key.
+ * @key_len: The size of the key.
+ *
+ * Returns 0 on success; on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm
+ * is set. The function uses crypto_sm4_expand_key() to expand the key.
+ * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
+ * retrieved with crypto_tfm_ctx().
+ */
+int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+
+ ret = crypto_sm4_expand_key(ctx, in_key, key_len);
+ if (!ret)
+ return 0;
+
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
+
+static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
+{
+ u32 x[4], i, t;
+
+ for (i = 0; i < 4; ++i)
+ x[i] = get_unaligned_be32(&in[i]);
+
+ for (i = 0; i < 32; ++i) {
+ t = sm4_round(x, rk[i]);
+ x[0] = x[1];
+ x[1] = x[2];
+ x[2] = x[3];
+ x[3] = t;
+ }
+
+ for (i = 0; i < 4; ++i)
+ put_unaligned_be32(x[3 - i], &out[i]);
+}
+
+/* encrypt a block of text */
+static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
+}
+
+/* decrypt a block of text */
+static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
+}
+
+static struct crypto_alg sm4_alg = {
+ .cra_name = "sm4",
+ .cra_driver_name = "sm4-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_sm4_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = SM4_KEY_SIZE,
+ .cia_max_keysize = SM4_KEY_SIZE,
+ .cia_setkey = crypto_sm4_set_key,
+ .cia_encrypt = sm4_encrypt,
+ .cia_decrypt = sm4_decrypt
+ }
+ }
+};
+
+static int __init sm4_init(void)
+{
+ return crypto_register_alg(&sm4_alg);
+}
+
+static void __exit sm4_fini(void)
+{
+ crypto_unregister_alg(&sm4_alg);
+}
+
+module_init(sm4_init);
+module_exit(sm4_fini);
+
+MODULE_DESCRIPTION("SM4 Cipher Algorithm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-generic");
diff --git a/crypto/speck.c b/crypto/speck.c
new file mode 100644
index 000000000000..58aa9f7f91f7
--- /dev/null
+++ b/crypto/speck.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Speck: a lightweight block cipher
+ *
+ * Copyright (c) 2018 Google, Inc
+ *
+ * Speck has 10 variants, including 5 block sizes. For now we only implement
+ * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
+ * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
+ * and a key size of K bits. The Speck128 variants are believed to be the most
+ * secure variants, and they use the same block size and key sizes as AES. The
+ * Speck64 variants are less secure, but on 32-bit processors are usually
+ * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
+ * secure and/or not as well suited for implementation on either 32-bit or
+ * 64-bit processors, so they are omitted.
+ *
+ * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
+ * https://eprint.iacr.org/2013/404.pdf
+ *
+ * In a correspondence, the Speck designers have also clarified that the words
+ * should be interpreted in little-endian format, ordered such that the first
+ * word of each block is 'y' rather than 'x', and that the first key word
+ * (rather than the last) becomes the first round key.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/speck.h>
+#include <linux/bitops.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+/* Speck128 */
+
+static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
+{
+ *x = ror64(*x, 8);
+ *x += *y;
+ *x ^= k;
+ *y = rol64(*y, 3);
+ *y ^= *x;
+}
+
+static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
+{
+ *y ^= *x;
+ *y = ror64(*y, 3);
+ *x ^= k;
+ *x -= *y;
+ *x = rol64(*x, 8);
+}
+
+void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u64 y = get_unaligned_le64(in);
+ u64 x = get_unaligned_le64(in + 8);
+ int i;
+
+ for (i = 0; i < ctx->nrounds; i++)
+ speck128_round(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le64(y, out);
+ put_unaligned_le64(x, out + 8);
+}
+EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
+
+static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u64 y = get_unaligned_le64(in);
+ u64 x = get_unaligned_le64(in + 8);
+ int i;
+
+ for (i = ctx->nrounds - 1; i >= 0; i--)
+ speck128_unround(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le64(y, out);
+ put_unaligned_le64(x, out + 8);
+}
+EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
+
+static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ u64 l[3];
+ u64 k;
+ int i;
+
+ switch (keylen) {
+ case SPECK128_128_KEY_SIZE:
+ k = get_unaligned_le64(key);
+ l[0] = get_unaligned_le64(key + 8);
+ ctx->nrounds = SPECK128_128_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck128_round(&l[0], &k, i);
+ }
+ break;
+ case SPECK128_192_KEY_SIZE:
+ k = get_unaligned_le64(key);
+ l[0] = get_unaligned_le64(key + 8);
+ l[1] = get_unaligned_le64(key + 16);
+ ctx->nrounds = SPECK128_192_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck128_round(&l[i % 2], &k, i);
+ }
+ break;
+ case SPECK128_256_KEY_SIZE:
+ k = get_unaligned_le64(key);
+ l[0] = get_unaligned_le64(key + 8);
+ l[1] = get_unaligned_le64(key + 16);
+ l[2] = get_unaligned_le64(key + 24);
+ ctx->nrounds = SPECK128_256_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck128_round(&l[i % 3], &k, i);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
+
+static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
+}
+
+/* Speck64 */
+
+static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
+{
+ *x = ror32(*x, 8);
+ *x += *y;
+ *x ^= k;
+ *y = rol32(*y, 3);
+ *y ^= *x;
+}
+
+static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
+{
+ *y ^= *x;
+ *y = ror32(*y, 3);
+ *x ^= k;
+ *x -= *y;
+ *x = rol32(*x, 8);
+}
+
+void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u32 y = get_unaligned_le32(in);
+ u32 x = get_unaligned_le32(in + 4);
+ int i;
+
+ for (i = 0; i < ctx->nrounds; i++)
+ speck64_round(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le32(y, out);
+ put_unaligned_le32(x, out + 4);
+}
+EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
+
+static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u32 y = get_unaligned_le32(in);
+ u32 x = get_unaligned_le32(in + 4);
+ int i;
+
+ for (i = ctx->nrounds - 1; i >= 0; i--)
+ speck64_unround(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le32(y, out);
+ put_unaligned_le32(x, out + 4);
+}
+EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
+
+static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ u32 l[3];
+ u32 k;
+ int i;
+
+ switch (keylen) {
+ case SPECK64_96_KEY_SIZE:
+ k = get_unaligned_le32(key);
+ l[0] = get_unaligned_le32(key + 4);
+ l[1] = get_unaligned_le32(key + 8);
+ ctx->nrounds = SPECK64_96_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck64_round(&l[i % 2], &k, i);
+ }
+ break;
+ case SPECK64_128_KEY_SIZE:
+ k = get_unaligned_le32(key);
+ l[0] = get_unaligned_le32(key + 4);
+ l[1] = get_unaligned_le32(key + 8);
+ l[2] = get_unaligned_le32(key + 12);
+ ctx->nrounds = SPECK64_128_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck64_round(&l[i % 3], &k, i);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
+
+static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
+}
+
+/* Algorithm definitions */
+
+static struct crypto_alg speck_algs[] = {
+ {
+ .cra_name = "speck128",
+ .cra_driver_name = "speck128-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SPECK128_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = SPECK128_128_KEY_SIZE,
+ .cia_max_keysize = SPECK128_256_KEY_SIZE,
+ .cia_setkey = speck128_setkey,
+ .cia_encrypt = speck128_encrypt,
+ .cia_decrypt = speck128_decrypt
+ }
+ }
+ }, {
+ .cra_name = "speck64",
+ .cra_driver_name = "speck64-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SPECK64_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = SPECK64_96_KEY_SIZE,
+ .cia_max_keysize = SPECK64_128_KEY_SIZE,
+ .cia_setkey = speck64_setkey,
+ .cia_encrypt = speck64_encrypt,
+ .cia_decrypt = speck64_decrypt
+ }
+ }
+ }
+};
+
+static int __init speck_module_init(void)
+{
+ return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+static void __exit speck_module_exit(void)
+{
+ crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+module_init(speck_module_init);
+module_exit(speck_module_exit);
+
+MODULE_DESCRIPTION("Speck block cipher (generic)");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("speck128");
+MODULE_ALIAS_CRYPTO("speck128-generic");
+MODULE_ALIAS_CRYPTO("speck64");
+MODULE_ALIAS_CRYPTO("speck64-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 14213a096fd2..51fe7c8744ae 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1983,6 +1983,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
case 190:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
break;
+ case 191:
+ ret += tcrypt_test("ecb(sm4)");
+ break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d5e23a142a04..af4a01c5037b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3001,6 +3001,33 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ecb(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(sm4_enc_tv_template),
+ .dec = __VECS(sm4_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "ecb(speck128)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck128_enc_tv_template),
+ .dec = __VECS(speck128_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "ecb(speck64)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck64_enc_tv_template),
+ .dec = __VECS(speck64_dec_tv_template)
+ }
+ }
+ }, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
.suite = {
@@ -3558,6 +3585,24 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "xts(speck128)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck128_xts_enc_tv_template),
+ .dec = __VECS(speck128_xts_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "xts(speck64)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck64_xts_enc_tv_template),
+ .dec = __VECS(speck64_xts_dec_tv_template)
+ }
+ }
+ }, {
.alg = "xts(twofish)",
.test = alg_test_skcipher,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6044f6906bd6..004c0a0f8004 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -548,7 +548,7 @@ static const struct akcipher_testvec rsa_tv_template[] = {
static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
{
.key =
- "\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
+ "\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
"\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28"
"\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67"
"\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d"
@@ -597,8 +597,8 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
"\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a"
"\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda"
"\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46"
- "\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30"
- "\x02\x01\x30",
+ "\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00"
+ "\x02\x01\x00",
.key_len = 804,
/*
* m is SHA256 hash of following message:
@@ -2044,6 +2044,265 @@ static const struct hash_testvec crct10dif_tv_template[] = {
.digest = (u8 *)(u16 []){ 0x44c6 },
.np = 4,
.tap = { 1, 255, 57, 6 },
+ }, {
+ .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
+ "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
+ "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
+ "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
+ "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
+ "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
+ "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
+ "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
+ "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
+ "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
+ "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
+ "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
+ "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
+ "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
+ "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
+ "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
+ "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
+ "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
+ "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
+ "\x58\xef\x63\xfa\x91\x05\x9c\x33"
+ "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
+ "\x19\xb0\x47\xde\x52\xe9\x80\x17"
+ "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
+ "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
+ "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
+ "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
+ "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
+ "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
+ "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
+ "\x86\x1d\x91\x28\xbf\x33\xca\x61"
+ "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
+ "\x47\xde\x75\x0c\x80\x17\xae\x22"
+ "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
+ "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
+ "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
+ "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
+ "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
+ "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
+ "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
+ "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
+ "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
+ "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
+ "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
+ "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
+ "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
+ "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
+ "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
+ "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
+ "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
+ "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
+ "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
+ "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
+ "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
+ "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
+ "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
+ "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
+ "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
+ "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
+ "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
+ "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
+ "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
+ "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
+ "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
+ "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
+ "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
+ "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
+ "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
+ "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
+ "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
+ "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
+ "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
+ "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
+ "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
+ "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
+ "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
+ "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
+ "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
+ "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
+ "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
+ "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
+ "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
+ "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
+ "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
+ "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
+ "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
+ "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
+ "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
+ "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
+ "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
+ "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
+ "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
+ "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
+ "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
+ "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
+ "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
+ "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
+ "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
+ "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
+ "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
+ "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
+ "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
+ "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
+ "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
+ "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
+ "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
+ "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
+ "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
+ "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
+ "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
+ "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
+ "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
+ "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
+ "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
+ "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
+ "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
+ "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
+ "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
+ "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
+ "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
+ "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
+ "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
+ "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
+ "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
+ "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
+ "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
+ "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
+ "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
+ "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
+ "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
+ "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
+ "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
+ "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
+ "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
+ "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
+ "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
+ "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
+ "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
+ "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
+ "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
+ "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
+ "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
+ "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
+ "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
+ "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
+ "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
+ "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
+ "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
+ "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
+ "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
+ "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
+ "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
+ "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
+ "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
+ "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
+ "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
+ "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
+ "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
+ "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
+ "\x47\xde\x52\xe9\x80\x17\x8b\x22"
+ "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
+ "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
+ "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
+ "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
+ "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
+ "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
+ "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
+ "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
+ "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
+ "\x75\x0c\x80\x17\xae\x22\xb9\x50"
+ "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
+ "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
+ "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
+ "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
+ "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
+ "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
+ "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
+ "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
+ "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
+ "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
+ "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
+ "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
+ "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
+ "\x48\xdf\x53\xea\x81\x18\x8c\x23"
+ "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
+ "\x09\xa0\x37\xce\x42\xd9\x70\x07"
+ "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
+ "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
+ "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
+ "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
+ "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
+ "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
+ "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
+ "\x76\x0d\x81\x18\xaf\x23\xba\x51"
+ "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
+ "\x37\xce\x65\xfc\x70\x07\x9e\x12"
+ "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
+ "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
+ "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
+ "\xff\x73\x0a\xa1\x15\xac\x43\xda"
+ "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
+ "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
+ "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
+ "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
+ "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
+ "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
+ "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
+ "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
+ "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
+ "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
+ "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
+ "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
+ "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
+ "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
+ "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
+ "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
+ "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
+ "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
+ "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
+ "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
+ "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
+ "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
+ "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
+ "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
+ "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
+ "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
+ "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
+ "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
+ "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
+ "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
+ "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
+ "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
+ "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
+ "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
+ "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
+ "\xef\x86\x1d\x91\x28\xbf\x33\xca"
+ "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
+ "\xd3\x47\xde\x75\x0c\x80\x17\xae"
+ "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
+ "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
+ "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
+ "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
+ "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
+ "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
+ "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
+ "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
+ "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
+ "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
+ "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
+ "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
+ "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
+ "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
+ "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
+ "\x67\xfe\x95\x09\xa0\x37\xce\x42"
+ "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
+ "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
+ "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
+ .psize = 2048,
+ .digest = (u8 *)(u16 []){ 0x23ca },
}
};
@@ -14323,6 +14582,1623 @@ static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
},
};
+/*
+ * SM4 test vectors taken from the draft RFC
+ * https://tools.ietf.org/html/draft-crypto-sm4-00#ref-GBT.32907-2016
+ */
+
+static const struct cipher_testvec sm4_enc_tv_template[] = {
+ { /* SM4 Appendix A: Example Calculations. Example 1. */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .ilen = 16,
+ .result = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
+ "\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
+ .rlen = 16,
+ }, { /*
+ * SM4 Appendix A: Example Calculations.
+ * Last 10 iterations of Example 2.
+ */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\x99\x4a\xc3\xe7\xc3\x57\x89\x6a"
+ "\x81\xfc\xa8\xe\x38\x3e\xef\x80"
+ "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\xc\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x0\x19\xe\x16\x2\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1",
+ .ilen = 160,
+ .result = "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\xc\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x0\x19\xe\x16\x2\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1"
+ "\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
+ "\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
+ .rlen = 160
+ }
+};
+
+static const struct cipher_testvec sm4_dec_tv_template[] = {
+ { /* SM4 Appendix A: Example Calculations. Example 1. */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
+ "\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
+ .ilen = 16,
+ .result = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .rlen = 16,
+ }, { /*
+ * SM4 Appendix A: Example Calculations.
+ * Last 10 iterations of Example 2.
+ */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\xc\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x0\x19\xe\x16\x2\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1"
+ "\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
+ "\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
+ .ilen = 160,
+ .result = "\x99\x4a\xc3\xe7\xc3\x57\x89\x6a"
+ "\x81\xfc\xa8\xe\x38\x3e\xef\x80"
+ "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\xc\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x0\x19\xe\x16\x2\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1",
+ .rlen = 160
+ }
+};
+
+/*
+ * Speck test vectors taken from the original paper:
+ * "The Simon and Speck Families of Lightweight Block Ciphers"
+ * https://eprint.iacr.org/2013/404.pdf
+ *
+ * Note that the paper does not make byte and word order clear. But it was
+ * confirmed with the authors that the intended orders are little endian byte
+ * order and (y, x) word order. Equivalently, the printed test vectors, when
+ * looking at only the bytes (ignoring the whitespace that divides them into
+ * words), are backwards: the left-most byte is actually the one with the
+ * highest memory address, while the right-most byte is actually the one with
+ * the lowest memory address.
+ */
+
+static const struct cipher_testvec speck128_enc_tv_template[] = {
+ { /* Speck128/128 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .klen = 16,
+ .input = "\x20\x6d\x61\x64\x65\x20\x69\x74"
+ "\x20\x65\x71\x75\x69\x76\x61\x6c",
+ .ilen = 16,
+ .result = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
+ "\x65\x32\x78\x79\x51\x98\x5d\xa6",
+ .rlen = 16,
+ }, { /* Speck128/192 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ .klen = 24,
+ .input = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
+ "\x68\x69\x65\x66\x20\x48\x61\x72",
+ .ilen = 16,
+ .result = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
+ "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
+ .rlen = 16,
+ }, { /* Speck128/256 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .klen = 32,
+ .input = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
+ "\x49\x6e\x20\x74\x68\x6f\x73\x65",
+ .ilen = 16,
+ .result = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
+ "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
+ .rlen = 16,
+ },
+};
+
+static const struct cipher_testvec speck128_dec_tv_template[] = {
+ { /* Speck128/128 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .klen = 16,
+ .input = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
+ "\x65\x32\x78\x79\x51\x98\x5d\xa6",
+ .ilen = 16,
+ .result = "\x20\x6d\x61\x64\x65\x20\x69\x74"
+ "\x20\x65\x71\x75\x69\x76\x61\x6c",
+ .rlen = 16,
+ }, { /* Speck128/192 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ .klen = 24,
+ .input = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
+ "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
+ .ilen = 16,
+ .result = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
+ "\x68\x69\x65\x66\x20\x48\x61\x72",
+ .rlen = 16,
+ }, { /* Speck128/256 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .klen = 32,
+ .input = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
+ "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
+ .ilen = 16,
+ .result = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
+ "\x49\x6e\x20\x74\x68\x6f\x73\x65",
+ .rlen = 16,
+ },
+};
+
+/*
+ * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
+ * result recomputed with Speck128 as the cipher
+ */
+
+static const struct cipher_testvec speck128_xts_enc_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ilen = 32,
+ .result = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
+ "\x3b\x99\x4a\x64\x74\x77\xac\xed"
+ "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
+ "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
+ "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
+ "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
+ "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
+ "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
+ "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
+ "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
+ "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
+ "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
+ "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
+ "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
+ "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
+ "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
+ "\x19\xc5\x58\x84\x63\xb9\x12\x68"
+ "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
+ "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
+ "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
+ "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
+ "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
+ "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
+ "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
+ "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
+ "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
+ "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
+ "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
+ "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
+ "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
+ "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
+ "\xa5\x20\xac\x24\x1c\x73\x59\x73"
+ "\x58\x61\x3a\x87\x58\xb3\x20\x56"
+ "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
+ "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
+ "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
+ "\x09\x35\x71\x50\x65\xac\x92\xe3"
+ "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
+ "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
+ "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
+ "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
+ "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
+ "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
+ "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
+ "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
+ "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
+ "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
+ "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
+ "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
+ "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
+ "\x87\x30\xac\xd5\xea\x73\x49\x10"
+ "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
+ "\x66\x02\x35\x3d\x60\x06\x36\x4f"
+ "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
+ "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
+ "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
+ "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
+ "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
+ "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
+ "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
+ "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
+ "\x51\x66\x02\x69\x04\x97\x36\xd4"
+ "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
+ "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
+ "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
+ "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
+ "\x03\xe7\x05\x39\xf5\x05\x26\xee"
+ "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
+ "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
+ "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
+ "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
+ "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
+ "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95"
+ "\x02\x88\x41\x97\x16\x93\x99\x37"
+ "\x51\x05\x82\x09\x74\x94\x45\x92",
+ .klen = 64,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
+ "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
+ "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
+ "\x92\x99\xde\xd3\x76\xed\xcd\x63"
+ "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
+ "\x79\x36\x31\x19\x62\xae\x10\x7e"
+ "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
+ "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
+ "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
+ "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
+ "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
+ "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
+ "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
+ "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
+ "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
+ "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
+ "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
+ "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
+ "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
+ "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
+ "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
+ "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
+ "\xac\xa5\xd0\x38\xef\x19\x56\x53"
+ "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
+ "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
+ "\xed\x22\x34\x1c\x5d\xed\x17\x06"
+ "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
+ "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
+ "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
+ "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
+ "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
+ "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
+ "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
+ "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
+ "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
+ "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
+ "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
+ "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
+ "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
+ "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
+ "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
+ "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
+ "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
+ "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
+ "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
+ "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
+ "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
+ "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
+ "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
+ "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
+ "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
+ "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
+ "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
+ "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
+ "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
+ "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
+ "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
+ "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
+ "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
+ "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
+ "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
+ "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
+ "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
+ "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
+ .rlen = 512,
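+		/*
+		 * Run both as one contiguous buffer and split across a
+		 * three-entry scatterlist, as described by .np/.tap below.
+		 */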
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
+static const struct cipher_testvec speck128_xts_dec_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
+ "\x3b\x99\x4a\x64\x74\x77\xac\xed"
+ "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
+ "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
+ .ilen = 32,
+ .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
+ "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
+ "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
+ "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
+ "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
+ "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
+ "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
+ "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
+ "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
+ "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
+ "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
+ "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
+ "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
+ "\x19\xc5\x58\x84\x63\xb9\x12\x68"
+ "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
+ "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
+ "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
+ "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
+ "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
+ "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
+ "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
+ "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
+ "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
+ "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
+ "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
+ "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
+ "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
+ "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
+ "\xa5\x20\xac\x24\x1c\x73\x59\x73"
+ "\x58\x61\x3a\x87\x58\xb3\x20\x56"
+ "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
+ "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
+ "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
+ "\x09\x35\x71\x50\x65\xac\x92\xe3"
+ "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
+ "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
+ "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
+ "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
+ "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
+ "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
+ "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
+ "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
+ "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
+ "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
+ "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
+ "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
+ "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
+ "\x87\x30\xac\xd5\xea\x73\x49\x10"
+ "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
+ "\x66\x02\x35\x3d\x60\x06\x36\x4f"
+ "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
+ "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
+ "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
+ "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
+ "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
+ "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
+ "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
+ "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
+ "\x51\x66\x02\x69\x04\x97\x36\xd4"
+ "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
+ "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
+ "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
+ "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
+ "\x03\xe7\x05\x39\xf5\x05\x26\xee"
+ "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
+ "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
+ "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
+ "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
+ "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
+ "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95"
+ "\x02\x88\x41\x97\x16\x93\x99\x37"
+ "\x51\x05\x82\x09\x74\x94\x45\x92",
+ .klen = 64,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
+ "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
+ "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
+ "\x92\x99\xde\xd3\x76\xed\xcd\x63"
+ "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
+ "\x79\x36\x31\x19\x62\xae\x10\x7e"
+ "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
+ "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
+ "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
+ "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
+ "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
+ "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
+ "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
+ "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
+ "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
+ "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
+ "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
+ "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
+ "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
+ "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
+ "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
+ "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
+ "\xac\xa5\xd0\x38\xef\x19\x56\x53"
+ "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
+ "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
+ "\xed\x22\x34\x1c\x5d\xed\x17\x06"
+ "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
+ "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
+ "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
+ "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
+ "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
+ "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
+ "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
+ "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
+ "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
+ "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
+ "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
+ "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
+ "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
+ "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
+ "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
+ "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
+ "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
+ "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
+ "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
+ "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
+ "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
+ "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
+ "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
+ "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
+ "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
+ "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
+ "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
+ "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
+ "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
+ "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
+ "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
+ "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
+ "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
+ "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
+ "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
+ "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
+ "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
+ "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
+static const struct cipher_testvec speck64_enc_tv_template[] = {
+ { /* Speck64/96 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13",
+ .klen = 12,
+ .input = "\x65\x61\x6e\x73\x20\x46\x61\x74",
+ .ilen = 8,
+ .result = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
+ .rlen = 8,
+ }, { /* Speck64/128 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13\x18\x19\x1a\x1b",
+ .klen = 16,
+ .input = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
+ .ilen = 8,
+ .result = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
+ .rlen = 8,
+ },
+};
+
+static const struct cipher_testvec speck64_dec_tv_template[] = {
+ { /* Speck64/96 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13",
+ .klen = 12,
+ .input = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
+ .ilen = 8,
+ .result = "\x65\x61\x6e\x73\x20\x46\x61\x74",
+ .rlen = 8,
+ }, { /* Speck64/128 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13\x18\x19\x1a\x1b",
+ .klen = 16,
+ .input = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
+ .ilen = 8,
+ .result = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
+ .rlen = 8,
+ },
+};
+
+/*
+ * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the result
+ * recomputed with Speck64 as the cipher, and key lengths adjusted
+ */
+
+static const struct cipher_testvec speck64_xts_enc_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ilen = 32,
+ .result = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
+ "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
+ "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
+ "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
+ "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
+ "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
+ "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
+ "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
+ "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
+ "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
+ "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
+ "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
+ "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
+ "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
+ "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
+ "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
+ "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
+ "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
+ "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
+ "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
+ "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
+ "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
+ "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
+ "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
+ "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
+ "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
+ "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
+ "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
+ "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
+ "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
+ "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
+ "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
+ "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
+ "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
+ "\x07\xff\xf3\x72\x74\x48\xb5\x40"
+ "\x50\xb5\xdd\x90\x43\x31\x18\x15"
+ "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
+ "\x29\x93\x90\x8b\xda\x07\xf0\x35"
+ "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
+ "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
+ "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
+ "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
+ "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
+ "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
+ "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
+ "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
+ "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
+ "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
+ "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
+ "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
+ "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
+ "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
+ "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
+ "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
+ "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
+ "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
+ "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
+ "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
+ "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
+ "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
+ "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
+ "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
+ "\x59\xda\xee\x1a\x22\x18\xda\x0d"
+ "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
+ "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
+ "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
+ "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
+ "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
+ "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
+ "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
+ "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
+ "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
+ "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27",
+ .klen = 32,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
+ "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
+ "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
+ "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
+ "\x78\x16\xea\x80\xdb\x33\x75\x94"
+ "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
+ "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
+ "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
+ "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
+ "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
+ "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
+ "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
+ "\x84\x5e\x46\xed\x20\x89\xb6\x44"
+ "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
+ "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
+ "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
+ "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
+ "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
+ "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
+ "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
+ "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
+ "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
+ "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
+ "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
+ "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
+ "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
+ "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
+ "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
+ "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
+ "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
+ "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
+ "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
+ "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
+ "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
+ "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
+ "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
+ "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
+ "\x52\x7f\x29\x51\x94\x20\x67\x3c"
+ "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
+ "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
+ "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
+ "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
+ "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
+ "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
+ "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
+ "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
+ "\x65\x53\x0f\x41\x11\xbd\x98\x99"
+ "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
+ "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
+ "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
+ "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
+ "\x63\x51\x34\x9d\x96\x12\xae\xce"
+ "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
+ "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
+ "\x54\x27\x74\xbb\x10\x86\x57\x46"
+ "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
+ "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
+ "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
+ "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
+ "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
+ "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
+ "\x9b\x63\x76\x32\x2f\x19\x72\x10"
+ "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
+ "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
+ .rlen = 512,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
+static const struct cipher_testvec speck64_xts_dec_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
+ "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
+ "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
+ "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
+ .ilen = 32,
+ .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
+ "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
+ "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
+ "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
+ "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
+ "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
+ "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
+ "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
+ "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
+ "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
+ "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
+ "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
+ "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
+ "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
+ "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
+ "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
+ "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
+ "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
+ "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
+ "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
+ "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
+ "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
+ "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
+ "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
+ "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
+ "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
+ "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
+ "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
+ "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
+ "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
+ "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
+ "\x07\xff\xf3\x72\x74\x48\xb5\x40"
+ "\x50\xb5\xdd\x90\x43\x31\x18\x15"
+ "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
+ "\x29\x93\x90\x8b\xda\x07\xf0\x35"
+ "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
+ "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
+ "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
+ "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
+ "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
+ "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
+ "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
+ "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
+ "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
+ "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
+ "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
+ "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
+ "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
+ "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
+ "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
+ "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
+ "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
+ "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
+ "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
+ "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
+ "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
+ "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
+ "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
+ "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
+ "\x59\xda\xee\x1a\x22\x18\xda\x0d"
+ "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
+ "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
+ "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
+ "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
+ "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
+ "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
+ "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
+ "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
+ "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
+ "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27",
+ .klen = 32,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
+ "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
+ "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
+ "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
+ "\x78\x16\xea\x80\xdb\x33\x75\x94"
+ "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
+ "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
+ "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
+ "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
+ "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
+ "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
+ "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
+ "\x84\x5e\x46\xed\x20\x89\xb6\x44"
+ "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
+ "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
+ "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
+ "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
+ "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
+ "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
+ "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
+ "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
+ "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
+ "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
+ "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
+ "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
+ "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
+ "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
+ "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
+ "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
+ "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
+ "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
+ "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
+ "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
+ "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
+ "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
+ "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
+ "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
+ "\x52\x7f\x29\x51\x94\x20\x67\x3c"
+ "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
+ "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
+ "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
+ "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
+ "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
+ "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
+ "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
+ "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
+ "\x65\x53\x0f\x41\x11\xbd\x98\x99"
+ "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
+ "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
+ "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
+ "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
+ "\x63\x51\x34\x9d\x96\x12\xae\xce"
+ "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
+ "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
+ "\x54\x27\x74\xbb\x10\x86\x57\x46"
+ "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
+ "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
+ "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
+ "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
+ "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
+ "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
+ "\x9b\x63\x76\x32\x2f\x19\x72\x10"
+ "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
+ "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
/* Cast6 test vectors from RFC 2612 */
static const struct cipher_testvec cast6_enc_tv_template[] = {
{
diff --git a/crypto/xts.c b/crypto/xts.c
index f317c48b5e43..12284183bd20 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -357,78 +357,6 @@ static int decrypt(struct skcipher_request *req)
return do_decrypt(req, init_crypt(req, decrypt_done));
}
-int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
- struct scatterlist *ssrc, unsigned int nbytes,
- struct xts_crypt_req *req)
-{
- const unsigned int bsize = XTS_BLOCK_SIZE;
- const unsigned int max_blks = req->tbuflen / bsize;
- struct blkcipher_walk walk;
- unsigned int nblocks;
- le128 *src, *dst, *t;
- le128 *t_buf = req->tbuf;
- int err, i;
-
- BUG_ON(max_blks < 1);
-
- blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
-
- err = blkcipher_walk_virt(desc, &walk);
- nbytes = walk.nbytes;
- if (!nbytes)
- return err;
-
- nblocks = min(nbytes / bsize, max_blks);
- src = (le128 *)walk.src.virt.addr;
- dst = (le128 *)walk.dst.virt.addr;
-
- /* calculate first value of T */
- req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
-
- i = 0;
- goto first;
-
- for (;;) {
- do {
- for (i = 0; i < nblocks; i++) {
- gf128mul_x_ble(&t_buf[i], t);
-first:
- t = &t_buf[i];
-
- /* PP <- T xor P */
- le128_xor(dst + i, t, src + i);
- }
-
- /* CC <- E(Key2,PP) */
- req->crypt_fn(req->crypt_ctx, (u8 *)dst,
- nblocks * bsize);
-
- /* C <- T xor CC */
- for (i = 0; i < nblocks; i++)
- le128_xor(dst + i, dst + i, &t_buf[i]);
-
- src += nblocks;
- dst += nblocks;
- nbytes -= nblocks * bsize;
- nblocks = min(nbytes / bsize, max_blks);
- } while (nblocks > 0);
-
- *(le128 *)walk.iv = *t;
-
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- if (!nbytes)
- break;
-
- nblocks = min(nbytes / bsize, max_blks);
- src = (le128 *)walk.src.virt.addr;
- dst = (le128 *)walk.dst.virt.addr;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(xts_crypt);
-
static int init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 4d0f571c15f9..d53541e96bee 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -452,3 +452,10 @@ config UML_RANDOM
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
+
+config HW_RANDOM_KEYSTONE
+ depends on ARCH_KEYSTONE
+ default HW_RANDOM
+ tristate "TI Keystone NETCP SA Hardware random number generator"
+ help
+	  This option enables Keystone's hardware random number generator.
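+
+	  To compile this driver as a module, choose M here; the
+	  module will be called ks-sa-rng.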
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b780370bd4eb..533e913c93d1 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
+obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 7a84cec30c3a..6767d965c36c 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -163,6 +163,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
/* Clock is optional on most platforms */
priv->clk = devm_clk_get(dev, NULL);
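+	/* but a deferred clock lookup must still be propagated, so that
+	 * probing is retried once the clock provider is available */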
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
priv->rng.name = pdev->name;
priv->rng.init = bcm2835_rng_init;
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index dd1007aecb10..2d1352b67168 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -77,7 +77,7 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
}
/* Remove the VF */
-void cavium_rng_remove_vf(struct pci_dev *pdev)
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
{
struct cavium_rng *rng;
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index a944e0a47f42..63d6e68c24d2 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -62,7 +62,7 @@ static int cavium_rng_probe(struct pci_dev *pdev,
}
/* Disable VF and RNG Hardware */
-void cavium_rng_remove(struct pci_dev *pdev)
+static void cavium_rng_remove(struct pci_dev *pdev)
{
struct cavium_rng_pf *rng;
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index eca87249bcff..250123bc4905 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -300,7 +300,7 @@ static int __maybe_unused imx_rngc_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
new file mode 100644
index 000000000000..62c6696c1dbd
--- /dev/null
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -0,0 +1,257 @@
+/*
+ * Random Number Generator driver for the Keystone SoC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Sandeep Nair
+ * Vitaly Andrianov
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#define SA_CMD_STATUS_OFS 0x8
+
+/* TRNG enable control in SA System module */
+#define SA_CMD_STATUS_REG_TRNG_ENABLE BIT(3)
+
+/* TRNG start control in TRNG module */
+#define TRNG_CNTL_REG_TRNG_ENABLE BIT(10)
+
+/* Data ready indicator in STATUS register */
+#define TRNG_STATUS_REG_READY BIT(0)
+
+/* Data ready clear control in INTACK register */
+#define TRNG_INTACK_REG_READY BIT(0)
+
+/*
+ * Number of samples taken to gather entropy during startup.
+ * If the value is 0, the number of samples is 2^24; otherwise it is
+ * the value times 2^8.
+ */
+#define TRNG_DEF_STARTUP_CYCLES 0
+#define TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT 16
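+/* (e.g. a value of 2 here would request 2 * 2^8 = 512 startup samples) */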
+
+/*
+ * Minimum number of samples taken to regenerate entropy.
+ * If the value is 0, the number of samples is 2^24; otherwise it is
+ * the value times 2^6.
+ */
+#define TRNG_DEF_MIN_REFILL_CYCLES 1
+#define TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT 0
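+/* (so the default of 1 requests 1 * 2^6 = 64 samples) */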
+
+/*
+ * Maximum number of samples taken to regenerate entropy.
+ * If the value is 0, the number of samples is 2^24; otherwise it is
+ * the value times 2^8.
+ */
+#define TRNG_DEF_MAX_REFILL_CYCLES 0
+#define TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT 16
+
+/* Number of CLK input cycles between samples */
+#define TRNG_DEF_CLK_DIV_CYCLES 0
+#define TRNG_CFG_REG_SAMPLE_DIV_SHIFT 8
+
+/* Maximum retries to get RNG data */
+#define SA_MAX_RNG_DATA_RETRIES 5
+/* Delay between retries (in usecs) */
+#define SA_RNG_DATA_RETRY_DELAY 5
+
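+/* TRNG MMIO register layout, in address order */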
+struct trng_regs {
+ u32 output_l;
+ u32 output_h;
+ u32 status;
+ u32 intmask;
+ u32 intack;
+ u32 control;
+ u32 config;
+};
+
+struct ks_sa_rng {
+ struct device *dev;
+ struct hwrng rng;
+ struct clk *clk;
+ struct regmap *regmap_cfg;
+ struct trng_regs *reg_rng;
+};
+
+static int ks_sa_rng_init(struct hwrng *rng)
+{
+ u32 value;
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Enable RNG module */
+ regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+ SA_CMD_STATUS_REG_TRNG_ENABLE,
+ SA_CMD_STATUS_REG_TRNG_ENABLE);
+
+ /* Configure RNG module */
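+	/* clear the enable bit first so the block is configured while disabled */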
+ writel(0, &ks_sa_rng->reg_rng->control);
+ value = TRNG_DEF_STARTUP_CYCLES << TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT;
+ writel(value, &ks_sa_rng->reg_rng->control);
+
+ value = (TRNG_DEF_MIN_REFILL_CYCLES <<
+ TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT) |
+ (TRNG_DEF_MAX_REFILL_CYCLES <<
+ TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT) |
+ (TRNG_DEF_CLK_DIV_CYCLES <<
+ TRNG_CFG_REG_SAMPLE_DIV_SHIFT);
+
+ writel(value, &ks_sa_rng->reg_rng->config);
+
+ /* Disable all interrupts from TRNG */
+ writel(0, &ks_sa_rng->reg_rng->intmask);
+
+ /* Enable RNG */
+ value = readl(&ks_sa_rng->reg_rng->control);
+ value |= TRNG_CNTL_REG_TRNG_ENABLE;
+ writel(value, &ks_sa_rng->reg_rng->control);
+
+ return 0;
+}
+
+static void ks_sa_rng_cleanup(struct hwrng *rng)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Disable RNG */
+ writel(0, &ks_sa_rng->reg_rng->control);
+ regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+ SA_CMD_STATUS_REG_TRNG_ENABLE, 0);
+}
+
+static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Read random data */
+ data[0] = readl(&ks_sa_rng->reg_rng->output_l);
+ data[1] = readl(&ks_sa_rng->reg_rng->output_h);
+
+ writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
+
+ return sizeof(u32) * 2;
+}
+
+static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ u32 ready;
+ int j;
+
+ for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
+ ready = readl(&ks_sa_rng->reg_rng->status);
+ ready &= TRNG_STATUS_REG_READY;
+
+ if (ready || !wait)
+ break;
+
+ udelay(SA_RNG_DATA_RETRY_DELAY);
+ }
+
+ return ready;
+}
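Together these two callbacks implement the classic hwrng contract: data_present() polls TRNG_STATUS_REG_READY (busy-waiting only when the caller allows it), and data_read() then returns 8 bytes and acks the ready condition via INTACK. A rough sketch of how the core is expected to drive them; this is not the actual hwrng core loop:

static int ks_sa_sketch_read_once(struct hwrng *rng, u32 buf[2])
{
        if (!rng->data_present(rng, 1))         /* retries exhausted */
                return -EAGAIN;
        return rng->data_read(rng, buf);        /* 8 on success */
}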
+
+static int ks_sa_rng_probe(struct platform_device *pdev)
+{
+ struct ks_sa_rng *ks_sa_rng;
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct resource *mem;
+
+ ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
+ if (!ks_sa_rng)
+ return -ENOMEM;
+
+ ks_sa_rng->dev = dev;
+ ks_sa_rng->rng = (struct hwrng) {
+ .name = "ks_sa_hwrng",
+ .init = ks_sa_rng_init,
+ .data_read = ks_sa_rng_data_read,
+ .data_present = ks_sa_rng_data_present,
+ .cleanup = ks_sa_rng_cleanup,
+ };
+ ks_sa_rng->rng.priv = (unsigned long)dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ks_sa_rng->reg_rng = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(ks_sa_rng->reg_rng))
+ return PTR_ERR(ks_sa_rng->reg_rng);
+
+ ks_sa_rng->regmap_cfg =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "ti,syscon-sa-cfg");
+
+ if (IS_ERR(ks_sa_rng->regmap_cfg)) {
+ dev_err(dev, "syscon_node_to_regmap failed\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable SA power-domain\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ks_sa_rng);
+
+ return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
+}
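One subtlety in this probe sequence: pm_runtime_get_sync() increments the device usage counter even when it fails, so a strictly balanced error path would also drop that reference before disabling runtime PM. A sketch of the stricter variant (the pm_runtime_put_noidle() call is a convention assumed by this note, not something this patch adds):

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "Failed to enable SA power-domain\n");
                pm_runtime_put_noidle(dev);     /* balance the failed get */
                pm_runtime_disable(dev);
                return ret;
        }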
+
+static int ks_sa_rng_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ks_sa_rng_dt_match[] = {
+ {
+ .compatible = "ti,keystone-rng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks_sa_rng_dt_match);
+
+static struct platform_driver ks_sa_rng_driver = {
+ .driver = {
+ .name = "ks-sa-rng",
+ .of_match_table = ks_sa_rng_dt_match,
+ },
+ .probe = ks_sa_rng_probe,
+ .remove = ks_sa_rng_remove,
+};
+
+module_platform_driver(ks_sa_rng_driver);
+
+MODULE_DESCRIPTION("Keystone NETCP SA H/W Random Number Generator driver");
+MODULE_AUTHOR("Vitaly Andrianov <vitalya@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 467362262651..f83bee513d91 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -16,16 +16,13 @@
* This driver is based on other RNG drivers.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* RNGA Registers */
#define RNGA_CONTROL 0x00
@@ -197,10 +194,18 @@ static int __exit mxc_rnga_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id mxc_rnga_of_match[] = {
+ { .compatible = "fsl,imx21-rnga", },
+ { .compatible = "fsl,imx31-rnga", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxc_rnga_of_match);
+
static struct platform_driver mxc_rnga_driver = {
.driver = {
- .name = "mxc_rnga",
- },
+ .name = "mxc_rnga",
+ .of_match_table = mxc_rnga_of_match,
+ },
.remove = __exit_p(mxc_rnga_remove),
};
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 74d11ae6abe9..b65ff6962899 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -150,6 +150,7 @@ struct omap_rng_dev {
const struct omap_rng_pdata *pdata;
struct hwrng rng;
struct clk *clk;
+ struct clk *clk_reg;
};
static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -480,6 +481,19 @@ static int omap_rng_probe(struct platform_device *pdev)
}
}
+ priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk_reg)) {
+ ret = clk_prepare_enable(priv->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to enable the register clk: %d\n",
+ ret);
+ goto err_register;
+ }
+ }
+
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)
@@ -499,8 +513,8 @@ err_register:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_reg);
+ clk_disable_unprepare(priv->clk);
err_ioremap:
dev_err(dev, "initialization failed.\n");
return ret;
@@ -517,8 +531,8 @@ static int omap_rng_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_reg);
return 0;
}
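The new "reg" clock is optional: only -EPROBE_DEFER is propagated, and every other devm_clk_get() failure is treated as "this SoC has no such clock". The error and remove paths can then call clk_disable_unprepare() unconditionally, because the clk core silently ignores NULL and error-pointer clocks. The idiom in isolation (a sketch; the helper name is illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int optional_clk_get_enable(struct device *dev, const char *id,
                                   struct clk **out)
{
        struct clk *clk = devm_clk_get(dev, id);

        if (IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* provider not ready: retry probe */
        if (!IS_ERR(clk)) {             /* other errors mean "clock absent" */
                int ret = clk_prepare_enable(clk);

                if (ret)
                        return ret;
        }
        *out = clk;     /* clk_disable_unprepare() tolerates ERR_PTRs */
        return 0;
}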
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 63d84e6f1891..0d2328da3b76 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -16,15 +16,18 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#define RNG_CR 0x00
#define RNG_CR_RNGEN BIT(2)
+#define RNG_CR_CED BIT(5)
#define RNG_SR 0x04
#define RNG_SR_SEIS BIT(6)
@@ -33,19 +36,12 @@
#define RNG_DR 0x08
-/*
- * It takes 40 cycles @ 48MHz to generate each random number (e.g. <1us).
- * At the time of writing STM32 parts max out at ~200MHz meaning a timeout
- * of 500 leaves us a very comfortable margin for error. The loop to which
- * the timeout applies takes at least 4 instructions per iteration so the
- * timeout is enough to take us up to multi-GHz parts!
- */
-#define RNG_TIMEOUT 500
-
struct stm32_rng_private {
struct hwrng rng;
void __iomem *base;
struct clk *clk;
+ struct reset_control *rst;
+ bool ced;
};
static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -59,13 +55,16 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
while (max > sizeof(u32)) {
sr = readl_relaxed(priv->base + RNG_SR);
+ /* Manage the timer-based timeout and take care of */
+ /* the initial delay after the RNG is first enabled */
if (!sr && wait) {
- unsigned int timeout = RNG_TIMEOUT;
-
- do {
- cpu_relax();
- sr = readl_relaxed(priv->base + RNG_SR);
- } while (!sr && --timeout);
+ retval = readl_relaxed_poll_timeout_atomic(priv->base
+ + RNG_SR,
+ sr, sr,
+ 10, 50000);
+ if (retval)
+ dev_err((struct device *)priv->rng.priv,
+ "%s: timeout %x!\n", __func__, sr);
}
/* If error detected or data not ready... */
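readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) from linux/iopoll.h replaces the removed hand-rolled loop: it re-reads RNG_SR into sr roughly every 10 us, returns 0 as soon as the condition (sr non-zero) holds, and returns -ETIMEDOUT after the 50 ms budget, which also covers the generator's initial seeding delay. An open-coded sketch of those semantics (not the macro's exact expansion):

static int wait_for_rng_sr(void __iomem *base)
{
        int budget_us = 50000;
        u32 sr = readl_relaxed(base + RNG_SR);

        while (!sr && budget_us > 0) {
                udelay(10);
                budget_us -= 10;
                sr = readl_relaxed(base + RNG_SR);
        }
        return sr ? 0 : -ETIMEDOUT;
}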
@@ -99,7 +98,11 @@ static int stm32_rng_init(struct hwrng *rng)
if (err)
return err;
- writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
+ if (priv->ced)
+ writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
+ else
+ writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED,
+ priv->base + RNG_CR);
/* clear error indicators */
writel_relaxed(0, priv->base + RNG_SR);
@@ -140,6 +143,15 @@ static int stm32_rng_probe(struct platform_device *ofdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
+ if (!IS_ERR(priv->rst)) {
+ reset_control_assert(priv->rst);
+ udelay(2);
+ reset_control_deassert(priv->rst);
+ }
+
+ priv->ced = of_property_read_bool(np, "clock-error-detect");
+
dev_set_drvdata(dev, priv);
priv->rng.name = dev_driver_string(dev),
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4b741b83e23f..d1ea1a07cecb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -464,13 +464,6 @@ if CRYPTO_DEV_UX500
source "drivers/crypto/ux500/Kconfig"
endif # if CRYPTO_DEV_UX500
-config CRYPTO_DEV_BFIN_CRC
- tristate "Support for Blackfin CRC hardware"
- depends on BF60x
- help
- Newer Blackfin processors have CRC hardware. Select this if you
- want to use the Blackfin CRC module.
-
config CRYPTO_DEV_ATMEL_AUTHENC
tristate "Support for Atmel IPSEC/SSL hw accelerator"
depends on HAS_DMA
@@ -730,4 +723,31 @@ config CRYPTO_DEV_ARTPEC6
To compile this driver as a module, choose M here.
+config CRYPTO_DEV_CCREE
+ tristate "Support for ARM TrustZone CryptoCell family of security processors"
+ depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
+ default n
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_CTR
+ select CRYPTO_XTS
+ help
+ Say 'Y' to enable a driver for the REE interface of the Arm
+ TrustZone CryptoCell family of processors. Currently the
+ CryptoCell 712, 710 and 630 are supported.
+ Choose this if you wish to use hardware acceleration of
+ cryptographic operations on the system REE.
+ If unsure, say Y.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 2513d13ea2c4..7ae87b4f6c8d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -3,9 +3,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
-obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 691c6465b71e..801aeab5ab1e 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2155,7 +2155,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
badkey:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- memzero_explicit(&key, sizeof(keys));
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
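The one-character change above is a real bug fix, not cleanup: the old call zeroed sizeof(keys) bytes starting at &key (the address of the u8 pointer argument), clobbering that much stack while leaving the parsed key material in struct crypto_authenc_keys untouched. The corrected pattern in isolation (illustration only):

#include <crypto/authenc.h>
#include <linux/string.h>

static void wipe_authenc_keys(struct crypto_authenc_keys *keys)
{
        /* wipe the struct that actually holds the auth and enc keys */
        memzero_explicit(keys, sizeof(*keys));
}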
@@ -2602,16 +2602,13 @@ static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pd
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ if (!pdata)
return ERR_PTR(-ENOMEM);
- }
pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
if (!pdata->dma_slave) {
- dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
devm_kfree(&pdev->dev, pdata);
return ERR_PTR(-ENOMEM);
}
@@ -2649,7 +2646,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
if (aes_dd == NULL) {
- dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto aes_dd_err;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8874aa5ca0f7..4d43081120db 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2726,18 +2726,14 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ if (!pdata)
return ERR_PTR(-ENOMEM);
- }
pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
- if (!pdata->dma_slave) {
- dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ if (!pdata->dma_slave)
return ERR_PTR(-ENOMEM);
- }
return pdata;
}
@@ -2758,7 +2754,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
if (sha_dd == NULL) {
- dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto sha_dd_err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 592124f8382b..97b0423efa7f 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1312,18 +1312,14 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ if (!pdata)
return ERR_PTR(-ENOMEM);
- }
pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
- if (!pdata->dma_slave) {
- dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ if (!pdata->dma_slave)
return ERR_PTR(-ENOMEM);
- }
return pdata;
}
@@ -1344,7 +1340,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
if (tdes_dd == NULL) {
- dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto tdes_dd_err;
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2b75f95bbe1b..309c67c7012f 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -818,7 +818,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
/* AES hashing keeps key size in type field, so need to copy it here */
if (hash_parms.alg == HASH_ALG_AES)
- hash_parms.type = cipher_parms.type;
+ hash_parms.type = (enum hash_type)cipher_parms.type;
else
hash_parms.type = spu->spu_hash_type(rctx->total_sent);
@@ -1409,7 +1409,7 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
rctx->iv_ctr_len);
if (ctx->auth.alg == HASH_ALG_AES)
- hash_parms.type = ctx->cipher_type;
+ hash_parms.type = (enum hash_type)ctx->cipher_type;
/* General case AAD padding (CCM and RFC4543 special cases below) */
aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d543c010ccd9..a912c6ad3e85 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -279,7 +279,6 @@ int do_shash(unsigned char *name, unsigned char *result,
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
- pr_err("%s: Memory allocation failure\n", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
deleted file mode 100644
index bfbf8bf77f03..000000000000
--- a/drivers/crypto/bfin_crc.c
+++ /dev/null
@@ -1,743 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support Blackfin CRC HW acceleration.
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/crypto.h>
-#include <linux/cryptohash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
-#include <asm/unaligned.h>
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-#include <asm/io.h>
-
-#include "bfin_crc.h"
-
-#define CRC_CCRYPTO_QUEUE_LENGTH 5
-
-#define DRIVER_NAME "bfin-hmac-crc"
-#define CHKSUM_DIGEST_SIZE 4
-#define CHKSUM_BLOCK_SIZE 1
-
-#define CRC_MAX_DMA_DESC 100
-
-#define CRC_CRYPTO_STATE_UPDATE 1
-#define CRC_CRYPTO_STATE_FINALUPDATE 2
-#define CRC_CRYPTO_STATE_FINISH 3
-
-struct bfin_crypto_crc {
- struct list_head list;
- struct device *dev;
- spinlock_t lock;
-
- int irq;
- int dma_ch;
- u32 poly;
- struct crc_register *regs;
-
- struct ahash_request *req; /* current request in operation */
- struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
- dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
- u8 *sg_mid_buf;
- dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */
-
- struct tasklet_struct done_task;
- struct crypto_queue queue; /* waiting requests */
-
- u8 busy:1; /* crc device in operation flag */
-};
-
-static struct bfin_crypto_crc_list {
- struct list_head dev_list;
- spinlock_t lock;
-} crc_list;
-
-struct bfin_crypto_crc_reqctx {
- struct bfin_crypto_crc *crc;
-
- unsigned int total; /* total request bytes */
- size_t sg_buflen; /* bytes for this update */
- unsigned int sg_nents;
- struct scatterlist *sg; /* sg list head for this update*/
- struct scatterlist bufsl[2]; /* chained sg list */
-
- size_t bufnext_len;
- size_t buflast_len;
- u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
- u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */
-
- u8 flag;
-};
-
-struct bfin_crypto_crc_ctx {
- struct bfin_crypto_crc *crc;
- u32 key;
-};
-
-/*
- * get element in scatter list by given index
- */
-static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
- unsigned int index)
-{
- struct scatterlist *sg = NULL;
- int i;
-
- for_each_sg(sg_list, sg, nents, i)
- if (i == index)
- break;
-
- return sg;
-}
-
-static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
-{
- writel(0, &crc->regs->datacntrld);
- writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
- writel(key, &crc->regs->curresult);
-
- /* setup CRC interrupts */
- writel(CMPERRI | DCNTEXPI, &crc->regs->status);
- writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
-
- return 0;
-}
-
-static int bfin_crypto_crc_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
- struct bfin_crypto_crc *crc;
-
- dev_dbg(ctx->crc->dev, "crc_init\n");
- spin_lock_bh(&crc_list.lock);
- list_for_each_entry(crc, &crc_list.dev_list, list) {
- crc_ctx->crc = crc;
- break;
- }
- spin_unlock_bh(&crc_list.lock);
-
- if (sg_nents(req->src) > CRC_MAX_DMA_DESC) {
- dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
- CRC_MAX_DMA_DESC);
- return -EINVAL;
- }
-
- ctx->crc = crc;
- ctx->bufnext_len = 0;
- ctx->buflast_len = 0;
- ctx->sg_buflen = 0;
- ctx->total = 0;
- ctx->flag = 0;
-
- /* init crc results */
- put_unaligned_le32(crc_ctx->key, req->result);
-
- dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
- crypto_ahash_digestsize(tfm));
-
- return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
-}
-
-static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
-{
- struct scatterlist *sg;
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
- int i = 0, j = 0;
- unsigned long dma_config;
- unsigned int dma_count;
- unsigned int dma_addr;
- unsigned int mid_dma_count = 0;
- int dma_mod;
-
- dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
-
- for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
- dma_addr = sg_dma_address(sg);
- /* deduce extra bytes in last sg */
- if (sg_is_last(sg))
- dma_count = sg_dma_len(sg) - ctx->bufnext_len;
- else
- dma_count = sg_dma_len(sg);
-
- if (mid_dma_count) {
- /* Append last middle dma buffer to 4 bytes with first
- bytes in current sg buffer. Move addr of current
- sg and deduce the length of current sg.
- */
- memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
- sg_virt(sg),
- CHKSUM_DIGEST_SIZE - mid_dma_count);
- dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
- dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
-
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
- DMAEN | PSIZE_32 | WDSIZE_32;
-
- /* setup new dma descriptor for next middle dma */
- crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
- crc->sg_cpu[i].cfg = dma_config;
- crc->sg_cpu[i].x_count = 1;
- crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
- dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
- "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
- i, crc->sg_cpu[i].start_addr,
- crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
- crc->sg_cpu[i].x_modify);
- i++;
- }
-
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
- /* chop current sg dma len to multiple of 32 bits */
- mid_dma_count = dma_count % 4;
- dma_count &= ~0x3;
-
- if (dma_addr % 4 == 0) {
- dma_config |= WDSIZE_32;
- dma_count >>= 2;
- dma_mod = 4;
- } else if (dma_addr % 2 == 0) {
- dma_config |= WDSIZE_16;
- dma_count >>= 1;
- dma_mod = 2;
- } else {
- dma_config |= WDSIZE_8;
- dma_mod = 1;
- }
-
- crc->sg_cpu[i].start_addr = dma_addr;
- crc->sg_cpu[i].cfg = dma_config;
- crc->sg_cpu[i].x_count = dma_count;
- crc->sg_cpu[i].x_modify = dma_mod;
- dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
- "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
- i, crc->sg_cpu[i].start_addr,
- crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
- crc->sg_cpu[i].x_modify);
- i++;
-
- if (mid_dma_count) {
- /* copy extra bytes to next middle dma buffer */
- memcpy(crc->sg_mid_buf + (i << 2),
- (u8*)sg_virt(sg) + (dma_count << 2),
- mid_dma_count);
- }
- }
-
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
- /* For final update req, append the buffer for next update as well*/
- if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
- ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
- crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
- CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
- crc->sg_cpu[i].cfg = dma_config;
- crc->sg_cpu[i].x_count = 1;
- crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
- dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
- "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
- i, crc->sg_cpu[i].start_addr,
- crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
- crc->sg_cpu[i].x_modify);
- i++;
- }
-
- if (i == 0)
- return;
-
- /* Set the last descriptor to stop mode */
- crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
- crc->sg_cpu[i - 1].cfg |= DI_EN;
- set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
- set_dma_x_count(crc->dma_ch, 0);
- set_dma_x_modify(crc->dma_ch, 0);
- set_dma_config(crc->dma_ch, dma_config);
-}
-
-static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
- struct ahash_request *req)
-{
- struct crypto_async_request *async_req, *backlog;
- struct bfin_crypto_crc_reqctx *ctx;
- struct scatterlist *sg;
- int ret = 0;
- int nsg, i, j;
- unsigned int nextlen;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&crc->lock, flags);
- if (req)
- ret = ahash_enqueue_request(&crc->queue, req);
- if (crc->busy) {
- spin_unlock_irqrestore(&crc->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&crc->queue);
- async_req = crypto_dequeue_request(&crc->queue);
- if (async_req)
- crc->busy = 1;
- spin_unlock_irqrestore(&crc->lock, flags);
-
- if (!async_req)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ahash_request_cast(async_req);
- crc->req = req;
- ctx = ahash_request_ctx(req);
- ctx->sg = NULL;
- ctx->sg_buflen = 0;
- ctx->sg_nents = 0;
-
- dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
- ctx->flag, req->nbytes);
-
- if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
- if (ctx->bufnext_len == 0) {
- crc->busy = 0;
- return 0;
- }
-
- /* Pack last crc update buffer to 32bit */
- memset(ctx->bufnext + ctx->bufnext_len, 0,
- CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
- } else {
- /* Pack small data which is less than 32bit to buffer for next update. */
- if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
- memcpy(ctx->bufnext + ctx->bufnext_len,
- sg_virt(req->src), req->nbytes);
- ctx->bufnext_len += req->nbytes;
- if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
- ctx->bufnext_len) {
- goto finish_update;
- } else {
- crc->busy = 0;
- return 0;
- }
- }
-
- if (ctx->bufnext_len) {
- /* Chain in extra bytes of last update */
- ctx->buflast_len = ctx->bufnext_len;
- memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);
-
- nsg = ctx->sg_buflen ? 2 : 1;
- sg_init_table(ctx->bufsl, nsg);
- sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
- if (nsg > 1)
- sg_chain(ctx->bufsl, nsg, req->src);
- ctx->sg = ctx->bufsl;
- } else
- ctx->sg = req->src;
-
- /* Chop crc buffer size to multiple of 32 bit */
- nsg = sg_nents(ctx->sg);
- ctx->sg_nents = nsg;
- ctx->sg_buflen = ctx->buflast_len + req->nbytes;
- ctx->bufnext_len = ctx->sg_buflen % 4;
- ctx->sg_buflen &= ~0x3;
-
- if (ctx->bufnext_len) {
- /* copy extra bytes to buffer for next update */
- memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
- nextlen = ctx->bufnext_len;
- for (i = nsg - 1; i >= 0; i--) {
- sg = sg_get(ctx->sg, nsg, i);
- j = min(nextlen, sg_dma_len(sg));
- memcpy(ctx->bufnext + nextlen - j,
- sg_virt(sg) + sg_dma_len(sg) - j, j);
- if (j == sg_dma_len(sg))
- ctx->sg_nents--;
- nextlen -= j;
- if (nextlen == 0)
- break;
- }
- }
- }
-
-finish_update:
- if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
- ctx->flag == CRC_CRYPTO_STATE_FINISH))
- ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
-
- /* set CRC data count before start DMA */
- writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
-
- /* setup and enable CRC DMA */
- bfin_crypto_crc_config_dma(crc);
-
- /* finally kick off CRC operation */
- reg = readl(&crc->regs->control);
- writel(reg | BLKEN, &crc->regs->control);
-
- return -EINPROGRESS;
-}
-
-static int bfin_crypto_crc_update(struct ahash_request *req)
-{
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
-
- if (!req->nbytes)
- return 0;
-
- dev_dbg(ctx->crc->dev, "crc_update\n");
- ctx->total += req->nbytes;
- ctx->flag = CRC_CRYPTO_STATE_UPDATE;
-
- return bfin_crypto_crc_handle_queue(ctx->crc, req);
-}
-
-static int bfin_crypto_crc_final(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
-
- dev_dbg(ctx->crc->dev, "crc_final\n");
- ctx->flag = CRC_CRYPTO_STATE_FINISH;
- crc_ctx->key = 0;
-
- return bfin_crypto_crc_handle_queue(ctx->crc, req);
-}
-
-static int bfin_crypto_crc_finup(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
-
- dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
- ctx->total += req->nbytes;
- ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
- crc_ctx->key = 0;
-
- return bfin_crypto_crc_handle_queue(ctx->crc, req);
-}
-
-static int bfin_crypto_crc_digest(struct ahash_request *req)
-{
- int ret;
-
- ret = bfin_crypto_crc_init(req);
- if (ret)
- return ret;
-
- return bfin_crypto_crc_finup(req);
-}
-
-static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
-
- dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
- if (keylen != CHKSUM_DIGEST_SIZE) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- crc_ctx->key = get_unaligned_le32(key);
-
- return 0;
-}
-
-static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
-{
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);
-
- crc_ctx->key = 0;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct bfin_crypto_crc_reqctx));
-
- return 0;
-}
-
-static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct ahash_alg algs = {
- .init = bfin_crypto_crc_init,
- .update = bfin_crypto_crc_update,
- .final = bfin_crypto_crc_final,
- .finup = bfin_crypto_crc_finup,
- .digest = bfin_crypto_crc_digest,
- .setkey = bfin_crypto_crc_setkey,
- .halg.digestsize = CHKSUM_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "hmac(crc32)",
- .cra_driver_name = DRIVER_NAME,
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = bfin_crypto_crc_cra_init,
- .cra_exit = bfin_crypto_crc_cra_exit,
- }
-};
-
-static void bfin_crypto_crc_done_task(unsigned long data)
-{
- struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;
-
- bfin_crypto_crc_handle_queue(crc, NULL);
-}
-
-static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
-{
- struct bfin_crypto_crc *crc = dev_id;
- u32 reg;
-
- if (readl(&crc->regs->status) & DCNTEXP) {
- writel(DCNTEXP, &crc->regs->status);
-
- /* prepare results */
- put_unaligned_le32(readl(&crc->regs->result),
- crc->req->result);
-
- reg = readl(&crc->regs->control);
- writel(reg & ~BLKEN, &crc->regs->control);
- crc->busy = 0;
-
- if (crc->req->base.complete)
- crc->req->base.complete(&crc->req->base, 0);
-
- tasklet_schedule(&crc->done_task);
-
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
-}
-
-#ifdef CONFIG_PM
-/**
- * bfin_crypto_crc_suspend - suspend crc device
- * @pdev: device being suspended
- * @state: requested suspend state
- */
-static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
- int i = 100000;
-
- while ((readl(&crc->regs->control) & BLKEN) && --i)
- cpu_relax();
-
- if (i == 0)
- return -EBUSY;
-
- return 0;
-}
-#else
-# define bfin_crypto_crc_suspend NULL
-#endif
-
-#define bfin_crypto_crc_resume NULL
-
-/**
- * bfin_crypto_crc_probe - Initialize module
- *
- */
-static int bfin_crypto_crc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct bfin_crypto_crc *crc;
- unsigned int timeout = 100000;
- int ret;
-
- crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
- if (!crc) {
- dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
- return -ENOMEM;
- }
-
- crc->dev = dev;
-
- INIT_LIST_HEAD(&crc->list);
- spin_lock_init(&crc->lock);
- tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
- crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- crc->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR((void *)crc->regs)) {
- dev_err(&pdev->dev, "Cannot map CRC IO\n");
- return PTR_ERR((void *)crc->regs);
- }
-
- crc->irq = platform_get_irq(pdev, 0);
- if (crc->irq < 0) {
- dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
- return -ENOENT;
- }
-
- ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
- IRQF_SHARED, dev_name(dev), crc);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
- return ret;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "No CRC DMA channel specified\n");
- return -ENOENT;
- }
- crc->dma_ch = res->start;
-
- ret = request_dma(crc->dma_ch, dev_name(dev));
- if (ret) {
- dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
- return ret;
- }
-
- crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
- if (crc->sg_cpu == NULL) {
- ret = -ENOMEM;
- goto out_error_dma;
- }
- /*
- * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
- * 1 last + 1 next dma descriptors
- */
- crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
- crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
- * ((CRC_MAX_DMA_DESC + 1) << 1);
-
- writel(0, &crc->regs->control);
- crc->poly = (u32)pdev->dev.platform_data;
- writel(crc->poly, &crc->regs->poly);
-
- while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
- cpu_relax();
-
- if (timeout == 0)
- dev_info(&pdev->dev, "init crc poly timeout\n");
-
- platform_set_drvdata(pdev, crc);
-
- spin_lock(&crc_list.lock);
- list_add(&crc->list, &crc_list.dev_list);
- spin_unlock(&crc_list.lock);
-
- if (list_is_singular(&crc_list.dev_list)) {
- ret = crypto_register_ahash(&algs);
- if (ret) {
- dev_err(&pdev->dev,
- "Can't register crypto ahash device\n");
- goto out_error_dma;
- }
- }
-
- dev_info(&pdev->dev, "initialized\n");
-
- return 0;
-
-out_error_dma:
- if (crc->sg_cpu)
- dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
- free_dma(crc->dma_ch);
-
- return ret;
-}
-
-/**
- * bfin_crypto_crc_remove - Deinitialize module
- *
- */
-static int bfin_crypto_crc_remove(struct platform_device *pdev)
-{
- struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
-
- if (!crc)
- return -ENODEV;
-
- spin_lock(&crc_list.lock);
- list_del(&crc->list);
- spin_unlock(&crc_list.lock);
-
- crypto_unregister_ahash(&algs);
- tasklet_kill(&crc->done_task);
- free_dma(crc->dma_ch);
-
- return 0;
-}
-
-static struct platform_driver bfin_crypto_crc_driver = {
- .probe = bfin_crypto_crc_probe,
- .remove = bfin_crypto_crc_remove,
- .suspend = bfin_crypto_crc_suspend,
- .resume = bfin_crypto_crc_resume,
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-/**
- * bfin_crypto_crc_mod_init - Initialize module
- *
- * Checks the module params and registers the platform driver.
- * Real work is in the platform probe function.
- */
-static int __init bfin_crypto_crc_mod_init(void)
-{
- int ret;
-
- pr_info("Blackfin hardware CRC crypto driver\n");
-
- INIT_LIST_HEAD(&crc_list.dev_list);
- spin_lock_init(&crc_list.lock);
-
- ret = platform_driver_register(&bfin_crypto_crc_driver);
- if (ret) {
- pr_err("unable to register driver\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * bfin_crypto_crc_mod_exit - Deinitialize module
- */
-static void __exit bfin_crypto_crc_mod_exit(void)
-{
- platform_driver_unregister(&bfin_crypto_crc_driver);
-}
-
-module_init(bfin_crypto_crc_mod_init);
-module_exit(bfin_crypto_crc_mod_exit);
-
-MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/bfin_crc.h b/drivers/crypto/bfin_crc.h
deleted file mode 100644
index 786ef746d109..000000000000
--- a/drivers/crypto/bfin_crc.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * bfin_crc.h - interface to Blackfin CRC controllers
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef __BFIN_CRC_H__
-#define __BFIN_CRC_H__
-
-/* Function driver which use hardware crc must initialize the structure */
-struct crc_info {
- /* Input data address */
- unsigned char *in_addr;
- /* Output data address */
- unsigned char *out_addr;
- /* Input or output bytes */
- unsigned long datasize;
- union {
- /* CRC to compare with that of input buffer */
- unsigned long crc_compare;
- /* Value to compare with input data */
- unsigned long val_verify;
- /* Value to fill */
- unsigned long val_fill;
- };
- /* Value to program the 32b CRC Polynomial */
- unsigned long crc_poly;
- union {
- /* CRC calculated from the input data */
- unsigned long crc_result;
- /* First failed position to verify input data */
- unsigned long pos_verify;
- };
- /* CRC mirror flags */
- unsigned int bitmirr:1;
- unsigned int bytmirr:1;
- unsigned int w16swp:1;
- unsigned int fdsel:1;
- unsigned int rsltmirr:1;
- unsigned int polymirr:1;
- unsigned int cmpmirr:1;
-};
-
-/* Userspace interface */
-#define CRC_IOC_MAGIC 'C'
-#define CRC_IOC_CALC_CRC _IOWR('C', 0x01, unsigned int)
-#define CRC_IOC_MEMCPY_CRC _IOWR('C', 0x02, unsigned int)
-#define CRC_IOC_VERIFY_VAL _IOWR('C', 0x03, unsigned int)
-#define CRC_IOC_FILL_VAL _IOWR('C', 0x04, unsigned int)
-
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-
-struct crc_register {
- u32 control;
- u32 datacnt;
- u32 datacntrld;
- u32 __pad_1[2];
- u32 compare;
- u32 fillval;
- u32 datafifo;
- u32 intren;
- u32 intrenset;
- u32 intrenclr;
- u32 poly;
- u32 __pad_2[4];
- u32 status;
- u32 datacntcap;
- u32 __pad_3;
- u32 result;
- u32 curresult;
- u32 __pad_4[3];
- u32 revid;
-};
-
-/* CRC_STATUS Masks */
-#define CMPERR 0x00000002 /* Compare error */
-#define DCNTEXP 0x00000010 /* datacnt register expired */
-#define IBR 0x00010000 /* Input buffer ready */
-#define OBR 0x00020000 /* Output buffer ready */
-#define IRR 0x00040000 /* Immediate result ready */
-#define LUTDONE 0x00080000 /* Look-up table generation done */
-#define FSTAT 0x00700000 /* FIFO status */
-#define MAX_FIFO 4 /* Max fifo size */
-
-/* CRC_CONTROL Masks */
-#define BLKEN 0x00000001 /* Block enable */
-#define OPMODE 0x000000F0 /* Operation mode */
-#define OPMODE_OFFSET 4 /* Operation mode mask offset*/
-#define MODE_DMACPY_CRC 1 /* MTM CRC compute and compare */
-#define MODE_DATA_FILL 2 /* MTM data fill */
-#define MODE_CALC_CRC 3 /* MSM CRC compute and compare */
-#define MODE_DATA_VERIFY 4 /* MSM data verify */
-#define AUTOCLRZ 0x00000100 /* Auto clear to zero */
-#define AUTOCLRF 0x00000200 /* Auto clear to one */
-#define OBRSTALL 0x00001000 /* Stall on output buffer ready */
-#define IRRSTALL 0x00002000 /* Stall on immediate result ready */
-#define BITMIRR 0x00010000 /* Mirror bits within each byte of 32-bit input data */
-#define BITMIRR_OFFSET 16 /* Mirror bits offset */
-#define BYTMIRR 0x00020000 /* Mirror bytes of 32-bit input data */
-#define BYTMIRR_OFFSET 17 /* Mirror bytes offset */
-#define W16SWP 0x00040000 /* Mirror upper and lower 16-bit word of 32-bit input data */
-#define W16SWP_OFFSET 18 /* Mirror 16-bit word offset */
-#define FDSEL 0x00080000 /* FIFO is written after input data is mirrored */
-#define FDSEL_OFFSET 19 /* Mirror FIFO offset */
-#define RSLTMIRR 0x00100000 /* CRC result registers are mirrored. */
-#define RSLTMIRR_OFFSET 20 /* Mirror CRC result offset. */
-#define POLYMIRR 0x00200000 /* CRC poly register is mirrored. */
-#define POLYMIRR_OFFSET 21 /* Mirror CRC poly offset. */
-#define CMPMIRR 0x00400000 /* CRC compare register is mirrored. */
-#define CMPMIRR_OFFSET 22 /* Mirror CRC compare offset. */
-
-/* CRC_INTREN Masks */
-#define CMPERRI 0x02 /* CRC_ERROR_INTR */
-#define DCNTEXPI 0x10 /* CRC_STATUS_INTR */
-
-#endif
-
-#endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2188235be02d..7207a535942d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -328,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -349,7 +350,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_enc;
- cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
@@ -366,7 +367,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_dec;
- cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -387,6 +388,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -408,7 +410,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_enc;
- cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
@@ -425,7 +428,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_dec;
- cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -447,6 +451,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -468,7 +473,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_enc;
- cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
@@ -485,7 +491,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_dec;
- cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -563,9 +570,11 @@ static int aead_setkey(struct crypto_aead *aead,
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
+ memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index ceb93fbb76e6..8ae7a1be7dfd 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -625,10 +625,13 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
*zero_assoc_jump_cmd2;
@@ -650,11 +653,35 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
+ ivsize);
+ } else {
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+ CAAM_CMD_SZ);
+ }
+
/* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
JUMP_COND_MATH_Z);
+ if (is_qi)
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
/* if assoclen is ZERO, skip reading the assoc data */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -686,8 +713,11 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
+ /* jump to ICV writing */
+ if (is_qi)
+ append_jump(desc, JUMP_TEST_ALL | 4);
+ else
+ append_jump(desc, JUMP_TEST_ALL | 2);
/* zero-payload commands */
set_jump_tgt_here(desc, zero_payload_jump_cmd);
@@ -695,10 +725,18 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
/* read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+ if (is_qi)
+ /* jump to ICV writing */
+ append_jump(desc, JUMP_TEST_ALL | 2);
/* There is no input data */
set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+ if (is_qi)
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+ FIFOLD_TYPE_LAST1);
+
/* write ICV */
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
@@ -715,10 +753,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
@@ -739,6 +780,24 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
/* if assoclen is ZERO, skip reading the assoc data */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -791,10 +850,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd;
@@ -815,7 +877,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
/* Read assoc data */
@@ -823,7 +907,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
/* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -862,10 +946,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd;
@@ -887,7 +974,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
/* Read assoc data */
@@ -895,7 +1004,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
/* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
@@ -934,10 +1043,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -958,6 +1070,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
+ if (is_qi) {
+ /* assoclen is not needed, skip it */
+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
/* assoclen + cryptlen = seqinlen */
append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1004,10 +1128,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -1028,6 +1155,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (is_qi) {
+ /* assoclen is not needed, skip it */
+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
/* assoclen + cryptlen = seqoutlen */
append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 5f9445ae2114..a917af5776ce 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -27,14 +27,20 @@
#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
@@ -67,22 +73,28 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
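The DESC_QI_* lengths above feed the usual fits-in-the-descriptor-buffer test: if an inlined key would push the shared descriptor past what the 64-word buffer leaves free, the key is referenced by DMA pointer instead. Assuming CAAM_CMD_SZ is one 32-bit command word, i.e. 4 bytes (an assumption, since the header defining it is outside this hunk), the QI GCM encap case works out as:

/* DESC_GCM_BASE       = 3 * 4       = 12 bytes
 * DESC_GCM_ENC_LEN    = 12 + 16 * 4 = 76 bytes
 * DESC_QI_GCM_ENC_LEN = 76 + 6 * 4  = 100 bytes
 * against a 64-word (256-byte) buffer, leaving 156 bytes for the
 * job I/O overhead plus an inlined key, so even a 32-byte AES-256
 * key normally stays inline.
 */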
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 4aecc9435f69..cacda0831390 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -278,12 +278,317 @@ skip_split_key:
}
}
+ memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ ret = rfc4106_set_sh_desc(aead);
+ if (ret)
+ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ ret = rfc4543_set_sh_desc(aead);
+ if (ret)
+ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
@@ -562,8 +867,18 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
qidev = caam_ctx->qidev;
if (unlikely(status)) {
+ u32 ssrc = status & JRSTA_SSRC_MASK;
+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
caam_jr_strstatus(qidev, status);
- ecode = -EIO;
+ /*
+ * Verify that the hardware ICV check passed; otherwise
+ * return -EBADMSG.
+ */
+ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
+ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
}
edesc = container_of(drv_req, typeof(*edesc), drv_req);
@@ -807,6 +1122,22 @@ static int aead_decrypt(struct aead_request *req)
return aead_crypt(req, false);
}
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_crypt(req, true);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_crypt(req, false);
+}
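+
[Editor's note: the two wrappers above reject requests whose associated data cannot even hold an ESP header; for the RFC 4106/4543 templates the AAD presumably starts with the 4-byte SPI and 4-byte sequence number, hence the 8-byte floor. A hedged sketch of the same validation:

#include <errno.h>
#include <stddef.h>

/* RFC 4303 ESP header: 32-bit SPI followed by a 32-bit sequence
 * number, so any plausible IPsec AAD is at least 8 bytes long. */
#define ESP_HDR_LEN 8

static int validate_ipsec_aad(size_t assoclen)
{
	return assoclen < ESP_HDR_LEN ? -EINVAL : 0;
}
]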
+
static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
struct ablkcipher_edesc *edesc;
@@ -1327,6 +1658,61 @@ static struct caam_alg_template driver_algs[] = {
};
static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ }
+ },
/* single-pass ipsec_esp descriptor */
{
.aead = {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e843cf410373..e4cc636e1104 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -337,7 +337,8 @@ static int caam_remove(struct platform_device *pdev)
/* shut clocks off before finalizing shutdown */
clk_disable_unprepare(ctrlpriv->caam_ipg);
- clk_disable_unprepare(ctrlpriv->caam_mem);
+ if (ctrlpriv->caam_mem)
+ clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
if (ctrlpriv->caam_emi_slow)
clk_disable_unprepare(ctrlpriv->caam_emi_slow);
@@ -466,14 +467,17 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_ipg = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "mem");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM mem clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx7d") &&
+ !of_machine_is_compatible("fsl,imx7s")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "mem");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM mem clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_mem = clk;
}
- ctrlpriv->caam_mem = clk;
clk = caam_drv_identify_clk(&pdev->dev, "aclk");
if (IS_ERR(clk)) {
@@ -484,7 +488,9 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- if (!of_machine_is_compatible("fsl,imx6ul")) {
+ if (!of_machine_is_compatible("fsl,imx6ul") &&
+ !of_machine_is_compatible("fsl,imx7d") &&
+ !of_machine_is_compatible("fsl,imx7s")) {
clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -501,11 +507,13 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- ret = clk_prepare_enable(ctrlpriv->caam_mem);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
- ret);
- goto disable_caam_ipg;
+ if (ctrlpriv->caam_mem) {
+ ret = clk_prepare_enable(ctrlpriv->caam_mem);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+ ret);
+ goto disable_caam_ipg;
+ }
}
ret = clk_prepare_enable(ctrlpriv->caam_aclk);
@@ -815,9 +823,6 @@ static int caam_probe(struct platform_device *pdev)
return 0;
caam_remove:
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
caam_remove(pdev);
return ret;
@@ -829,7 +834,8 @@ disable_caam_emi_slow:
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
- clk_disable_unprepare(ctrlpriv->caam_mem);
+ if (ctrlpriv->caam_mem)
+ clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
clk_disable_unprepare(ctrlpriv->caam_ipg);
return ret;
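
[Editor's note: the ctrl.c hunks above turn caam_mem into an optional resource: it is only looked up on SoCs that provide it, and every enable/disable is guarded by a NULL check. A toy, self-contained illustration of the optional-resource pattern, with get_optional_clk and the clk struct as stand-ins, not the kernel clock API:

#include <stdio.h>

/* Toy stand-in for a clock handle; 'enabled' just tracks state. */
struct clk { const char *name; int enabled; };

static struct clk mem_clk = { "mem", 0 };

/* Optional resource: absent on some SoCs, so the pointer stays
 * NULL there and every use is guarded, like caam_mem above. */
static struct clk *get_optional_clk(int soc_has_clk)
{
	return soc_has_clk ? &mem_clk : NULL;
}

static void clk_on(struct clk *c)  { if (c) c->enabled = 1; }
static void clk_off(struct clk *c) { if (c) c->enabled = 0; }

int main(void)
{
	struct clk *c = get_optional_clk(0);  /* e.g. i.MX7: no mem clk */

	clk_on(c);    /* safe no-ops when the clock is absent */
	clk_off(c);
	printf("mem clk present: %s\n", c ? "yes" : "no");
	return 0;
}
]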
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index f9a44f485aac..b9480828da38 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -579,8 +579,15 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
fd = &dqrr->fd;
status = be32_to_cpu(fd->status);
- if (unlikely(status))
- dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
+ if (unlikely(status)) {
+ u32 ssrc = status & JRSTA_SSRC_MASK;
+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err(qidev, "Error: %#x in CAAM response FD\n",
+ status);
+ }
if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
dev_err(qidev, "Non-compound FD from CAAM\n");
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 34a6d8bf229e..06ad85ab5e86 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -436,7 +436,7 @@ static int cpt_device_init(struct cpt_device *cpt)
/* Reset the PF when probed first */
cpt_reset(cpt);
- mdelay(100);
+ msleep(100);
/*Check BIST status*/
bist = (u64)cpt_check_bist_status(cpt);
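
[Editor's note: the mdelay-to-msleep change trades a 100 ms busy-wait for a sleeping wait, which is preferable wherever the caller may sleep, as in this probe path. As a rough userspace illustration of the difference, a sketch under POSIX:

#include <time.h>

/* Busy-wait: burns CPU for the whole interval (mdelay-like). */
static void busy_wait_ms(long ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000L +
		 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
}

/* Sleeping wait: yields the CPU to other work (msleep-like). */
static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}
]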
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 60fc0fa26fd3..26687f318de6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
}
/* Update result area if supplied */
- if (req->result)
+ if (req->result && rctx->final)
memcpy(req->result, rctx->iv, digest_size);
e_free:
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index e6db8672d89c..05850dfd7940 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -60,10 +60,9 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
{
- if (ccp_version() > CCP_VERSION(3, 0))
- return CCP5_RSA_MAXMOD;
- else
- return CCP_RSA_MAXMOD;
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ctx->u.rsa.n_len;
}
static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8b9b16d433f7..871c9628a2ee 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -47,7 +47,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
}
/* Update result area if supplied */
- if (req->result)
+ if (req->result && rctx->final)
memcpy(req->result, rctx->ctx, digest_size);
e_free:
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 59d4ca4e72d8..1a734bd2070a 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = {
};
static struct dentry *ccp_debugfs_dir;
-static DEFINE_RWLOCK(ccp_debugfs_lock);
+static DEFINE_MUTEX(ccp_debugfs_lock);
#define MAX_NAME_LEN 20
@@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
struct dentry *debugfs_stats;
struct dentry *debugfs_q_instance;
struct dentry *debugfs_q_stats;
- unsigned long flags;
int i;
if (!debugfs_initialized())
return;
- write_lock_irqsave(&ccp_debugfs_lock, flags);
+ mutex_lock(&ccp_debugfs_lock);
if (!ccp_debugfs_dir)
ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- write_unlock_irqrestore(&ccp_debugfs_lock, flags);
+ mutex_unlock(&ccp_debugfs_lock);
if (!ccp_debugfs_dir)
return;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 8b9da58459df..67155cb21636 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -38,7 +38,7 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
-unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
+static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
switch (dma_chan_attr) {
case CCP_DMA_DFLT:
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 406b95329b3d..0ea43cdeb05f 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
return 0;
}
-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
- struct scatterlist *sg, unsigned int sg_offset,
- unsigned int len)
+static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+ struct scatterlist *sg, unsigned int sg_offset,
+ unsigned int len)
{
WARN_ON(!wa->address);
+ if (len > (wa->length - wa_offset))
+ return -EINVAL;
+
scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
0);
+ return 0;
}
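
[Editor's note: ccp_set_dm_area() now returns an error instead of silently copying past the workarea, and every caller in the hunks below is updated to propagate it. A minimal standalone version of that guard; the extra offset check here additionally protects against unsigned wraparound:

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct workarea {
	unsigned char *address;
	size_t length;
};

/* Refuse to copy beyond the end of the workarea, mirroring the
 * len > (wa->length - wa_offset) check added above. */
static int set_area(struct workarea *wa, size_t offset,
		    const void *src, size_t len)
{
	if (offset > wa->length || len > wa->length - offset)
		return -EINVAL;

	memcpy(wa->address + offset, src, len);
	return 0;
}
]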
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
unsigned int len)
{
u8 *p, *q;
+ int rc;
- ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+ rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+ if (rc)
+ return rc;
p = wa->address + wa_offset;
q = p + len - 1;
@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ if (ret)
+ goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
goto e_key;
dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
goto e_src;
}
- ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
- aes->cmac_key_len);
+ ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+ aes->cmac_key_len);
+ if (ret)
+ goto e_src;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -666,7 +679,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ if (ret)
+ goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -685,7 +700,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -777,7 +794,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
goto e_dst;
}
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_dst;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -820,7 +839,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
DMA_BIDIRECTIONAL);
if (ret)
goto e_tag;
- ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+ if (ret)
+ goto e_tag;
ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
ccp_dm_free(&tag);
@@ -914,7 +935,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ if (ret)
+ goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -935,7 +958,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (aes->mode != CCP_AES_MODE_ECB) {
/* Load the AES context - convert to LE */
dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -1113,8 +1138,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
* big endian to little endian.
*/
dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ if (ret)
+ goto e_key;
} else {
/* Version 5 CCPs use a 512-bit space for the key: each portion
* occupies 256 bits, or one entire slot, and is zero-padded.
@@ -1123,9 +1152,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
dm_offset = CCP_SB_BYTES;
pad = dm_offset - xts->key_len;
- ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
- xts->key_len);
+ ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
+ xts->key_len, xts->key_len);
+ if (ret)
+ goto e_key;
}
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -1144,7 +1177,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
goto e_key;
- ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+ ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
@@ -1287,12 +1322,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
len_singlekey = des3->key_len / 3;
- ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
- des3->key, 0, len_singlekey);
- ccp_set_dm_area(&key, dm_offset + len_singlekey,
- des3->key, len_singlekey, len_singlekey);
- ccp_set_dm_area(&key, dm_offset,
- des3->key, 2 * len_singlekey, len_singlekey);
+ ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+ des3->key, 0, len_singlekey);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
+ des3->key, len_singlekey, len_singlekey);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, dm_offset,
+ des3->key, 2 * len_singlekey, len_singlekey);
+ if (ret)
+ goto e_key;
/* Copy the key to the SB */
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
@@ -1320,7 +1361,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Load the context into the LSB */
dm_offset = CCP_SB_BYTES - des3->iv_len;
- ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
+ des3->iv_len);
+ if (ret)
+ goto e_ctx;
if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
@@ -1604,8 +1648,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
}
} else {
/* Restore the context */
- ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
- sb_count * CCP_SB_BYTES);
+ ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+ sb_count * CCP_SB_BYTES);
+ if (ret)
+ goto e_ctx;
}
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
@@ -1927,7 +1973,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
return ret;
- ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+ ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+ if (ret)
+ goto e_mask;
ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index b3afb6cc9d72..d95ec526587a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -367,8 +367,6 @@ e_free:
void *psp_copy_user_blob(u64 __user uaddr, u32 len)
{
- void *data;
-
if (!uaddr || !len)
return ERR_PTR(-EINVAL);
@@ -376,18 +374,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
if (len > SEV_FW_BLOB_MAX_SIZE)
return ERR_PTR(-EINVAL);
- data = kmalloc(len, GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
- goto e_free;
-
- return data;
-
-e_free:
- kfree(data);
- return ERR_PTR(-EFAULT);
+ return memdup_user((void __user *)(uintptr_t)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);
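
[Editor's note: psp_copy_user_blob() collapses the kmalloc + copy_from_user + error-unwind sequence into memdup_user(), which performs exactly that. A userspace analogue of the helper (memdup here is illustrative, not a libc function):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Allocate-and-copy in one step; returns NULL and sets errno on
 * failure, the way memdup_user() returns an ERR_PTR. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p) {
		errno = ENOMEM;
		return NULL;
	}
	memcpy(p, src, len);
	return p;
}
]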
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index eb0da6572720..e0459002eb71 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -252,12 +252,12 @@ struct sp_device *sp_get_psp_master_device(void)
goto unlock;
list_for_each_entry(i, &sp_units, entry) {
- if (i->psp_data)
+ if (i->psp_data && i->get_psp_master_device) {
+ ret = i->get_psp_master_device();
break;
+ }
}
- if (i->get_psp_master_device)
- ret = i->get_psp_master_device();
unlock:
write_unlock_irqrestore(&sp_unit_lock, flags);
return ret;
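
[Editor's note: the sp-dev fix moves the get_psp_master_device() call inside the loop. After a list_for_each_entry() that runs to completion without breaking, the cursor no longer points at a valid entry, so dereferencing it afterwards reads garbage from the list head's container. A plain-C illustration of the safe shape:

#include <stddef.h>

struct unit {
	struct unit *next;
	int has_psp;
	int (*get_master)(void);
};

/* Act on the matching element while the cursor is still valid;
 * never dereference the iterator after the loop ends. */
static int find_psp_master(struct unit *head)
{
	struct unit *u;
	int ret = -1;

	for (u = head; u; u = u->next) {
		if (u->has_psp && u->get_master) {
			ret = u->get_master();
			break;
		}
	}
	return ret;
}
]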
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
new file mode 100644
index 000000000000..bdc27970f95f
--- /dev/null
+++ b/drivers/crypto/ccree/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
+ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
+ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
new file mode 100644
index 000000000000..03f4b9fce556
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -0,0 +1,2718 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <linux/rtnetlink.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_aead.h"
+#include "cc_request_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define template_aead template_u.aead
+
+#define MAX_AEAD_SETKEY_SEQ 12
+#define MAX_AEAD_PROCESS_SEQ 23
+
+#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
+#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
+
+#define AES_CCM_RFC4309_NONCE_SIZE 3
+#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
+
+/* Value of each ICV_CMP byte (of 8) in case of success */
+#define ICV_VERIF_OK 0x01
+
+struct cc_aead_handle {
+ cc_sram_addr_t sram_workspace_addr;
+ struct list_head aead_list;
+};
+
+struct cc_hmac_s {
+ u8 *padded_authkey;
+ u8 *ipad_opad; /* IPAD, OPAD */
+ dma_addr_t padded_authkey_dma_addr;
+ dma_addr_t ipad_opad_dma_addr;
+};
+
+struct cc_xcbc_s {
+ u8 *xcbc_keys; /* K1,K2,K3 */
+ dma_addr_t xcbc_keys_dma_addr;
+};
+
+struct cc_aead_ctx {
+ struct cc_drvdata *drvdata;
+ u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
+ u8 *enckey;
+ dma_addr_t enckey_dma_addr;
+ union {
+ struct cc_hmac_s hmac;
+ struct cc_xcbc_s xcbc;
+ } auth_state;
+ unsigned int enc_keylen;
+ unsigned int auth_keylen;
+ unsigned int authsize; /* Actual (possibly reduced) size of the MAC/ICV */
+ enum drv_cipher_mode cipher_mode;
+ enum cc_flow_mode flow_mode;
+ enum drv_hash_mode auth_mode;
+};
+
+static inline bool valid_assoclen(struct aead_request *req)
+{
+ return ((req->assoclen == 16) || (req->assoclen == 20));
+}
+
+static void cc_aead_exit(struct crypto_aead *tfm)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
+ crypto_tfm_alg_name(&tfm->base));
+
+ /* Unmap enckey buffer */
+ if (ctx->enckey) {
+ dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
+ ctx->enckey_dma_addr);
+ dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ &ctx->enckey_dma_addr);
+ ctx->enckey_dma_addr = 0;
+ ctx->enckey = NULL;
+ }
+
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+
+ if (xcbc->xcbc_keys) {
+ dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
+ xcbc->xcbc_keys,
+ xcbc->xcbc_keys_dma_addr);
+ }
+ dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ &xcbc->xcbc_keys_dma_addr);
+ xcbc->xcbc_keys_dma_addr = 0;
+ xcbc->xcbc_keys = NULL;
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ if (hmac->ipad_opad) {
+ dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
+ hmac->ipad_opad,
+ hmac->ipad_opad_dma_addr);
+ dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ &hmac->ipad_opad_dma_addr);
+ hmac->ipad_opad_dma_addr = 0;
+ hmac->ipad_opad = NULL;
+ }
+ if (hmac->padded_authkey) {
+ dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
+ hmac->padded_authkey,
+ hmac->padded_authkey_dma_addr);
+ dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ &hmac->padded_authkey_dma_addr);
+ hmac->padded_authkey_dma_addr = 0;
+ hmac->padded_authkey = NULL;
+ }
+ }
+}
+
+static int cc_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, aead_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
+ crypto_tfm_alg_name(&tfm->base));
+
+ /* Initialize modes in instance */
+ ctx->cipher_mode = cc_alg->cipher_mode;
+ ctx->flow_mode = cc_alg->flow_mode;
+ ctx->auth_mode = cc_alg->auth_mode;
+ ctx->drvdata = cc_alg->drvdata;
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
+
+ /* Allocate key buffer, cache line aligned */
+ ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
+ &ctx->enckey_dma_addr, GFP_KERNEL);
+ if (!ctx->enckey) {
+ dev_err(dev, "Failed allocating key buffer\n");
+ goto init_failed;
+ }
+ dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
+ ctx->enckey);
+
+ /* Set default authlen value */
+
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+ const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
+
+ /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
+ /* (also used as a temporary for the user key - up to 256 bits) */
+ xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
+ &xcbc->xcbc_keys_dma_addr,
+ GFP_KERNEL);
+ if (!xcbc->xcbc_keys) {
+ dev_err(dev, "Failed allocating buffer for XCBC keys\n");
+ goto init_failed;
+ }
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+ const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
+ dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
+
+ /* Allocate dma-coherent buffer for IPAD + OPAD */
+ hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
+ &hmac->ipad_opad_dma_addr,
+ GFP_KERNEL);
+
+ if (!hmac->ipad_opad) {
+ dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
+ goto init_failed;
+ }
+
+ dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
+ hmac->ipad_opad);
+
+ hmac->padded_authkey = dma_alloc_coherent(dev,
+ MAX_HMAC_BLOCK_SIZE,
+ pkey_dma,
+ GFP_KERNEL);
+
+ if (!hmac->padded_authkey) {
+ dev_err(dev, "failed to allocate padded_authkey\n");
+ goto init_failed;
+ }
+ } else {
+ ctx->auth_state.hmac.ipad_opad = NULL;
+ ctx->auth_state.hmac.padded_authkey = NULL;
+ }
+
+ return 0;
+
+init_failed:
+ cc_aead_exit(tfm);
+ return -ENOMEM;
+}
+
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
+{
+ struct aead_request *areq = (struct aead_request *)cc_req;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ cc_unmap_aead_request(dev, areq);
+
+ /* Restore ordinary iv pointer */
+ areq->iv = areq_ctx->backup_iv;
+
+ if (err)
+ goto done;
+
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
+ ctx->authsize) != 0) {
+ dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
+ ctx->authsize, ctx->cipher_mode);
+ /* In case of payload authentication failure, the
+ * decrypted message MUST NOT be revealed --> zero its
+ * memory.
+ */
+ cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+ err = -EBADMSG;
+ }
+ } else { /*ENCRYPT*/
+ if (areq_ctx->is_icv_fragmented) {
+ u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+
+ cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+ areq_ctx->dst_sgl, skip,
+ (skip + ctx->authsize),
+ CC_SG_FROM_BUF);
+ }
+
+ /* If an IV was generated, copy it back to the user provided
+ * buffer.
+ */
+ if (areq_ctx->backup_giv) {
+ if (ctx->cipher_mode == DRV_CIPHER_CTR)
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_IV_SIZE);
+ else if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
+ }
+ }
+done:
+ aead_request_complete(areq, err);
+}
+
+static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
+ struct cc_aead_ctx *ctx)
+{
+ /* Load the AES key */
+ hw_desc_init(&desc[0]);
+ /* We use the same buffer for the source/user key as for
+ * the output keys, because the user key is no longer
+ * needed once this key load completes.
+ */
+ set_din_type(&desc[0], DMA_DLLI,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
+ NS_BIT);
+ set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_key_size_aes(&desc[0], ctx->auth_keylen);
+ set_flow_mode(&desc[0], S_DIN_to_AES);
+ set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
+
+ hw_desc_init(&desc[1]);
+ set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[1], DIN_AES_DOUT);
+ set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ hw_desc_init(&desc[2]);
+ set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[2], DIN_AES_DOUT);
+ set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ hw_desc_init(&desc[3]);
+ set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[3], DIN_AES_DOUT);
+ set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + 2 * AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ return 4;
+}
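+
[Editor's note: xcbc_setkey() derives the three AES-XCBC subkeys in hardware by encrypting the constant blocks 0x01..01, 0x02..02 and 0x03..03 with the user key, per RFC 3566. A software sketch of the same derivation, assuming OpenSSL's EVP API:

#include <string.h>
#include <openssl/evp.h>

/* Derive K1..K3 per RFC 3566: Ki = AES-K(i i i ... i). */
static int xcbc_derive(const unsigned char key[16],
		       unsigned char out[3][16])
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	unsigned char block[16];
	int len, i, ok = 0;

	if (!ctx)
		return -1;
	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) != 1)
		goto out;
	EVP_CIPHER_CTX_set_padding(ctx, 0);

	for (i = 1; i <= 3; i++) {
		memset(block, i, sizeof(block));
		if (EVP_EncryptUpdate(ctx, out[i - 1], &len,
				      block, sizeof(block)) != 1)
			goto out;
	}
	ok = 1;
out:
	EVP_CIPHER_CTX_free(ctx);
	return ok ? 0 : -1;
}
]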
+
+static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+{
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int digest_ofs = 0;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ unsigned int idx = 0;
+ int i;
+
+ /* calc derived HMAC key */
+ for (i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx],
+ cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode),
+ digest_size);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the current hash length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Prepare ipad key */
+ hw_desc_init(&desc[idx]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ hmac->padded_authkey_dma_addr,
+ SHA256_BLOCK_SIZE, NS_BIT);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_xor_active(&desc[idx]);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get the digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_dout_dlli(&desc[idx],
+ (hmac->ipad_opad_dma_addr + digest_ofs),
+ digest_size, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ idx++;
+
+ digest_ofs += digest_size;
+ }
+
+ return idx;
+}
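+
[Editor's note: hmac_setkey() precomputes the digests of K XOR ipad and K XOR opad once at setkey time, so each request only resumes from the stored states. The XOR step in plain C; 0x36/0x5c are the standard HMAC pad bytes that HMAC_IPAD_CONST/HMAC_OPAD_CONST presumably expand to:

#include <stddef.h>

#define HMAC_IPAD 0x36
#define HMAC_OPAD 0x5c

/* Build the two HMAC pad blocks from a key already padded to the
 * hash block size; each is then run through one hash block to get
 * the resumable states stored in ipad_opad above. */
static void hmac_make_pads(const unsigned char *padded_key,
			   size_t block_size,
			   unsigned char *ipad, unsigned char *opad)
{
	size_t i;

	for (i = 0; i < block_size; i++) {
		ipad[i] = padded_key[i] ^ HMAC_IPAD;
		opad[i] = padded_key[i] ^ HMAC_OPAD;
	}
}
]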
+
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
+ ctx->enc_keylen, ctx->auth_keylen);
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ break;
+ case DRV_HASH_XCBC_MAC:
+ if (ctx->auth_keylen != AES_KEYSIZE_128 &&
+ ctx->auth_keylen != AES_KEYSIZE_192 &&
+ ctx->auth_keylen != AES_KEYSIZE_256)
+ return -ENOTSUPP;
+ break;
+ case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
+ if (ctx->auth_keylen > 0)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ return -EINVAL;
+ }
+ /* Check cipher key size */
+ if (ctx->flow_mode == S_DIN_to_DES) {
+ if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
+ dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ } else { /* Default assumed to be AES ciphers */
+ if (ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_192 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
+ dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ }
+
+ return 0; /* All tests of keys sizes passed */
+}
+
+/* This function prepares the user key so it can be passed to the HMAC
+ * processing (copy to an internal buffer, or hash it if the key is longer
+ * than the block size).
+ */
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ dma_addr_t key_dma_addr = 0;
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+ struct cc_crypto_req cc_req = {};
+ unsigned int blocksize;
+ unsigned int digestsize;
+ unsigned int hashmode;
+ unsigned int idx = 0;
+ int rc = 0;
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+ dma_addr_t padded_authkey_dma_addr =
+ ctx->auth_state.hmac.padded_authkey_dma_addr;
+
+ switch (ctx->auth_mode) { /* auth_key required and >0 */
+ case DRV_HASH_SHA1:
+ blocksize = SHA1_BLOCK_SIZE;
+ digestsize = SHA1_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA1;
+ break;
+ case DRV_HASH_SHA256:
+ default:
+ blocksize = SHA256_BLOCK_SIZE;
+ digestsize = SHA256_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA256;
+ }
+
+ if (keylen != 0) {
+ key_dma_addr = dma_map_single(dev, (void *)key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_din_sram(&desc[idx], larval_addr, digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the current hash length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ key_dma_addr, keylen, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - digestsize));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
+ digestsize), (blocksize - digestsize),
+ NS_BIT, 0);
+ idx++;
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+ keylen, NS_BIT);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen) != 0) {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0,
+ (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (padded_authkey_dma_addr +
+ keylen),
+ (blocksize - keylen), NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ blocksize, NS_BIT, 0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc)
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+
+ if (key_dma_addr)
+ dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+
+ return rc;
+}
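+
[Editor's note: cc_get_plain_hmac_key() implements the standard HMAC key rule: keys longer than the hash block are first hashed, then every key is zero-padded out to the block size. A software equivalent, assuming OpenSSL's one-shot SHA256():

#include <string.h>
#include <openssl/sha.h>

/* HMAC key preprocessing (RFC 2104): hash keys longer than the
 * block, then zero-pad to SHA256_CBLOCK (64 bytes). */
static void hmac_prepare_key(const unsigned char *key, size_t keylen,
			     unsigned char padded[SHA256_CBLOCK])
{
	memset(padded, 0, SHA256_CBLOCK);
	if (keylen > SHA256_CBLOCK)
		SHA256(key, keylen, padded);   /* 32-byte digest */
	else
		memcpy(padded, key, keylen);
}
]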
+
+static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct rtattr *rta = (struct rtattr *)key;
+ struct cc_crypto_req cc_req = {};
+ struct crypto_authenc_key_param *param;
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+ int rc = -EINVAL;
+ unsigned int seq_len = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+ param = RTA_DATA(rta);
+ ctx->enc_keylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < ctx->enc_keylen)
+ goto badkey;
+ ctx->auth_keylen = keylen - ctx->enc_keylen;
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ /* the nonce is stored in the bytes at the end of the key */
+ if (ctx->enc_keylen <
+ (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
+ goto badkey;
+ /* Copy nonce from last 4 bytes in CTR key to
+ * first 4 bytes in CTR IV
+ */
+ memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
+ ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_NONCE_SIZE);
+ /* Set CTR key size */
+ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+ } else { /* non-authenc - has just one key */
+ ctx->enc_keylen = keylen;
+ ctx->auth_keylen = 0;
+ }
+
+ rc = validate_keys_sizes(ctx);
+ if (rc)
+ goto badkey;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ /* Get key material */
+ memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ if (ctx->enc_keylen == 24)
+ memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
+ rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ if (rc)
+ goto badkey;
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ seq_len = hmac_setkey(desc, ctx);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ seq_len = xcbc_setkey(desc, ctx);
+ break;
+ case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
+ break; /* No auth. key setup */
+ default:
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+ rc = -ENOTSUPP;
+ goto badkey;
+ }
+
+ /* STAT_PHASE_3: Submit sequence to HW */
+
+ if (seq_len > 0) { /* For CCM there is no sequence to set up the key */
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
+ if (rc) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ goto setkey_error;
+ }
+ }
+
+ /* Update STAT_PHASE_3 */
+ return rc;
+
+badkey:
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+setkey_error:
+ return rc;
+}
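+
[Editor's note: cc_aead_setkey() parses the authenc() key format: an rtattr header whose payload is a crypto_authenc_key_param holding the big-endian enckeylen, followed by the authentication key and then the encryption key. A hedged sketch of splitting such a blob, with the rtattr framing reduced to just the enckeylen field:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct split_key {
	const uint8_t *authkey;
	size_t authkeylen;
	const uint8_t *enckey;
	size_t enckeylen;
};

/* Simplified authenc() key layout: a big-endian 32-bit enckeylen
 * followed by authkey || enckey. The real format wraps the length
 * in an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM. */
static int split_authenc_key(const uint8_t *key, size_t keylen,
			     struct split_key *out)
{
	uint32_t enckeylen;

	if (keylen < 4)
		return -EINVAL;
	enckeylen = (uint32_t)key[0] << 24 | (uint32_t)key[1] << 16 |
		    (uint32_t)key[2] << 8  | key[3];
	key += 4;
	keylen -= 4;
	if (keylen < enckeylen)
		return -EINVAL;

	out->authkeylen = keylen - enckeylen;
	out->authkey = key;
	out->enckeylen = enckeylen;
	out->enckey = key + out->authkeylen;
	return 0;
}
]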
+
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen < 3)
+ return -EINVAL;
+
+ keylen -= 3;
+ memcpy(ctx->ctr_nonce, key + keylen, 3);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ /* Unsupported auth. sizes */
+ if (authsize == 0 ||
+ authsize > crypto_aead_maxauthsize(authenc)) {
+ return -ENOTSUPP;
+ }
+
+ ctx->authsize = authsize;
+ dev_dbg(dev, "authlen=%d\n", ctx->authsize);
+
+ return 0;
+}
+
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+ unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ switch (assoc_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ dev_dbg(dev, "ASSOC buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
+ areq->assoclen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
+ set_din_not_last_indication(&desc[idx]);
+ break;
+ case CC_DMA_BUF_MLLI:
+ dev_dbg(dev, "ASSOC buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
+ areq_ctx->assoc.mlli_nents, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
+ set_din_not_last_indication(&desc[idx]);
+ break;
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "Invalid ASSOC buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_authen_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size, int direct)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ unsigned int idx = *seq_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ switch (data_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ {
+ struct scatterlist *cipher =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dst_sgl : areq_ctx->src_sgl;
+
+ unsigned int offset =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dst_offset : areq_ctx->src_offset;
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (sg_dma_address(cipher) + offset),
+ areq_ctx->cryptlen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ }
+ case CC_DMA_BUF_MLLI:
+ {
+ /* DOUBLE-PASS flow (the default):
+ * assoc. + iv + data are compacted into one table;
+ * if assoclen is zero, only the IV is processed.
+ */
+ cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ u32 mlli_nents = areq_ctx->assoc.mlli_nents;
+
+ if (areq_ctx->is_single_pass) {
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ mlli_addr = areq_ctx->dst.sram_addr;
+ mlli_nents = areq_ctx->dst.mlli_nents;
+ } else {
+ mlli_addr = areq_ctx->src.sram_addr;
+ mlli_nents = areq_ctx->src.mlli_nents;
+ }
+ }
+
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
+ NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ }
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_cipher_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (areq_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ switch (data_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (sg_dma_address(areq_ctx->src_sgl) +
+ areq_ctx->src_offset), areq_ctx->cryptlen,
+ NS_BIT);
+ set_dout_dlli(&desc[idx],
+ (sg_dma_address(areq_ctx->dst_sgl) +
+ areq_ctx->dst_offset),
+ areq_ctx->cryptlen, NS_BIT, 0);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ case CC_DMA_BUF_MLLI:
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
+ areq_ctx->src.mlli_nents, NS_BIT);
+ set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
+ areq_ctx->dst.mlli_nents, NS_BIT, 0);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_digest_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Get final ICV result */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ hw_desc_init(&desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ } else {
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_mode(&desc[idx], hash_mode);
+ }
+ } else { /*Decrypt*/
+ /* Get ICV out from hardware */
+ hw_desc_init(&desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_aes_not_hash_mode(&desc[idx]);
+ } else {
+ set_cipher_mode(&desc[idx], hash_mode);
+ }
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_set_cipher_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = req_ctx->hw_iv_size;
+ unsigned int idx = *seq_size;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Setup cipher state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_config0(&desc[idx], direct);
+ set_flow_mode(&desc[idx], ctx->flow_mode);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
+ hw_iv_size, NS_BIT);
+ if (ctx->cipher_mode == DRV_CIPHER_CTR)
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ else
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ /* Setup enc. key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_config0(&desc[idx], direct);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], ctx->flow_mode);
+ if (ctx->flow_mode == S_DIN_to_AES) {
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ } else {
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_des(&desc[idx], ctx->enc_keylen);
+ }
+ set_cipher_mode(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size, unsigned int data_flow_mode)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ cc_set_cipher_desc(req, desc, &idx);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* We must wait for DMA to finish writing the ciphertext */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ /* Loading hash ipad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
+ NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int idx = *seq_size;
+
+ /* Loading MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K1 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_proc_header_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ /* Hash associated data */
+ if (req->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+
+ /* Hash IV */
+ *seq_size = idx;
+}
+
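+/* Complete the HMAC construction: pad and store the inner hash result,
+ * load the (opad XOR key) state, and hash the inner digest, yielding
+ * HMAC = H((opad ^ key) || H((ipad ^ key) || data)).
+ */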
+static void cc_proc_scheme_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_cipher_do(&desc[idx], DO_PAD);
+ idx++;
+
+ /* Get final ICV result */
+ hw_desc_init(&desc[idx]);
+ set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_mode(&desc[idx], hash_mode);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
+ digest_size, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ *seq_size = idx;
+}
+
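+/* If any of the request buffers is described by an MLLI table (or the
+ * flow is double-pass), copy that table from host memory into SRAM
+ * before the data descriptors reference it.
+ */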
+static void cc_mlli_to_sram(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
+ !req_ctx->is_single_pass) {
+ dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
+ (unsigned int)ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ /* Copy MLLI table host-to-sram */
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+ }
+}
+
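+/* Select the engine routing for the data path: single-pass flows feed
+ * the cipher and hash engines in one pass, while double-pass flows run
+ * the cipher alone and hash the data separately.
+ */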
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+ enum cc_flow_mode setup_flow_mode,
+ bool is_single_pass)
+{
+ enum cc_flow_mode data_flow_mode;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = is_single_pass ?
+ AES_to_HASH_and_DOUT : DIN_AES_DOUT;
+ else
+ data_flow_mode = is_single_pass ?
+ DES_to_HASH_and_DOUT : DIN_DES_DOUT;
+ } else { /* Decrypt */
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = is_single_pass ?
+ AES_and_HASH : DIN_AES_DOUT;
+ else
+ data_flow_mode = is_single_pass ?
+ DES_and_HASH : DIN_DES_DOUT;
+ }
+
+ return data_flow_mode;
+}
+
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass) {
+ /* Single-pass flow */
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+ return;
+ }
+
+ /* Double-pass flow.
+ * Fallback for modes that cannot run single-pass, e.g. when the
+ * associated data length is not a multiple of a 32-bit word.
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after.. */
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+
+ } else { /*DECRYPT*/
+ /* authenc first..*/
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ /* decrypt after.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* Reading the digest result (with the completion bit set)
+ * must come after the cipher operation.
+ */
+ cc_proc_digest_desc(req, desc, seq_size);
+ }
+}
+
+static void
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass) {
+ /* Single-pass flow */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+ return;
+ }
+
+ /* Double-pass flow.
+ * Fallback for modes that cannot run single-pass, e.g. when the
+ * associated data length is not a multiple of a 32-bit word.
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after.. */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_digest_desc(req, desc, seq_size);
+ } else { /*DECRYPT*/
+ /* authenc first.. */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ /* decrypt after..*/
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* Reading the digest result (with the completion bit set)
+ * must come after the cipher operation.
+ */
+ cc_proc_digest_desc(req, desc, seq_size);
+ }
+}
+
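+/* Check the request lengths against the block-size rules of the
+ * selected mode and decide whether the fast single-pass flow can be
+ * used; sets is_single_pass accordingly.
+ */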
+static int validate_data_size(struct cc_aead_ctx *ctx,
+ enum drv_crypto_direction direct,
+ struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ unsigned int assoclen = req->assoclen;
+ unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ (req->cryptlen - ctx->authsize) : req->cryptlen;
+
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->cryptlen < ctx->authsize)
+ goto data_size_err;
+
+ areq_ctx->is_single_pass = true; /* default to the fast flow */
+
+ switch (ctx->flow_mode) {
+ case S_DIN_to_AES:
+ if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
+ goto data_size_err;
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ break;
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (areq_ctx->plaintext_authenticate_only)
+ areq_ctx->is_single_pass = false;
+ break;
+ }
+
+ if (!IS_ALIGNED(assoclen, sizeof(u32)))
+ areq_ctx->is_single_pass = false;
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR &&
+ !IS_ALIGNED(cipherlen, sizeof(u32)))
+ areq_ctx->is_single_pass = false;
+
+ break;
+ case S_DIN_to_DES:
+ if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
+ goto data_size_err;
+ if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
+ areq_ctx->is_single_pass = false;
+ break;
+ default:
+ dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
+ goto data_size_err;
+ }
+
+ return 0;
+
+data_size_err:
+ return -EINVAL;
+}
+
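+/* Encode the associated-data length l(a) at the start of the A0 block,
+ * per RFC 3610 / NIST SP 800-38C: a 2-byte encoding for lengths below
+ * 2^16 - 2^8, otherwise the 6-byte form prefixed with 0xFF 0xFE.
+ */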
+static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
+{
+ unsigned int len = 0;
+
+ if (header_size == 0)
+ return 0;
+
+ if (header_size < ((1UL << 16) - (1UL << 8))) {
+ len = 2;
+
+ pa0_buff[0] = (header_size >> 8) & 0xFF;
+ pa0_buff[1] = header_size & 0xFF;
+ } else {
+ len = 6;
+
+ pa0_buff[0] = 0xFF;
+ pa0_buff[1] = 0xFE;
+ pa0_buff[2] = (header_size >> 24) & 0xFF;
+ pa0_buff[3] = (header_size >> 16) & 0xFF;
+ pa0_buff[4] = (header_size >> 8) & 0xFF;
+ pa0_buff[5] = header_size & 0xFF;
+ }
+
+ return len;
+}
+
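+/* Write msglen big-endian into the last csize bytes of the (zeroed)
+ * block; fail if the message is too long for the chosen length-field
+ * width.
+ */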
+static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int cipher_flow_mode;
+ dma_addr_t mac_result;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_and_HASH;
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* load key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load ctr state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI,
+ req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load MAC key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* load MAC state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* process assoc data */
+ if (req->assoclen > 0) {
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ sg_dma_address(&req_ctx->ccm_adata_sg),
+ AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+ }
+
+ /* process the cipher */
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
+
+ /* Read temporal MAC */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
+ NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* load AES-CTR state (for last MAC calculation)*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* encrypt the "T" value and store MAC in mac_state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT);
+ set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+ return 0;
+}
+
+static int config_ccm_adata(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int lp = req->iv[0];
+ /* Note: the code assumes that req->iv[0] already contains the value
+ * of L' of RFC 3610.
+ */
+ unsigned int l = lp + 1; /* This is L of RFC 3610 (L = L' + 1). */
+ unsigned int m = ctx->authsize; /* This is M of RFC 3610, the tag size. */
+ u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
+ u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
+ u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ int rc;
+
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+ memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
+
+ /* taken from crypto/ccm.c */
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (l < 2 || l > 8) {
+ dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+ return -EINVAL;
+ }
+ memcpy(b0, req->iv, AES_BLOCK_SIZE);
+
+ /* format control info per RFC 3610 and
+ * NIST Special Publication 800-38C
+ */
+ *b0 |= (8 * ((m - 2) / 2));
+ if (req->assoclen > 0)
+ *b0 |= 64; /* Enable bit 6 if Adata exists. */
+
+ rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write l(m). */
+ if (rc) {
+ dev_err(dev, "message len overflow detected");
+ return rc;
+ }
+ /* END of "taken from crypto/ccm.c" */
+
+ /* l(a) - size of associated data. */
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+
+ memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
+ req->iv[15] = 1;
+
+ memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
+ ctr_count_0[15] = 0;
+
+ return 0;
+}
+
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ /* For RFC 4309, always use 4 bytes for the message length
+ * (at most 2^32-1 bytes), i.e. L = 4 and L' = 3.
+ */
+ memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
+ areq_ctx->ctr_iv[0] = 3;
+
+ /* In RFC 4309 there is an 11-byte nonce+IV part, which we build
+ * here.
+ */
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
+ CCM_BLOCK_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+ CCM_BLOCK_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+ req->assoclen -= CCM_BLOCK_IV_SIZE;
+}
+
+static void cc_set_ghash_desc(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+ /* load key to AES*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* process one zero block to generate hkey */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Load GHASH subkey */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Configure the hash engine to work with GHASH.
+ * Since it was not possible to extend the HASH submodes to add
+ * GHASH, the following command is necessary in order to select
+ * GHASH (according to the HW designers).
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Load the GHASH initial state (which is 0; as for any hash,
+ * there is an initial state).
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+ /* load key to AES*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
+ /* load the AES/CTR initial counter block (counter word = 2) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI,
+ req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
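+/* Finalize GCM: hash the lengths block (len(A) || len(C)) into GHASH,
+ * then GCTR-encrypt the resulting state with the counter block whose
+ * counter word is 1 to produce the authentication tag.
+ */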
+static void cc_proc_gcm_result(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ dma_addr_t mac_result;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* process(ghash) gcm_block_len */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Store GHASH state after GHASH(assoc data + cipher + len block) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* load the AES/CTR initial counter block (counter word = 1) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Memory Barrier */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* process GCTR on the stored GHASH and store the MAC in mac_state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int cipher_flow_mode;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_and_HASH;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ }
+
+ /* In RFC 4543 there is no data to encrypt; just copy data from src to dst. */
+ if (req_ctx->plaintext_authenticate_only) {
+ cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
+ return 0;
+ }
+
+ /* for gcm and rfc4106 */
+ cc_set_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ if (req->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ /* process(gctr+ghash) */
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
+
+ return 0;
+}
+
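+/* Precompute the per-request GCM material: the counter blocks with the
+ * 32-bit counter word set to 2 (payload) and 1 (tag), and the GHASH
+ * lengths block holding len(A) and len(C) in bits.
+ */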
+static int config_gcm_context(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ __be32 counter = cpu_to_be32(2);
+
+ dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req->assoclen, ctx->authsize);
+
+ memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
+
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
+
+ counter = cpu_to_be32(1);
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
+
+ if (!req_ctx->plaintext_authenticate_only) {
+ __be64 temp64;
+
+ temp64 = cpu_to_be64(req->assoclen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = cpu_to_be64(cryptlen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+ } else {
+ /* RFC 4543: all data (AAD, IV, plaintext) is treated as
+ * additional authenticated data, i.e. nothing is encrypted.
+ */
+ __be64 temp64;
+
+ temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
+ cryptlen) * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = 0;
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+ }
+
+ return 0;
+}
+
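+/* Build the combined GCM IV from the 4-byte nonce kept with the key
+ * and the 8-byte per-request IV (RFC 4106/4543), and exclude the IV
+ * bytes from the associated data length.
+ */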
+static void cc_proc_rfc4_gcm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
+ ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+ GCM_BLOCK_RFC4_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+ req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+}
+
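+/* Main AEAD processing routine: validate the request lengths, build
+ * the per-mode IV and configuration blocks, map the buffers for DMA,
+ * construct the descriptor sequence and queue it to the HW.
+ */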
+static int cc_proc_aead(struct aead_request *req,
+ enum drv_crypto_direction direct)
+{
+ int rc = 0;
+ int seq_len = 0;
+ struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+
+ dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
+ ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
+ ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+ sg_virt(req->dst), req->dst->offset, req->cryptlen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ /* Check data length according to mode */
+ if (validate_data_size(ctx, direct, req)) {
+ dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
+ req->cryptlen, req->assoclen);
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ return -EINVAL;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_aead_complete;
+ cc_req.user_arg = (void *)req;
+
+ /* Setup request context */
+ areq_ctx->gen_ctx.op_type = direct;
+ areq_ctx->req_authsize = ctx->authsize;
+ areq_ctx->cipher_mode = ctx->cipher_mode;
+
+ /* STAT_PHASE_1: Map buffers */
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ /* Build CTR IV - Copy nonce from last 4 bytes in
+ * CTR key to first 4 bytes in CTR IV
+ */
+ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
+ CTR_RFC3686_NONCE_SIZE);
+ if (!areq_ctx->backup_giv) /* user-provided (non-generated) IV */
+ memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
+ req->iv, CTR_RFC3686_IV_SIZE);
+ /* Initialize counter portion of counter block */
+ *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+
+ /* Replace with counter iv */
+ req->iv = areq_ctx->ctr_iv;
+ areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
+ } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
+ (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
+ areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
+ if (areq_ctx->ctr_iv != req->iv) {
+ memcpy(areq_ctx->ctr_iv, req->iv,
+ crypto_aead_ivsize(tfm));
+ req->iv = areq_ctx->ctr_iv;
+ }
+ } else {
+ areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
+ }
+
+ if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ rc = config_ccm_adata(req);
+ if (rc) {
+ dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
+ rc);
+ goto exit;
+ }
+ } else {
+ areq_ctx->ccm_hdr_size = ccm_header_size_null;
+ }
+
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ rc = config_gcm_context(req);
+ if (rc) {
+ dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
+ rc);
+ goto exit;
+ }
+ }
+
+ rc = cc_map_aead_request(ctx->drvdata, req);
+ if (rc) {
+ dev_err(dev, "map_request() failed\n");
+ goto exit;
+ }
+
+ /* do we need to generate IV? */
+ if (areq_ctx->backup_giv) {
+ /* set the DMA mapped IV address*/
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CTR_RFC3686_NONCE_SIZE;
+ cc_req.ivgen_dma_addr_len = 1;
+ } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ /* In CCM, the IV needs to exist both inside B0 and
+ * inside the counter. It is also copied to iv_dma_addr
+ * for other reasons (like returning it to the user),
+ * so 3 (identical) IV outputs are used.
+ */
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[1] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[2] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr_len = 3;
+ } else {
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
+ }
+
+ /* set the IV size (8/16 B long) */
+ cc_req.ivgen_size = crypto_aead_ivsize(tfm);
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ /* Load MLLI tables to SRAM if necessary */
+ cc_mlli_to_sram(req, desc, &seq_len);
+
+ /* TODO: move seq_len by reference */
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ cc_hmac_authenc(req, desc, &seq_len);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ cc_xcbc_authenc(req, desc, &seq_len);
+ break;
+ case DRV_HASH_NULL:
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ cc_ccm(req, desc, &seq_len);
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR)
+ cc_gcm(req, desc, &seq_len);
+ break;
+ default:
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+ cc_unmap_aead_request(dev, req);
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
+
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_aead_request(dev, req);
+ }
+
+exit:
+ return rc;
+}
+
+static int cc_aead_encrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = true;
+
+ cc_proc_rfc4309_ccm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_aead_decrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->is_gcm4543 = true;
+ cc_proc_rfc4309_ccm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+out:
+ return rc;
+}
+
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
+
+ if (authsize != 16)
+ return -EINVAL;
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_encrypt() above. */
+
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* the plaintext is not encrypted with rfc4543 */
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_decrypt() above. */
+
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_decrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* the plaintext is not decrypted with rfc4543 */
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+/* aead alg */
+static struct cc_alg_template aead_algs[] = {
+ {
+ .name = "authenc(hmac(sha1),cbc(aes))",
+ .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(xcbc(aes),cbc(aes))",
+ .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ccm(aes)",
+ .driver_name = "ccm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_ccm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "rfc4309(ccm(aes))",
+ .driver_name = "rfc4309-ccm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_rfc4309_ccm_setkey,
+ .setauthsize = cc_rfc4309_ccm_setauthsize,
+ .encrypt = cc_rfc4309_ccm_encrypt,
+ .decrypt = cc_rfc4309_ccm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CCM_BLOCK_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_gcm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "rfc4106(gcm(aes))",
+ .driver_name = "rfc4106-gcm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_rfc4106_gcm_setkey,
+ .setauthsize = cc_rfc4106_gcm_setauthsize,
+ .encrypt = cc_rfc4106_gcm_encrypt,
+ .decrypt = cc_rfc4106_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "rfc4543(gcm(aes))",
+ .driver_name = "rfc4543-gcm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_rfc4543_gcm_setkey,
+ .setauthsize = cc_rfc4543_gcm_setauthsize,
+ .encrypt = cc_rfc4543_gcm_encrypt,
+ .decrypt = cc_rfc4543_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+};
+
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+ struct device *dev)
+{
+ struct cc_crypto_alg *t_alg;
+ struct aead_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &tmpl->template_aead;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CC_CRA_PRIO;
+
+ alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ tmpl->type;
+ alg->init = cc_aead_init;
+ alg->exit = cc_aead_exit;
+
+ t_alg->aead_alg = *alg;
+
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+ t_alg->auth_mode = tmpl->auth_mode;
+
+ return t_alg;
+}
+
+int cc_aead_free(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_aead_handle *aead_handle =
+ (struct cc_aead_handle *)drvdata->aead_handle;
+
+ if (aead_handle) {
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
+ entry) {
+ crypto_unregister_aead(&t_alg->aead_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+ kfree(aead_handle);
+ drvdata->aead_handle = NULL;
+ }
+
+ return 0;
+}
+
+int cc_aead_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_aead_handle *aead_handle;
+ struct cc_crypto_alg *t_alg;
+ int rc = -ENOMEM;
+ int alg;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
+ if (!aead_handle) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
+ INIT_LIST_HEAD(&aead_handle->aead_list);
+ drvdata->aead_handle = aead_handle;
+
+ aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+ MAX_HMAC_DIGEST_SIZE);
+
+ if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
+ dev_err(dev, "SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Linux crypto */
+ for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
+ if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
+ continue;
+
+ t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ aead_algs[alg].driver_name);
+ goto fail1;
+ }
+ t_alg->drvdata = drvdata;
+ rc = crypto_register_aead(&t_alg->aead_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->aead_alg.base.cra_driver_name);
+ goto fail2;
+ } else {
+ list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->aead_alg.base.cra_driver_name);
+ }
+ }
+
+ return 0;
+
+fail2:
+ kfree(t_alg);
+fail1:
+ cc_aead_free(drvdata);
+fail0:
+ return rc;
+}
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
new file mode 100644
index 000000000000..5edf3b351fa4
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_aead.h
+ * ARM CryptoCell AEAD Crypto API
+ */
+
+#ifndef __CC_AEAD_H__
+#define __CC_AEAD_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+
+/* mac_cmp - HW writes 8 B but all bytes hold the same value */
+#define ICV_CMP_SIZE 8
+#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
+#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
+
+/* defines for AES GCM configuration buffer */
+#define GCM_BLOCK_LEN_SIZE 8
+
+#define GCM_BLOCK_RFC4_IV_OFFSET 4
+#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
+#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
+#define GCM_BLOCK_RFC4_NONCE_SIZE 4
+
+/* Offsets into AES CCM configuration buffer */
+#define CCM_B0_OFFSET 0
+#define CCM_A0_OFFSET 16
+#define CCM_CTR_COUNT_0_OFFSET 32
+/* CCM B0 and CTR_COUNT constants. */
+#define CCM_BLOCK_NONCE_OFFSET 1 /* Nonce offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_NONCE_SIZE 3 /* Nonce size inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_OFFSET 4 /* IV offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_SIZE 8 /* IV size inside B0 and CTR_COUNT */
+
+enum aead_ccm_header_size {
+ ccm_header_size_null = -1,
+ ccm_header_size_zero = 0,
+ ccm_header_size_2 = 2,
+ ccm_header_size_6 = 6,
+ ccm_header_size_max = S32_MAX
+};
+
+struct aead_req_ctx {
+ /* Allocate a full cache line although only 4 bytes are needed, to
+ * ensure that the next field starts on a cache line boundary.
+ * Used for both the digest HW compare and the CCM/GCM MAC value.
+ */
+ u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
+ u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
+
+ /* used in gcm */
+ u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
+ u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
+ u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
+ struct {
+ u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+ u8 len_c[GCM_BLOCK_LEN_SIZE];
+ } gcm_len_block;
+
+ u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
+ /* HW actual size input */
+ unsigned int hw_iv_size ____cacheline_aligned;
+ /* used to prevent cache coherence problem */
+ u8 backup_mac[MAX_MAC_SIZE];
+ u8 *backup_iv; /* store iv for generated IV flow */
+ u8 *backup_giv; /* store iv for rfc3686(ctr) flow */
+ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
+ /* buffer for internal ccm configurations */
+ dma_addr_t ccm_iv0_dma_addr;
+ dma_addr_t icv_dma_addr; /* Phys. address of ICV */
+
+ /* used in gcm */
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc1_dma_addr;
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc2_dma_addr;
+ dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
+ dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
+ bool is_gcm4543;
+
+ u8 *icv_virt_addr; /* Virt. address of ICV */
+ struct async_gen_req_ctx gen_ctx;
+ struct cc_mlli assoc;
+ struct cc_mlli src;
+ struct cc_mlli dst;
+ struct scatterlist *src_sgl;
+ struct scatterlist *dst_sgl;
+ unsigned int src_offset;
+ unsigned int dst_offset;
+ enum cc_req_dma_buf_type assoc_buff_type;
+ enum cc_req_dma_buf_type data_buff_type;
+ struct mlli_params mlli_params;
+ unsigned int cryptlen;
+ struct scatterlist ccm_adata_sg;
+ enum aead_ccm_header_size ccm_hdr_size;
+ unsigned int req_authsize;
+ enum drv_cipher_mode cipher_mode;
+ bool is_icv_fragmented;
+ bool is_single_pass;
+ bool plaintext_authenticate_only; /* for gcm_rfc4543 */
+};
+
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
+
+#endif /*__CC_AEAD_H__*/
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
new file mode 100644
index 000000000000..b32577477b4c
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -0,0 +1,1651 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+
+#include "cc_buffer_mgr.h"
+#include "cc_lli_defs.h"
+#include "cc_cipher.h"
+#include "cc_hash.h"
+#include "cc_aead.h"
+
+enum dma_buffer_type {
+ DMA_NULL_TYPE = -1,
+ DMA_SGL_TYPE = 1,
+ DMA_BUFF_TYPE = 2,
+};
+
+struct buff_mgr_handle {
+ struct dma_pool *mlli_buffs_pool;
+};
+
+union buffer_array_entry {
+ struct scatterlist *sgl;
+ dma_addr_t buffer_dma;
+};
+
+struct buffer_array {
+ unsigned int num_of_buffers;
+ union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+};
+
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
+{
+ switch (type) {
+ case CC_DMA_BUF_NULL:
+ return "BUF_NULL";
+ case CC_DMA_BUF_DLLI:
+ return "BUF_DLLI";
+ case CC_DMA_BUF_MLLI:
+ return "BUF_MLLI";
+ default:
+ return "BUF_INVALID";
+ }
+}
+
+/**
+ * cc_copy_mac() - Copy MAC to temporary location
+ *
+ * @dev: device object
+ * @req: aead request object
+ * @dir: [IN] copy from/to sgl
+ */
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+ enum cc_sg_cpy_direct dir)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u32 skip = req->assoclen + req->cryptlen;
+
+ if (areq_ctx->is_gcm4543)
+ skip += crypto_aead_ivsize(tfm);
+
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+ (skip - areq_ctx->req_authsize), skip, dir);
+}
+
+/**
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
+ *
+ * @dev: device object
+ * @sg_list: SG list
+ * @nbytes: [IN] Total SGL data bytes.
+ * @lbytes: [OUT] Returns the amount of bytes at the last entry
+ * @is_chained: [OUT] set to true if a chained SGL is encountered
+ */
+static unsigned int cc_get_sgl_nents(struct device *dev,
+ struct scatterlist *sg_list,
+ unsigned int nbytes, u32 *lbytes,
+ bool *is_chained)
+{
+ unsigned int nents = 0;
+
+ while (nbytes && sg_list) {
+ if (sg_list->length) {
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
+ } else {
+ sg_list = (struct scatterlist *)sg_page(sg_list);
+ if (is_chained)
+ *is_chained = true;
+ }
+ }
+ dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
+ return nents;
+}
+
+/**
+ * cc_zero_sgl() - Zero scatter list data.
+ *
+ * @sgl: scatter list to zero
+ * @data_len: number of bytes to zero
+ */
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
+{
+ struct scatterlist *current_sg = sgl;
+ int sg_index = 0;
+
+ while (sg_index <= data_len) {
+ if (!current_sg) {
+ /* reached the end of the sgl --> just return back */
+ return;
+ }
+ memset(sg_virt(current_sg), 0, current_sg->length);
+ sg_index += current_sg->length;
+ current_sg = sg_next(current_sg);
+ }
+}
+
+/**
+ * cc_copy_sg_portion() - Copy scatter list data,
+ * from to_skip to end, to dest and vice versa
+ *
+ * @dev: device object
+ * @dest: flat buffer to copy to/from
+ * @sg: scatter list
+ * @to_skip: offset (in bytes) at which to start copying
+ * @end: offset (in bytes) at which to stop copying
+ * @direct: copy direction (scatter list to buffer or back)
+ */
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
+{
+ u32 nents, lbytes;
+
+ nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+ (direct == CC_SG_TO_BUF));
+}
+
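+/* Emit MLLI entries for one contiguous DMA buffer, splitting it into
+ * chunks of at most CC_MAX_MLLI_ENTRY_SIZE (64 KB) each.
+ */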
+static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
+ u32 buff_size, u32 *curr_nents,
+ u32 **mlli_entry_pp)
+{
+ u32 *mlli_entry_p = *mlli_entry_pp;
+ u32 new_nents;
+
+ /* Verify there is no memory overflow */
+ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+ return -ENOMEM;
+
+ /* handle a buffer longer than 64 KB */
+ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
+ cc_lli_set_addr(mlli_entry_p, buff_dma);
+ cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
+ buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
+ mlli_entry_p = mlli_entry_p + 2;
+ (*curr_nents)++;
+ }
+ /* Last entry */
+ cc_lli_set_addr(mlli_entry_p, buff_dma);
+ cc_lli_set_size(mlli_entry_p, buff_size);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p = mlli_entry_p + 2;
+ *mlli_entry_pp = mlli_entry_p;
+ (*curr_nents)++;
+ return 0;
+}
+
+static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
+ u32 sgl_data_len, u32 sgl_offset,
+ u32 *curr_nents, u32 **mlli_entry_pp)
+{
+ struct scatterlist *curr_sgl = sgl;
+ u32 *mlli_entry_p = *mlli_entry_pp;
+ s32 rc = 0;
+
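+ /* sgl_offset applies to the first entry only; subsequent
+ * entries are consumed from offset 0
+ */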
+ for ( ; (curr_sgl && sgl_data_len);
+ curr_sgl = sg_next(curr_sgl)) {
+ u32 entry_data_len =
+ (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
+ sg_dma_len(curr_sgl) - sgl_offset :
+ sgl_data_len;
+ sgl_data_len -= entry_data_len;
+ rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+ sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
+ if (rc)
+ return rc;
+
+ sgl_offset = 0;
+ }
+ *mlli_entry_pp = mlli_entry_p;
+ return 0;
+}
+
+static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
+ struct mlli_params *mlli_params, gfp_t flags)
+{
+ u32 *mlli_p;
+ u32 total_nents = 0, prev_total_nents = 0;
+ int rc = 0, i;
+
+ dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
+
+ /* Allocate memory from the pointed pool */
+ mlli_params->mlli_virt_addr =
+ dma_pool_alloc(mlli_params->curr_pool, flags,
+ &mlli_params->mlli_dma_addr);
+ if (!mlli_params->mlli_virt_addr) {
+ dev_err(dev, "dma_pool_alloc() failed\n");
+ rc = -ENOMEM;
+ goto build_mlli_exit;
+ }
+ /* Point to start of MLLI */
+ mlli_p = (u32 *)mlli_params->mlli_virt_addr;
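+ /* mlli_p advances as each buffer/sgl is rendered into two-word entries */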
+ /* Go over all SGs and link them into one MLLI table */
+ for (i = 0; i < sg_data->num_of_buffers; i++) {
+ union buffer_array_entry *entry = &sg_data->entry[i];
+ u32 tot_len = sg_data->total_data_len[i];
+ u32 offset = sg_data->offset[i];
+
+ if (sg_data->type[i] == DMA_SGL_TYPE)
+ rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
+ offset, &total_nents,
+ &mlli_p);
+ else /*DMA_BUFF_TYPE*/
+ rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
+ tot_len, &total_nents,
+ &mlli_p);
+ if (rc)
+ return rc;
+
+ /* set last bit in the current table */
+ if (sg_data->mlli_nents[i]) {
+ /* Calculate the current MLLI table length for the
+ * length field in the descriptor
+ */
+ *sg_data->mlli_nents[i] +=
+ (total_nents - prev_total_nents);
+ prev_total_nents = total_nents;
+ }
+ }
+
+ /* Set MLLI size for the bypass operation */
+ mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
+
+ dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
+ mlli_params->mlli_len);
+
+build_mlli_exit:
+ return rc;
+}
+
+static void cc_add_buffer_entry(struct device *dev,
+ struct buffer_array *sgl_data,
+ dma_addr_t buffer_dma, unsigned int buffer_len,
+ bool is_last_entry, u32 *mlli_nents)
+{
+ unsigned int index = sgl_data->num_of_buffers;
+
+ dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
+ index, &buffer_dma, buffer_len, is_last_entry);
+ sgl_data->nents[index] = 1;
+ sgl_data->entry[index].buffer_dma = buffer_dma;
+ sgl_data->offset[index] = 0;
+ sgl_data->total_data_len[index] = buffer_len;
+ sgl_data->type[index] = DMA_BUFF_TYPE;
+ sgl_data->is_last[index] = is_last_entry;
+ sgl_data->mlli_nents[index] = mlli_nents;
+ if (sgl_data->mlli_nents[index])
+ *sgl_data->mlli_nents[index] = 0;
+ sgl_data->num_of_buffers++;
+}
+
+static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
+ unsigned int nents, struct scatterlist *sgl,
+ unsigned int data_len, unsigned int data_offset,
+ bool is_last_table, u32 *mlli_nents)
+{
+ unsigned int index = sgl_data->num_of_buffers;
+
+ dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ index, nents, sgl, data_len, is_last_table);
+ sgl_data->nents[index] = nents;
+ sgl_data->entry[index].sgl = sgl;
+ sgl_data->offset[index] = data_offset;
+ sgl_data->total_data_len[index] = data_len;
+ sgl_data->type[index] = DMA_SGL_TYPE;
+ sgl_data->is_last[index] = is_last_table;
+ sgl_data->mlli_nents[index] = mlli_nents;
+ if (sgl_data->mlli_nents[index])
+ *sgl_data->mlli_nents[index] = 0;
+ sgl_data->num_of_buffers++;
+}
+
+static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
+ enum dma_data_direction direction)
+{
+ u32 i, j;
+ struct scatterlist *l_sg = sg;
+
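+ /* Map one entry at a time; a chained sgl cannot be handed
+ * to dma_map_sg() in a single call
+ */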
+ for (i = 0; i < nents; i++) {
+ if (!l_sg)
+ break;
+ if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
+ dev_err(dev, "dma_map_page() sg buffer failed\n");
+ goto err;
+ }
+ l_sg = sg_next(l_sg);
+ }
+ return nents;
+
+err:
+ /* Restore mapped parts */
+ for (j = 0; j < i; j++) {
+ if (!sg)
+ break;
+ dma_unmap_sg(dev, sg, 1, direction);
+ sg = sg_next(sg);
+ }
+ return 0;
+}
+
+static int cc_map_sg(struct device *dev, struct scatterlist *sg,
+ unsigned int nbytes, int direction, u32 *nents,
+ u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
+{
+ bool is_chained = false;
+
+ if (sg_is_last(sg)) {
+ /* One entry only case - set to DLLI */
+ if (dma_map_sg(dev, sg, 1, direction) != 1) {
+ dev_err(dev, "dma_map_sg() single buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
+ sg->offset, sg->length);
+ *lbytes = nbytes;
+ *nents = 1;
+ *mapped_nents = 1;
+ } else { /* !sg_is_last */
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
+ &is_chained);
+ if (*nents > max_sg_nents) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ *nents, max_sg_nents);
+ *nents = 0;
+ return -ENOMEM;
+ }
+ if (!is_chained) {
+ /* With an IOMMU the number of mapped nents may
+ * differ from the original sgl nents
+ */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (*mapped_nents == 0) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
+ }
+ } else {
+ /* In this case the driver maps entry by entry, so it
+ * must have the same nents before and after the map
+ */
+ *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
+ direction);
+ if (*mapped_nents != *nents) {
+ *nents = *mapped_nents;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
+ u8 *config_data, struct buffer_array *sg_data,
+ unsigned int assoclen)
+{
+ dev_dbg(dev, " handle additional data config set to DLLI\n");
+ /* create sg for the current buffer */
+ sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
+ AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+ if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
+ dev_err(dev, "dma_map_sg() config buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(&areq_ctx->ccm_adata_sg),
+ sg_page(&areq_ctx->ccm_adata_sg),
+ sg_virt(&areq_ctx->ccm_adata_sg),
+ areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
+ /* prepare for case of MLLI */
+ if (assoclen > 0) {
+ cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
+ }
+ return 0;
+}
+
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff, u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
+{
+ dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
+ /* create sg for the current buffer */
+ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+ if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+ dev_err(dev, "dma_map_sg() src buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+ sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+ areq_ctx->buff_sg->length);
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->in_nents = 0;
+ /* prepare for case of MLLI */
+ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+ false, NULL);
+ return 0;
+}
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize, struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+
+ if (req_ctx->gen_ctx.iv_dma_addr) {
+ dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+ &req_ctx->gen_ctx.iv_dma_addr, ivsize);
+ dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+ ivsize,
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
+ /* Release pool */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+ req_ctx->mlli_params.mlli_virt_addr) {
+ dma_pool_free(req_ctx->mlli_params.curr_pool,
+ req_ctx->mlli_params.mlli_virt_addr,
+ req_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+
+ if (src != dst) {
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+ }
+}
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags)
+{
+ struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+ struct mlli_params *mlli_params = &req_ctx->mlli_params;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct buffer_array sg_data;
+ u32 dummy = 0;
+ int rc = 0;
+ u32 mapped_nents = 0;
+
+ req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+ /* Map IV buffer */
+ if (ivsize) {
+ dump_byte_array("iv", (u8 *)info, ivsize);
+ req_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, (void *)info,
+ ivsize,
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ ivsize, info);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+ } else {
+ req_ctx->gen_ctx.iv_dma_addr = 0;
+ }
+
+ /* Map the src SGL */
+ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ if (rc) {
+ rc = -ENOMEM;
+ goto cipher_exit;
+ }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (src == dst) {
+ /* Handle inplace operation */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ req_ctx->out_nents = 0;
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ }
+ } else {
+ /* Map the dst sg */
+ if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
+ rc = -ENOMEM;
+ goto cipher_exit;
+ }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+ nbytes, 0, true,
+ &req_ctx->out_mlli_nents);
+ }
+ }
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto cipher_exit;
+ }
+
+ dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+ cc_dma_buf_type(req_ctx->dma_buf_type));
+
+ return 0;
+
+cipher_exit:
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ return rc;
+}
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ u32 dummy;
+ bool chained;
+ u32 size_to_unmap = 0;
+
+ if (areq_ctx->mac_buf_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
+ MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (areq_ctx->hkey_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ if (areq_ctx->gcm_block_len_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc1_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc2_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+ }
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if (areq_ctx->ccm_iv0_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
+ }
+ if (areq_ctx->gen_ctx.iv_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
+ hw_iv_size, DMA_BIDIRECTIONAL);
+ }
+
+ /* In case a pool was set, a table was
+ * allocated and should be released
+ */
+ if (areq_ctx->mlli_params.curr_pool) {
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+ req->assoclen, req->cryptlen);
+ size_to_unmap = req->assoclen + req->cryptlen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_unmap += areq_ctx->req_authsize;
+ if (areq_ctx->is_gcm4543)
+ size_to_unmap += crypto_aead_ivsize(tfm);
+
+ dma_unmap_sg(dev, req->src,
+ cc_get_sgl_nents(dev, req->src, size_to_unmap,
+ &dummy, &chained),
+ DMA_BIDIRECTIONAL);
+ if (req->src != req->dst) {
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ sg_virt(req->dst));
+ dma_unmap_sg(dev, req->dst,
+ cc_get_sgl_nents(dev, req->dst, size_to_unmap,
+ &dummy, &chained),
+ DMA_BIDIRECTIONAL);
+ }
+ if (drvdata->coherent &&
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst) {
+ /* Copy the mac back from its temporary location to deal
+ * with possible data corruption caused by cache coherency
+ * problems.
+ */
+ cc_copy_mac(dev, req, CC_SG_FROM_BUF);
+ }
+}
+
+static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
+ unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size,
+ bool *is_icv_fragmented)
+{
+ unsigned int icv_max_size = 0;
+ unsigned int icv_required_size = authsize > last_entry_data_size ?
+ (authsize - last_entry_data_size) :
+ authsize;
+ int nents; /* -1 on unsupported fragmentation */
+ unsigned int i;
+
+ if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
+ *is_icv_fragmented = false;
+ return 0;
+ }
+
+ for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
+ if (!sgl)
+ break;
+ sgl = sg_next(sgl);
+ }
+
+ if (sgl)
+ icv_max_size = sgl->length;
+
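+ /* Decide how many trailing entries the ICV occupies: none if it
+ * shares the last data entry, one or two if it is fragmented
+ */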
+ if (last_entry_data_size > authsize) {
+ /* ICV attached to data in last entry (not fragmented!) */
+ nents = 0;
+ *is_icv_fragmented = false;
+ } else if (last_entry_data_size == authsize) {
+ /* ICV placed in whole last entry (not fragmented!) */
+ nents = 1;
+ *is_icv_fragmented = false;
+ } else if (icv_max_size > icv_required_size) {
+ nents = 1;
+ *is_icv_fragmented = true;
+ } else if (icv_max_size == icv_required_size) {
+ nents = 2;
+ *is_icv_fragmented = true;
+ } else {
+ dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
+ MAX_ICV_NENTS_SUPPORTED);
+ nents = -1; /* unsupported */
+ }
+ dev_dbg(dev, "is_frag=%s icv_nents=%d\n",
+ (*is_icv_fragmented ? "true" : "false"), nents);
+
+ return nents;
+}
+
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+
+ if (!req->iv) {
+ areq_ctx->gen_ctx.iv_dma_addr = 0;
+ goto chain_iv_exit;
+ }
+
+ areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
+ hw_iv_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ hw_iv_size, req->iv);
+ rc = -ENOMEM;
+ goto chain_iv_exit;
+ }
+
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
+ /* TODO: what about CTR mode? */
+ if (do_chain && areq_ctx->plaintext_authenticate_only) {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
+ unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
+ /* Chain to given list */
+ cc_add_buffer_entry(dev, sg_data,
+ (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
+ iv_size_to_authenc, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+ }
+
+chain_iv_exit:
+ return rc;
+}
+
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = 0;
+ u32 mapped_nents = 0;
+ struct scatterlist *current_sg = req->src;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int sg_index = 0;
+ u32 size_of_assoc = req->assoclen;
+ struct device *dev = drvdata_to_dev(drvdata);
+
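+ /* When gcm4543 is in effect the IV is mapped in-line ahead of
+ * the data, so it counts toward the associated data span
+ */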
+ if (areq_ctx->is_gcm4543)
+ size_of_assoc += crypto_aead_ivsize(tfm);
+
+ if (!sg_data) {
+ rc = -EINVAL;
+ goto chain_assoc_exit;
+ }
+
+ if (req->assoclen == 0) {
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
+ areq_ctx->assoc.nents = 0;
+ areq_ctx->assoc.mlli_nents = 0;
+ dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ goto chain_assoc_exit;
+ }
+
+ /* Iterate over the sgl to see how many entries are for the
+ * associated data; it is assumed that if we reach here, the
+ * sgl is already mapped
+ */
+ sg_index = current_sg->length;
+ /* The first entry in the scatterlist contains all the associated data */
+ if (sg_index > size_of_assoc) {
+ mapped_nents++;
+ } else {
+ while (sg_index <= size_of_assoc) {
+ current_sg = sg_next(current_sg);
+ /* If we have reached the end of the sgl, this is
+ * unexpected
+ */
+ if (!current_sg) {
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
+ }
+ sg_index += current_sg->length;
+ mapped_nents++;
+ }
+ }
+ if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->assoc.nents = mapped_nents;
+
+ /* In the CCM case we have an additional entry for
+ * the ccm header configuration
+ */
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+ dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ rc = -ENOMEM;
+ goto chain_assoc_exit;
+ }
+ }
+
+ if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
+ else
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+
+ if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+ dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+ req->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+ }
+
+chain_assoc_exit:
+ return rc;
+}
+
+static void cc_prepare_aead_data_dlli(struct aead_request *req,
+ u32 *src_last_bytes, u32 *dst_last_bytes)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+
+ areq_ctx->is_icv_fragmented = false;
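+ /* For in-place and decrypt flows the ICV sits at the tail of src;
+ * for non-in-place encrypt it is written to the tail of dst
+ */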
+ if (req->src == req->dst) {
+ /*INPLACE*/
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /*NON-INPLACE and DECRYPT*/
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ } else {
+ /*NON-INPLACE and ENCRYPT*/
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
+ (*dst_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
+ (*dst_last_bytes - authsize);
+ }
+}
+
+static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ int rc = 0, icv_nents;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct scatterlist *sg;
+
+ if (req->src == req->dst) {
+ /*INPLACE*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ if (areq_ctx->is_icv_fragmented) {
+ /* Backup happens only when ICV is fragmented, ICV
+ * verification is made by CPU compare in order to
+ * simplify MAC verification upon request completion
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /* On coherent platforms (e.g. ACP) the ICV was
+ * already copied for any in-place decrypt
+ * operation, so we must skip the copy here.
+ */
+ if (!drvdata->coherent)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+ } else {
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ areq_ctx->icv_dma_addr =
+ areq_ctx->mac_buf_dma_addr;
+ }
+ } else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+ /* Should handle the case where the sg is not contiguous */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /*NON-INPLACE and DECRYPT*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ /* Backup happens only when ICV is fragmented, ICV
+ * verification is made by CPU compare in order to simplify
+ * MAC verification upon request completion
+ */
+ if (areq_ctx->is_icv_fragmented) {
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+
+ } else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+ /* Should handle the case where the sg is not contiguous */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else {
+ /*NON-INPLACE and ENCRYPT*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
+ areq_ctx->dst.nents,
+ authsize, *dst_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ if (!areq_ctx->is_icv_fragmented) {
+ sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
+ /* Contig. ICV */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*dst_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*dst_last_bytes - authsize);
+ } else {
+ areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ }
+ }
+
+prepare_data_mlli_exit:
+ return rc;
+}
+
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last_table, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(drvdata);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ unsigned int src_last_bytes = 0, dst_last_bytes = 0;
+ int rc = 0;
+ u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+ u32 offset = 0;
+ /* non-inplace mode */
+ unsigned int size_for_map = req->assoclen + req->cryptlen;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u32 sg_index = 0;
+ bool chained = false;
+ bool is_gcm4543 = areq_ctx->is_gcm4543;
+ u32 size_to_skip = req->assoclen;
+
+ if (is_gcm4543)
+ size_to_skip += crypto_aead_ivsize(tfm);
+
+ offset = size_to_skip;
+
+ if (!sg_data)
+ return -EINVAL;
+
+ areq_ctx->src_sgl = req->src;
+ areq_ctx->dst_sgl = req->dst;
+
+ if (is_gcm4543)
+ size_for_map += crypto_aead_ivsize(tfm);
+
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+ &src_last_bytes, &chained);
+ sg_index = areq_ctx->src_sgl->length;
+ //check where the data starts
+ while (sg_index <= size_to_skip) {
+ offset -= areq_ctx->src_sgl->length;
+ areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
+ //if we have reached the end of the sgl, this is unexpected
+ if (!areq_ctx->src_sgl) {
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
+ }
+ sg_index += areq_ctx->src_sgl->length;
+ src_mapped_nents--;
+ }
+ if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+
+ areq_ctx->src.nents = src_mapped_nents;
+
+ areq_ctx->src_offset = offset;
+
+ if (req->src != req->dst) {
+ size_for_map = req->assoclen + req->cryptlen;
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ if (is_gcm4543)
+ size_for_map += crypto_aead_ivsize(tfm);
+
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+ if (rc) {
+ rc = -ENOMEM;
+ goto chain_data_exit;
+ }
+ }
+
+ dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+ &dst_last_bytes, &chained);
+ sg_index = areq_ctx->dst_sgl->length;
+ offset = size_to_skip;
+
+ //check where the data starts
+ while (sg_index <= size_to_skip) {
+ offset -= areq_ctx->dst_sgl->length;
+ areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
+ //if we have reached the end of the sgl, this is unexpected
+ if (!areq_ctx->dst_sgl) {
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
+ }
+ sg_index += areq_ctx->dst_sgl->length;
+ dst_mapped_nents--;
+ }
+ if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->dst.nents = dst_mapped_nents;
+ areq_ctx->dst_offset = offset;
+ if (src_mapped_nents > 1 ||
+ dst_mapped_nents > 1 ||
+ do_chain) {
+ areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
+ rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes,
+ &dst_last_bytes, is_last_table);
+ } else {
+ areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
+ cc_prepare_aead_data_dlli(req, &src_last_bytes,
+ &dst_last_bytes);
+ }
+
+chain_data_exit:
+ return rc;
+}
+
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
+ struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ u32 curr_mlli_size = 0;
+
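+ /* MLLI tables are laid out back to back in SRAM: assoc table
+ * first, then src/dst in processing order
+ */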
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+ areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
+ curr_mlli_size = areq_ctx->assoc.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ }
+
+ if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+ /* In-place case: dst nents equals src nents */
+ if (req->src == req->dst) {
+ areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
+ areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ if (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_DECRYPT) {
+ areq_ctx->src.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr =
+ areq_ctx->src.sram_addr +
+ areq_ctx->src.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ areq_ctx->dst.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->src.sram_addr =
+ areq_ctx->dst.sram_addr +
+ areq_ctx->dst.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->dst.mlli_nents;
+ }
+ }
+ }
+}
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct buffer_array sg_data;
+ unsigned int authsize = areq_ctx->req_authsize;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ int rc = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ bool is_gcm4543 = areq_ctx->is_gcm4543;
+ dma_addr_t dma_addr;
+ u32 mapped_nents = 0;
+ u32 dummy = 0; /* used for the assoc data fragments */
+ u32 size_to_map = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+ /* Copy the mac to a temporary location to deal with possible
+ * data corruption caused by cache coherency problems.
+ */
+ if (drvdata->coherent &&
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+ /* Calculate the cipher data size; in decrypt, remove the ICV */
+ areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - authsize);
+
+ dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->mac_buf_dma_addr = dma_addr;
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+
+ dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, addr);
+ areq_ctx->ccm_iv0_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+ if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, req->assoclen)) {
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ }
+
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, areq_ctx->hkey);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->hkey_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_block_len_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
+ areq_ctx->gcm_iv_inc1_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
+ areq_ctx->gcm_iv_inc2_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
+ }
+
+ size_to_map = req->cryptlen + req->assoclen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_map += authsize;
+
+ if (is_gcm4543)
+ size_to_map += crypto_aead_ivsize(tfm);
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->src.nents,
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
+ if (rc) {
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+
+ if (areq_ctx->is_single_pass) {
+ /*
+ * Create MLLI table for:
+ * (1) Assoc. data
+ * (2) Src/Dst SGLs
+ * Note: IV is a contiguous buffer (not an SGL)
+ */
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ } else { /* DOUBLE-PASS flow */
+ /*
+ * Prepare MLLI table(s) in this order:
+ *
+ * If ENCRYPT/DECRYPT (inplace):
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src/dst (inplace operation)
+ *
+ * If ENCRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for dst
+ * (4) MLLI for src
+ *
+ * If DECRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src
+ * (4) MLLI for dst
+ */
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
+ if (rc)
+ goto aead_map_failure;
+ }
+
+ /* MLLI support - start building the MLLI according to the
+ * above results
+ */
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto aead_map_failure;
+
+ cc_update_aead_mlli_nents(drvdata, req);
+ dev_dbg(dev, "assoc params mn %d\n",
+ areq_ctx->assoc.mlli_nents);
+ dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
+ dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
+ }
+ return 0;
+
+aead_map_failure:
+ cc_unmap_aead_request(dev, req);
+ return rc;
+}
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (nbytes == 0 && *curr_buff_cnt == 0) {
+ /* nothing to do */
+ return 0;
+ }
+
+ /* TODO: copy the data when the buffer is large enough for the operation */
+ /* map the previous buffer */
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
+ return -ENOMEM;
+ }
+ }
+
+ if (src && nbytes > 0 && do_update) {
+ if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
+ goto unmap_curr_buff;
+ }
+ if (src && mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = nbytes;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+ /* Build MLLI */
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ goto fail_unmap_din;
+ }
+ /* change the buffer index for the unmap function */
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
+ dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+ cc_dma_buf_type(areq_ctx->data_dma_buf_type));
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ u8 *next_buff = cc_next_buf(areq_ctx);
+ u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ unsigned int update_data_len;
+ u32 total_in_len = nbytes + *curr_buff_cnt;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ unsigned int swap_index = 0;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ areq_ctx->curr_sg = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (total_in_len < block_size) {
+ dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
+ areq_ctx->in_nents =
+ cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ sg_copy_to_buffer(src, areq_ctx->in_nents,
+ &curr_buff[*curr_buff_cnt], nbytes);
+ *curr_buff_cnt += nbytes;
+ return 1;
+ }
+
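+ /* Split the input at a block boundary: hash whole blocks now and
+ * stash the residue for the next update
+ */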
+ /* Calculate the residue size*/
+ *next_buff_cnt = total_in_len & (block_size - 1);
+ /* update data len */
+ update_data_len = total_in_len - *next_buff_cnt;
+
+ dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
+ *next_buff_cnt, update_data_len);
+
+ /* Copy the new residue to next buffer */
+ if (*next_buff_cnt) {
+ dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ next_buff, (update_data_len - *curr_buff_cnt),
+ *next_buff_cnt);
+ cc_copy_sg_portion(dev, next_buff, src,
+ (update_data_len - *curr_buff_cnt),
+ nbytes, CC_SG_TO_BUF);
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
+ return -ENOMEM;
+ }
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (update_data_len > *curr_buff_cnt) {
+ if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents)) {
+ goto unmap_curr_buff;
+ }
+ if (mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ /* only one entry in the SG and no previous data */
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = update_data_len;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ goto fail_unmap_din;
+ }
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ u32 *prev_len = cc_next_buf_cnt(areq_ctx);
+
+ /* In case a pool was set, a table was
+ * allocated and should be released
+ */
+ if (areq_ctx->mlli_params.curr_pool) {
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ if (src && areq_ctx->in_nents) {
+ dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
+ dma_unmap_sg(dev, src,
+ areq_ctx->in_nents, DMA_TO_DEVICE);
+ }
+
+ if (*prev_len) {
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ sg_virt(areq_ctx->buff_sg),
+ &sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_len(areq_ctx->buff_sg));
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ if (!do_revert) {
+ /* clean the previous data length for update
+ * operation
+ */
+ *prev_len = 0;
+ } else {
+ areq_ctx->buff_index ^= 1;
+ }
+ }
+}
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct buff_mgr_handle *buff_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
+ if (!buff_mgr_handle)
+ return -ENOMEM;
+
+ drvdata->buff_mgr_handle = buff_mgr_handle;
+
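+ /* Each pool element is large enough for a full MLLI table of
+ * MAX_NUM_OF_TOTAL_MLLI_ENTRIES entries
+ */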
+ buff_mgr_handle->mlli_buffs_pool =
+ dma_pool_create("dx_single_mlli_tables", dev,
+ MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
+ LLI_ENTRY_BYTE_SIZE,
+ MLLI_TABLE_MIN_ALIGNMENT, 0);
+
+ if (!buff_mgr_handle->mlli_buffs_pool)
+ goto error;
+
+ return 0;
+
+error:
+ cc_buffer_mgr_fini(drvdata);
+ return -ENOMEM;
+}
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
+{
+ struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
+
+ if (buff_mgr_handle) {
+ dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
+ kfree(drvdata->buff_mgr_handle);
+ drvdata->buff_mgr_handle = NULL;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
new file mode 100644
index 000000000000..3ec4b4db5247
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __CC_BUFFER_MGR_H__
+#define __CC_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "cc_driver.h"
+
+enum cc_req_dma_buf_type {
+ CC_DMA_BUF_NULL = 0,
+ CC_DMA_BUF_DLLI,
+ CC_DMA_BUF_MLLI
+};
+
+enum cc_sg_cpy_direct {
+ CC_SG_TO_BUF = 0,
+ CC_SG_FROM_BUF = 1
+};
+
+struct cc_mlli {
+ cc_sram_addr_t sram_addr;
+ unsigned int nents; //sg nents
+ unsigned int mlli_nents; //mlli nents might be different than the above
+};
+
+struct mlli_params {
+ struct dma_pool *curr_pool;
+ u8 *mlli_virt_addr;
+ dma_addr_t mlli_dma_addr;
+ u32 mlli_len;
+};
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize,
+ struct scatterlist *src, struct scatterlist *dst);
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags);
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags);
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert);
+
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
+
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
+
+#endif /* __CC_BUFFER_MGR_H__ */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 000000000000..df98f7afe645
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+
+#include "cc_driver.h"
+#include "cc_lli_defs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
+
+#define MAX_ABLKCIPHER_SEQ_LEN 6
+
+#define template_skcipher template_u.skcipher
+
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
+ struct list_head alg_list;
+};
+
+struct cc_user_key_info {
+ u8 *key;
+ dma_addr_t key_dma_addr;
+};
+
+struct cc_hw_key_info {
+ enum cc_hw_crypto_key key1_slot;
+ enum cc_hw_crypto_key key2_slot;
+};
+
+struct cc_cipher_ctx {
+ struct cc_drvdata *drvdata;
+ int keylen;
+ int key_round_number;
+ int cipher_mode;
+ int flow_mode;
+ unsigned int flags;
+ struct cc_user_key_info user;
+ struct cc_hw_key_info hw;
+ struct crypto_shash *shash_tfm;
+};
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ switch (size) {
+ case CC_AES_128_BIT_KEY_SIZE:
+ case CC_AES_192_BIT_KEY_SIZE:
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+ ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+ ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+ return 0;
+ break;
+ case CC_AES_256_BIT_KEY_SIZE:
+ return 0;
+ case (CC_AES_192_BIT_KEY_SIZE * 2):
+ case (CC_AES_256_BIT_KEY_SIZE * 2):
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+ unsigned int size)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_XTS:
+ if (size >= CC_MIN_AES_XTS_SIZE &&
+ size <= CC_MAX_AES_XTS_SIZE &&
+ IS_ALIGNED(size, AES_BLOCK_SIZE))
+ return 0;
+ break;
+ case DRV_CIPHER_CBC_CTS:
+ if (size >= AES_BLOCK_SIZE)
+ return 0;
+ break;
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ if (IS_ALIGNED(size, AES_BLOCK_SIZE))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (IS_ALIGNED(size, DES_BLOCK_SIZE))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int cc_cipher_init(struct crypto_tfm *tfm)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+ int rc = 0;
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+ crypto_tfm_alg_name(tfm));
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct cipher_req_ctx));
+
+ ctx_p->cipher_mode = cc_alg->cipher_mode;
+ ctx_p->flow_mode = cc_alg->flow_mode;
+ ctx_p->drvdata = cc_alg->drvdata;
+
+ /* Allocate key buffer, cache line aligned */
+ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+ if (!ctx_p->user.key)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+ ctx_p->user.key);
+
+ /* Map key buffer */
+ ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+ max_key_buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+ dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ max_key_buf_size, ctx_p->user.key);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Alloc hash tfm for essiv */
+ ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+ if (IS_ERR(ctx_p->shash_tfm)) {
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+ return PTR_ERR(ctx_p->shash_tfm);
+ }
+ }
+
+ return rc;
+}
+
+static void cc_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ dev_dbg(dev, "Clearing context @%p for %s\n",
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Free hash tfm for essiv */
+ crypto_free_shash(ctx_p->shash_tfm);
+ ctx_p->shash_tfm = NULL;
+ }
+
+ /* Unmap key buffer */
+ dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+ DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+ &ctx_p->user.key_dma_addr);
+
+ /* Free key buffer in context */
+ kzfree(ctx_p->user.key);
+ dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+}
+
+struct tdes_keys {
+ u8 key1[DES_KEY_SIZE];
+ u8 key2[DES_KEY_SIZE];
+ u8 key3[DES_KEY_SIZE];
+};
+
+static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
+{
+ switch (slot_num) {
+ case 0:
+ return KFDE0_KEY;
+ case 1:
+ return KFDE1_KEY;
+ case 2:
+ return KFDE2_KEY;
+ case 3:
+ return KFDE3_KEY;
+ }
+ return END_OF_KEYS;
+}
+
+static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+
+ dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dump_byte_array("key", (u8 *)key, keylen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (validate_keys_sizes(ctx_p, keylen)) {
+ dev_err(dev, "Unsupported key size %d.\n", keylen);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (cc_is_hw_key(tfm)) {
+ /* setting HW key slots */
+ struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
+
+ if (ctx_p->flow_mode != S_DIN_to_AES) {
+ dev_err(dev, "HW key not supported for non-AES flows\n");
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki->hw_key1);
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki->hw_key1 == hki->hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki->hw_key1, hki->hw_key2);
+ return -EINVAL;
+ }
+ ctx_p->hw.key2_slot =
+ hw_key_to_cc_hw_key(hki->hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki->hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->keylen = keylen;
+ dev_dbg(dev, "cc_is_hw_key ret 0");
+
+ return 0;
+ }
+
+ /*
+ * Verify DES weak keys
+ * Note that we're dropping the expanded key since the
+ * HW does the expansion on its own.
+ */
+ if (ctx_p->flow_mode == S_DIN_to_DES) {
+ if (keylen == DES3_EDE_KEY_SIZE &&
+ __des3_ede_setkey(tmp, &tfm->crt_flags, key,
+ DES3_EDE_KEY_SIZE)) {
+ dev_dbg(dev, "weak 3DES key");
+ return -EINVAL;
+ } else if (!des_ekey(tmp, key) &&
+ (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ dev_dbg(dev, "weak DES key");
+ return -EINVAL;
+ }
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+ xts_check_key(tfm, key, keylen)) {
+ dev_dbg(dev, "weak XTS key");
+ return -EINVAL;
+ }
+
+ /* STAT_PHASE_1: Copy key to ctx */
+ dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+
+ memcpy(ctx_p->user.key, key, keylen);
+ if (keylen == 24)
+ memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* sha256 for key2 - use sw implementation */
+ int key_len = keylen >> 1;
+ int err;
+
+ SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+ desc->tfm = ctx_p->shash_tfm;
+
+ err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+ ctx_p->user.key + key_len);
+ if (err) {
+ dev_err(dev, "Failed to hash ESSIV key.\n");
+ return err;
+ }
+ }
+ dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+ ctx_p->keylen = keylen;
+
+ dev_dbg(dev, "return safely");
+ return 0;
+}
+
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Load cipher state */
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+ NS_BIT);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
+ }
+ (*seq_size)++;
+ /* fall through */
+ case DRV_CIPHER_ECB:
+ /* Load key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_is_hw_key(tfm)) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, ((key_len == 24) ?
+ AES_MAX_KEY_SIZE :
+ key_len), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
+ } else {
+ /*des*/
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
+ }
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* Load AES key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_is_hw_key(tfm)) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+
+ /* load XEX key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_is_hw_key(tfm)) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key2_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ (key_dma_addr + (key_len / 2)),
+ (key_len / 2), NS_BIT);
+ }
+ set_xex_data_unit_size(&desc[*seq_size], du_size);
+ set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+ (*seq_size)++;
+
+ /* Set state */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ void *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ unsigned int flow_mode = ctx_p->flow_mode;
+
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ flow_mode = DIN_AES_DOUT;
+ break;
+ case S_DIN_to_DES:
+ flow_mode = DIN_DES_DOUT;
+ break;
+ default:
+ dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
+ return;
+ }
+ /* Process */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!areq ? 0 : 1));
+ if (areq)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
+ /* MLLI: first DMA the MLLI table into CC SRAM via a BYPASS
+ * descriptor, then process the data through it
+ */
+ dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+ &req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_MLLI,
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT);
+ if (req_ctx->out_nents == 0) {
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ set_dout_mlli(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT,
+ (!areq ? 0 : 1));
+ } else {
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr +
+ (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
+ set_dout_mlli(&desc[*seq_size],
+ (ctx_p->drvdata->mlli_sram_addr +
+ (LLI_ENTRY_BYTE_SIZE *
+ req_ctx->in_mlli_nents)),
+ req_ctx->out_mlli_nents, NS_BIT,
+ (!areq ? 0 : 1));
+ }
+ if (areq)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ }
+}
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+{
+ struct skcipher_request *req = (struct skcipher_request *)cc_req;
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ kzfree(req_ctx->iv);
+
+ /*
+ * The crypto API expects us to set the req->iv to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->iv, req_ctx->backup_info, ivsize);
+ kzfree(req_ctx->backup_info);
+ } else if (!err) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ (req->cryptlen - ivsize),
+ ivsize, 0);
+ }
+
+ skcipher_request_complete(req, err);
+}
+
+static int cc_cipher_process(struct skcipher_request *req,
+ enum drv_crypto_direction direction)
+{
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->cryptlen;
+ void *iv = req->iv;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ int rc, cts_restore_flag = 0;
+ unsigned int seq_len = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n",
+ ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ "Encrypt" : "Decrypt"), req, iv, nbytes);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ /* TODO: check data length according to mode */
+ if (validate_data_size(ctx_p, nbytes)) {
+ dev_err(dev, "Unsupported data size %d.\n", nbytes);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ rc = -EINVAL;
+ goto exit_process;
+ }
+ if (nbytes == 0) {
+ /* No data to process is valid */
+ rc = 0;
+ goto exit_process;
+ }
+
+ /* The IV we are handed may be allocated on the stack, so
+ * we must copy it to a DMA-able buffer before use.
+ */
+ req_ctx->iv = kmemdup(iv, ivsize, flags);
+ if (!req_ctx->iv) {
+ rc = -ENOMEM;
+ goto exit_process;
+ }
+
+ /* For CTS, if the data size is aligned to the AES block size, use plain CBC mode */
+ if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+ ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
+ ctx_p->cipher_mode = DRV_CIPHER_CBC;
+ cts_restore_flag = 1;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_cipher_complete;
+ cc_req.user_arg = (void *)req;
+
+#ifdef ENABLE_CYCLE_COUNT
+ cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
+
+#endif
+
+ /* Setup request context */
+ req_ctx->gen_ctx.op_type = direction;
+
+ /* STAT_PHASE_1: Map buffers */
+
+ rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+ req_ctx->iv, src, dst, flags);
+ if (rc) {
+ dev_err(dev, "map_request() failed\n");
+ goto exit_process;
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ /* Setup processing */
+ cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Data processing */
+ cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+ &seq_len);
+
+ /* do we need to generate IV? */
+ if (req_ctx->is_giv) {
+ cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
+ /* set the IV size (8/16 B long)*/
+ cc_req.ivgen_size = ivsize;
+ }
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+
+ rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+ &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ /* Failed to send the request or request completed
+ * synchronously
+ */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ }
+
+exit_process:
+ if (cts_restore_flag)
+ ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
+
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ kzfree(req_ctx->backup_info);
+ kzfree(req_ctx->iv);
+ }
+
+ return rc;
+}
+
+static int cc_cipher_encrypt(struct skcipher_request *req)
+{
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+
+ req_ctx->is_giv = false;
+ req_ctx->backup_info = NULL;
+
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int cc_cipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ /*
+ * Allocate and save the last IV sized bytes of the source, which will
+ * be lost in case of in-place decryption and might be needed for CTS.
+ */
+ req_ctx->backup_info = kmalloc(ivsize, flags);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+ (req->cryptlen - ivsize), ivsize, 0);
+ req_ctx->is_giv = false;
+
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+/* Block cipher alg */
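+/*
+ * Note: the XTS, ESSIV and BitLocker entries below take a double-size
+ * key (the data key and the tweak key concatenated); e.g. a 64-byte
+ * xts(aes) key is laid out as key1[32] || key2[32], hence the
+ * AES_{MIN,MAX}_KEY_SIZE * 2 key sizes.
+ */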
+static const struct cc_alg_template skcipher_algs[] = {
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "xts512(aes)",
+ .driver_name = "xts-aes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "xts4096(aes)",
+ .driver_name = "xts-aes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv(aes)",
+ .driver_name = "essiv-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv512(aes)",
+ .driver_name = "essiv-aes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv4096(aes)",
+ .driver_name = "essiv-aes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker(aes)",
+ .driver_name = "bitlocker-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker512(aes)",
+ .driver_name = "bitlocker-aes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker4096(aes)",
+ .driver_name = "bitlocker-aes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ofb(aes)",
+ .driver_name = "ofb-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_OFB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cts1(cbc(aes))",
+ .driver_name = "cts1-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC_CTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ecb(des3_ede)",
+ .driver_name = "ecb-3des-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-ccree",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ecb(des)",
+ .driver_name = "ecb-des-ccree",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+};
+
+static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
+ struct device *dev)
+{
+ struct cc_crypto_alg *t_alg;
+ struct skcipher_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &t_alg->skcipher_alg;
+
+ memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CC_CRA_PRIO;
+ alg->base.cra_blocksize = tmpl->blocksize;
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_ctxsize = sizeof(struct cc_cipher_ctx);
+
+ alg->base.cra_init = cc_cipher_init;
+ alg->base.cra_exit = cc_cipher_exit;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_TYPE_SKCIPHER;
+
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+ t_alg->data_unit = tmpl->data_unit;
+
+ return t_alg;
+}
+
+int cc_cipher_free(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
+
+ if (cipher_handle) {
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
+ entry) {
+ crypto_unregister_skcipher(&t_alg->skcipher_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+ kfree(cipher_handle);
+ drvdata->cipher_handle = NULL;
+ }
+ return 0;
+}
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_cipher_handle *cipher_handle;
+ struct cc_crypto_alg *t_alg;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = -ENOMEM;
+ int alg;
+
+ cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
+ if (!cipher_handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cipher_handle->alg_list);
+ drvdata->cipher_handle = cipher_handle;
+
+ /* Linux crypto */
+ dev_dbg(dev, "Number of algorithms = %zu\n",
+ ARRAY_SIZE(skcipher_algs));
+ for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
+ if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
+ continue;
+
+ dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
+ t_alg = cc_create_alg(&skcipher_algs[alg], dev);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ skcipher_algs[alg].driver_name);
+ goto fail0;
+ }
+ t_alg->drvdata = drvdata;
+
+ dev_dbg(dev, "registering %s\n",
+ skcipher_algs[alg].driver_name);
+ rc = crypto_register_skcipher(&t_alg->skcipher_alg);
+ dev_dbg(dev, "%s alg registration rc = %x\n",
+ t_alg->skcipher_alg.base.cra_driver_name, rc);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
+ kfree(t_alg);
+ goto fail0;
+ } else {
+ list_add_tail(&t_alg->entry,
+ &cipher_handle->alg_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
+ }
+ }
+ return 0;
+
+fail0:
+ cc_cipher_free(drvdata);
+ return rc;
+}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 000000000000..2a2a6f46c515
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct cipher_req_ctx {
+ struct async_gen_req_ctx gen_ctx;
+ enum cc_req_dma_buf_type dma_buf_type;
+ u32 in_nents;
+ u32 in_mlli_nents;
+ u32 out_nents;
+ u32 out_mlli_nents;
+ u8 *backup_info; /* saved last src block, used to restore req->iv on decrypt */
+ u8 *iv;
+ bool is_giv;
+ struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
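+/*
+ * For HW (secure) keys, the "key" passed to setkey is not key material
+ * but a pair of HW key slot numbers wrapped in this struct.
+ */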
+struct arm_hw_key_info {
+ int hw_key1;
+ int hw_key2;
+};
+
+/*
+ * This is a stub function that will be replaced when we
+ * implement secure keys
+ */
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+ return false;
+}
+
+#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
new file mode 100644
index 000000000000..e032544f4e31
--- /dev/null
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef _CC_CRYPTO_CTX_H_
+#define _CC_CRYPTO_CTX_H_
+
+#include <linux/types.h>
+
+#define CC_DRV_DES_IV_SIZE 8
+#define CC_DRV_DES_BLOCK_SIZE 8
+
+#define CC_DRV_DES_ONE_KEY_SIZE 8
+#define CC_DRV_DES_DOUBLE_KEY_SIZE 16
+#define CC_DRV_DES_TRIPLE_KEY_SIZE 24
+#define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE
+
+#define CC_AES_IV_SIZE 16
+#define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2)
+
+#define CC_AES_BLOCK_SIZE 16
+#define CC_AES_BLOCK_SIZE_WORDS 4
+
+#define CC_AES_128_BIT_KEY_SIZE 16
+#define CC_AES_128_BIT_KEY_SIZE_WORDS (CC_AES_128_BIT_KEY_SIZE >> 2)
+#define CC_AES_192_BIT_KEY_SIZE 24
+#define CC_AES_192_BIT_KEY_SIZE_WORDS (CC_AES_192_BIT_KEY_SIZE >> 2)
+#define CC_AES_256_BIT_KEY_SIZE 32
+#define CC_AES_256_BIT_KEY_SIZE_WORDS (CC_AES_256_BIT_KEY_SIZE >> 2)
+#define CC_AES_KEY_SIZE_MAX CC_AES_256_BIT_KEY_SIZE
+#define CC_AES_KEY_SIZE_WORDS_MAX (CC_AES_KEY_SIZE_MAX >> 2)
+
+#define CC_MD5_DIGEST_SIZE 16
+#define CC_SHA1_DIGEST_SIZE 20
+#define CC_SHA224_DIGEST_SIZE 28
+#define CC_SHA256_DIGEST_SIZE 32
+#define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
+#define CC_SHA384_DIGEST_SIZE 48
+#define CC_SHA512_DIGEST_SIZE 64
+
+#define CC_SHA1_BLOCK_SIZE 64
+#define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
+#define CC_MD5_BLOCK_SIZE 64
+#define CC_MD5_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA224_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA1_224_256_BLOCK_SIZE 64
+#define CC_SHA384_BLOCK_SIZE 128
+#define CC_SHA512_BLOCK_SIZE 128
+
+#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
+#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
+
+#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
+
+#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+
+enum drv_engine_type {
+ DRV_ENGINE_NULL = 0,
+ DRV_ENGINE_AES = 1,
+ DRV_ENGINE_DES = 2,
+ DRV_ENGINE_HASH = 3,
+ DRV_ENGINE_RC4 = 4,
+ DRV_ENGINE_DOUT = 5,
+ DRV_ENGINE_RESERVE32B = S32_MAX,
+};
+
+enum drv_crypto_alg {
+ DRV_CRYPTO_ALG_NULL = -1,
+ DRV_CRYPTO_ALG_AES = 0,
+ DRV_CRYPTO_ALG_DES = 1,
+ DRV_CRYPTO_ALG_HASH = 2,
+ DRV_CRYPTO_ALG_C2 = 3,
+ DRV_CRYPTO_ALG_HMAC = 4,
+ DRV_CRYPTO_ALG_AEAD = 5,
+ DRV_CRYPTO_ALG_BYPASS = 6,
+ DRV_CRYPTO_ALG_NUM = 7,
+ DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
+};
+
+enum drv_crypto_direction {
+ DRV_CRYPTO_DIRECTION_NULL = -1,
+ DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
+ DRV_CRYPTO_DIRECTION_DECRYPT = 1,
+ DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
+};
+
+enum drv_cipher_mode {
+ DRV_CIPHER_NULL_MODE = -1,
+ DRV_CIPHER_ECB = 0,
+ DRV_CIPHER_CBC = 1,
+ DRV_CIPHER_CTR = 2,
+ DRV_CIPHER_CBC_MAC = 3,
+ DRV_CIPHER_XTS = 4,
+ DRV_CIPHER_XCBC_MAC = 5,
+ DRV_CIPHER_OFB = 6,
+ DRV_CIPHER_CMAC = 7,
+ DRV_CIPHER_CCM = 8,
+ DRV_CIPHER_CBC_CTS = 11,
+ DRV_CIPHER_GCTR = 12,
+ DRV_CIPHER_ESSIV = 13,
+ DRV_CIPHER_BITLOCKER = 14,
+ DRV_CIPHER_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_mode {
+ DRV_HASH_NULL = -1,
+ DRV_HASH_SHA1 = 0,
+ DRV_HASH_SHA256 = 1,
+ DRV_HASH_SHA224 = 2,
+ DRV_HASH_SHA512 = 3,
+ DRV_HASH_SHA384 = 4,
+ DRV_HASH_MD5 = 5,
+ DRV_HASH_CBC_MAC = 6,
+ DRV_HASH_XCBC_MAC = 7,
+ DRV_HASH_CMAC = 8,
+ DRV_HASH_MODE_NUM = 9,
+ DRV_HASH_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_hw_mode {
+ DRV_HASH_HW_MD5 = 0,
+ DRV_HASH_HW_SHA1 = 1,
+ DRV_HASH_HW_SHA256 = 2,
+ DRV_HASH_HW_SHA224 = 10,
+ DRV_HASH_HW_SHA512 = 4,
+ DRV_HASH_HW_SHA384 = 12,
+ DRV_HASH_HW_GHASH = 6,
+ DRV_HASH_HW_RESERVE32B = S32_MAX
+};
+
+#endif /* _CC_CRYPTO_CTX_H_ */
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
new file mode 100644
index 000000000000..08f8db489cf0
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+#include "cc_driver.h"
+#include "cc_crypto_ctx.h"
+#include "cc_debugfs.h"
+
+struct cc_debugfs_ctx {
+ struct dentry *dir;
+};
+
+#define CC_DEBUG_REG(_X) { \
+ .name = __stringify(_X),\
+ .offset = CC_REG(_X) \
+ }
+
+/*
+ * This is the dentry of the global debugfs ccree/ dir. It is
+ * not tied to a specific instance of the device, hence it is
+ * global (and not part of the per-instance context).
+ */
+static struct dentry *cc_debugfs_dir;
+
+static struct debugfs_reg32 debug_regs[] = {
+ CC_DEBUG_REG(HOST_SIGNATURE),
+ CC_DEBUG_REG(HOST_IRR),
+ CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+ CC_DEBUG_REG(AXIM_MON_ERR),
+ CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
+ CC_DEBUG_REG(HOST_IMR),
+ CC_DEBUG_REG(AXIM_CFG),
+ CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+ CC_DEBUG_REG(HOST_VERSION),
+ CC_DEBUG_REG(GPR_HOST),
+ CC_DEBUG_REG(AXIM_MON_COMP),
+};
+
+int __init cc_debugfs_global_init(void)
+{
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+
+ return !cc_debugfs_dir;
+}
+
+void __exit cc_debugfs_global_fini(void)
+{
+ debugfs_remove(cc_debugfs_dir);
+}
+
+int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_debugfs_ctx *ctx;
+ struct debugfs_regset32 *regset;
+ struct dentry *file;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = debug_regs;
+ regset->nregs = ARRAY_SIZE(debug_regs);
+ regset->base = drvdata->cc_base;
+
+ ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+ if (!ctx->dir)
+ return -ENFILE;
+
+ file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+ if (!file) {
+ debugfs_remove(ctx->dir);
+ return -ENFILE;
+ }
+
+ file = debugfs_create_bool("coherent", 0400, ctx->dir,
+ &drvdata->coherent);
+
+ if (!file) {
+ debugfs_remove_recursive(ctx->dir);
+ return -ENFILE;
+ }
+
+ drvdata->debugfs = ctx;
+
+ return 0;
+}
+
+void cc_debugfs_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
+
+ debugfs_remove_recursive(ctx->dir);
+}
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
new file mode 100644
index 000000000000..5b5320eca7d2
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_DEBUGFS_H__
+#define __CC_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int cc_debugfs_global_init(void);
+void cc_debugfs_global_fini(void);
+
+int cc_debugfs_init(struct cc_drvdata *drvdata);
+void cc_debugfs_fini(struct cc_drvdata *drvdata);
+
+#else
+
+static inline int cc_debugfs_global_init(void)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_global_fini(void) {}
+
+static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
+
+#endif
+
+#endif /*__CC_DEBUGFS_H__*/
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
new file mode 100644
index 000000000000..89ce013ae093
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/crypto.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_debugfs.h"
+#include "cc_cipher.h"
+#include "cc_aead.h"
+#include "cc_hash.h"
+#include "cc_ivgen.h"
+#include "cc_sram_mgr.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+bool cc_dump_desc;
+module_param_named(dump_desc, cc_dump_desc, bool, 0600);
+MODULE_PARM_DESC(dump_desc, "Dump descriptors to kernel log as debugging aid");
+
+bool cc_dump_bytes;
+module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
+MODULE_PARM_DESC(dump_bytes, "Dump buffers to kernel log as debugging aid");
+
+struct cc_hw_data {
+ char *name;
+ enum cc_hw_rev rev;
+ u32 sig;
+};
+
+/* Hardware revisions defs. */
+
+static const struct cc_hw_data cc712_hw = {
+ .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U
+};
+
+static const struct cc_hw_data cc710_hw = {
+ .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U
+};
+
+static const struct cc_hw_data cc630p_hw = {
+ .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
+};
+
+static const struct of_device_id arm_ccree_dev_of_match[] = {
+ { .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
+ { .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
+ { .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len)
+{
+ char prefix[64];
+
+ if (!buf)
+ return;
+
+ snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+ len, false);
+}
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 irr;
+ u32 imr;
+
+ /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+
+ /* read the interrupt status */
+ irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+ if (irr == 0) { /* Probably shared interrupt line */
+ dev_err(dev, "Got interrupt with empty IRR\n");
+ return IRQ_NONE;
+ }
+ imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
+
+ /* clear interrupt - must be before processing events */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
+
+ drvdata->irq = irr;
+ /* Completion interrupt - most probable */
+ if (irr & CC_COMP_IRQ_MASK) {
+ /* Mask AXI completion interrupt - will be unmasked in
+ * Deferred service handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
+ irr &= ~CC_COMP_IRQ_MASK;
+ complete_request(drvdata);
+ }
+#ifdef CONFIG_CRYPTO_FIPS
+ /* TEE FIPS interrupt */
+ if (irr & CC_GPR0_IRQ_MASK) {
+ /* Mask interrupt - will be unmasked in Deferred service
+ * handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
+ irr &= ~CC_GPR0_IRQ_MASK;
+ fips_handler(drvdata);
+ }
+#endif
+ /* AXI error interrupt */
+ if (irr & CC_AXI_ERR_IRQ_MASK) {
+ u32 axi_err;
+
+ /* Read the AXI error ID */
+ axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
+ dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
+ axi_err);
+
+ irr &= ~CC_AXI_ERR_IRQ_MASK;
+ }
+
+ if (irr) {
+ dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
+ /* Just warning */
+ }
+
+ return IRQ_HANDLED;
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
+{
+ unsigned int val, cache_params;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* Unmask all AXI interrupt sources in the AXIM_CFG register */
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+
+ /* Clear all pending interrupts */
+ val = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
+
+ /* Unmask relevant interrupt cause */
+ val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ val |= CC_GPR0_IRQ_MASK;
+
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);
+
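+ /* Program the AXI cache parameters: use the coherent setting on
+ * DMA-coherent platforms, 0 otherwise
+ */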
+ cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
+
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
+
+ if (is_probe)
+ dev_info(dev, "Cache params previous: 0x%08X\n", val);
+
+ cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
+
+ if (is_probe)
+ dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
+ val, cache_params);
+
+ return 0;
+}
+
+static int init_cc_resources(struct platform_device *plat_dev)
+{
+ struct resource *req_mem_cc_regs = NULL;
+ struct cc_drvdata *new_drvdata;
+ struct device *dev = &plat_dev->dev;
+ struct device_node *np = dev->of_node;
+ u32 signature_val;
+ u64 dma_mask;
+ const struct cc_hw_data *hw_rev;
+ const struct of_device_id *dev_id;
+ int rc = 0;
+
+ new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
+ if (!new_drvdata)
+ return -ENOMEM;
+
+ dev_id = of_match_node(arm_ccree_dev_of_match, np);
+ if (!dev_id)
+ return -ENODEV;
+
+ hw_rev = (struct cc_hw_data *)dev_id->data;
+ new_drvdata->hw_rev_name = hw_rev->name;
+ new_drvdata->hw_rev = hw_rev->rev;
+
+ if (hw_rev->rev >= CC_HW_REV_712) {
+ new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
+ new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
+ } else {
+ new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
+ new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
+ }
+
+ platform_set_drvdata(plat_dev, new_drvdata);
+ new_drvdata->plat_dev = plat_dev;
+
+ new_drvdata->clk = of_clk_get(np, 0);
+ new_drvdata->coherent = of_dma_is_coherent(np);
+
+ /* Get device resources */
+ /* First CC registers space */
+ req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ /* Map registers space */
+ new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(new_drvdata->cc_base)) {
+ dev_err(dev, "Failed to ioremap registers");
+ return PTR_ERR(new_drvdata->cc_base);
+ }
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, new_drvdata->cc_base);
+
+ /* Then IRQ */
+ new_drvdata->irq = platform_get_irq(plat_dev, 0);
+ if (new_drvdata->irq < 0) {
+ dev_err(dev, "Failed getting IRQ resource\n");
+ return new_drvdata->irq;
+ }
+
+ rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+ IRQF_SHARED, "ccree", new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n",
+ new_drvdata->irq);
+ return rc;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+
+ init_completion(&new_drvdata->hw_queue_avail);
+
+ if (!plat_dev->dev.dma_mask)
+ plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
+
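+ /* Start from the widest DMA mask the IP supports (48 bits) and
+ * shrink it until the platform can satisfy it
+ */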
+ dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
+ while (dma_mask > 0x7fffffffUL) {
+ if (dma_supported(&plat_dev->dev, dma_mask)) {
+ rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
+ if (!rc)
+ break;
+ }
+ dma_mask >>= 1;
+ }
+
+ if (rc) {
+ dev_err(dev, "Failed in dma_set_mask, mask=%pad\n", &dma_mask);
+ return rc;
+ }
+
+ rc = cc_clk_on(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock");
+ return rc;
+ }
+
+ /* Verify correct mapping */
+ signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
+ if (signature_val != hw_rev->sig) {
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ signature_val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+
+ /* Display HW versions */
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
+ hw_rev->name, cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
+ DRV_MODULE_VERSION);
+
+ rc = init_cc_regs(new_drvdata, true);
+ if (rc) {
+ dev_err(dev, "init_cc_regs failed\n");
+ goto post_clk_err;
+ }
+
+ rc = cc_debugfs_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed registering debugfs interface\n");
+ goto post_regs_err;
+ }
+
+ rc = cc_fips_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ goto post_debugfs_err;
+ }
+ rc = cc_sram_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_sram_mgr_init failed\n");
+ goto post_fips_init_err;
+ }
+
+ new_drvdata->mlli_sram_addr =
+ cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+ if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
+ dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
+ rc = -ENOMEM;
+ goto post_sram_mgr_err;
+ }
+
+ rc = cc_req_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_req_mgr_init failed\n");
+ goto post_sram_mgr_err;
+ }
+
+ rc = cc_buffer_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "buffer_mgr_init failed\n");
+ goto post_req_mgr_err;
+ }
+
+ rc = cc_pm_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_init failed\n");
+ goto post_buf_mgr_err;
+ }
+
+ rc = cc_ivgen_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_ivgen_init failed\n");
+ goto post_power_mgr_err;
+ }
+
+ /* Allocate crypto algs */
+ rc = cc_cipher_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_cipher_alloc failed\n");
+ goto post_ivgen_err;
+ }
+
+ /* hash must be allocated before aead since hash exports APIs */
+ rc = cc_hash_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_hash_alloc failed\n");
+ goto post_cipher_err;
+ }
+
+ rc = cc_aead_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_aead_alloc failed\n");
+ goto post_hash_err;
+ }
+
+ /* If we got here and FIPS mode is enabled
+ * it means all FIPS test passed, so let TEE
+ * know we're good.
+ */
+ cc_set_ree_fips_status(new_drvdata, true);
+
+ return 0;
+
+post_hash_err:
+ cc_hash_free(new_drvdata);
+post_cipher_err:
+ cc_cipher_free(new_drvdata);
+post_ivgen_err:
+ cc_ivgen_fini(new_drvdata);
+post_power_mgr_err:
+ cc_pm_fini(new_drvdata);
+post_buf_mgr_err:
+ cc_buffer_mgr_fini(new_drvdata);
+post_req_mgr_err:
+ cc_req_mgr_fini(new_drvdata);
+post_sram_mgr_err:
+ cc_sram_mgr_fini(new_drvdata);
+post_fips_init_err:
+ cc_fips_fini(new_drvdata);
+post_debugfs_err:
+ cc_debugfs_fini(new_drvdata);
+post_regs_err:
+ fini_cc_regs(new_drvdata);
+post_clk_err:
+ cc_clk_off(new_drvdata);
+ return rc;
+}
+
+void fini_cc_regs(struct cc_drvdata *drvdata)
+{
+ /* Mask all interrupts */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
+}
+
+static void cleanup_cc_resources(struct platform_device *plat_dev)
+{
+ struct cc_drvdata *drvdata =
+ (struct cc_drvdata *)platform_get_drvdata(plat_dev);
+
+ cc_aead_free(drvdata);
+ cc_hash_free(drvdata);
+ cc_cipher_free(drvdata);
+ cc_ivgen_fini(drvdata);
+ cc_pm_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
+ cc_req_mgr_fini(drvdata);
+ cc_sram_mgr_fini(drvdata);
+ cc_fips_fini(drvdata);
+ cc_debugfs_fini(drvdata);
+ fini_cc_regs(drvdata);
+ cc_clk_off(drvdata);
+}
+
+int cc_clk_on(struct cc_drvdata *drvdata)
+{
+ struct clk *clk = drvdata->clk;
+ int rc;
+
+ if (IS_ERR(clk))
+ /* Not all devices have a clock associated with CCREE */
+ return 0;
+
+ rc = clk_prepare_enable(clk);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+void cc_clk_off(struct cc_drvdata *drvdata)
+{
+ struct clk *clk = drvdata->clk;
+
+ if (IS_ERR(clk))
+ /* Not all devices have a clock associated with CCREE */
+ return;
+
+ clk_disable_unprepare(clk);
+}
+
+static int ccree_probe(struct platform_device *plat_dev)
+{
+ int rc;
+ struct device *dev = &plat_dev->dev;
+
+ /* Map registers space */
+ rc = init_cc_resources(plat_dev);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "ARM ccree device initialized\n");
+
+ return 0;
+}
+
+static int ccree_remove(struct platform_device *plat_dev)
+{
+ struct device *dev = &plat_dev->dev;
+
+ dev_dbg(dev, "Releasing ccree resources...\n");
+
+ cleanup_cc_resources(plat_dev);
+
+ dev_info(dev, "ARM ccree device terminated\n");
+
+ return 0;
+}
+
+static struct platform_driver ccree_driver = {
+ .driver = {
+ .name = "ccree",
+ .of_match_table = arm_ccree_dev_of_match,
+#ifdef CONFIG_PM
+ .pm = &ccree_pm,
+#endif
+ },
+ .probe = ccree_probe,
+ .remove = ccree_remove,
+};
+
+static int __init ccree_init(void)
+{
+ int ret;
+
+ cc_hash_global_init();
+
+ ret = cc_debugfs_global_init();
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&ccree_driver);
+}
+module_init(ccree_init);
+
+static void __exit ccree_exit(void)
+{
+ platform_driver_unregister(&ccree_driver);
+ cc_debugfs_global_fini();
+}
+module_exit(ccree_exit);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
new file mode 100644
index 000000000000..2048fdeb9579
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __CC_DRIVER_H__
+#define __CC_DRIVER_H__
+
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Registers definitions from shared/hw/ree_include */
+#include "cc_host_regs.h"
+#define CC_DEV_SHA_MAX 512
+#include "cc_crypto_ctx.h"
+#include "cc_hw_queue_defs.h"
+#include "cc_sram_mgr.h"
+
+extern bool cc_dump_desc;
+extern bool cc_dump_bytes;
+
+#define DRV_MODULE_VERSION "4.0"
+
+enum cc_hw_rev {
+ CC_HW_REV_630 = 630,
+ CC_HW_REV_710 = 710,
+ CC_HW_REV_712 = 712
+};
+
+#define CC_COHERENT_CACHE_PARAMS 0xEEE
+
+/* Maximum DMA mask supported by IP */
+#define DMA_BIT_MASK_LEN 48
+
+#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
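+/* e.g. CC_REG(HOST_IRR) expands to CC_HOST_IRR_REG_OFFSET */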
+
+/* TEE FIPS status interrupt */
+#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define CC_CRA_PRIO 400
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+#define MAX_ICV_NENTS_SUPPORTED 2
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not the actual AXI ID of the transaction but the value of the
+ * AXI_ID field in the HW descriptor. The DMA engine adds 8 to this value.
+ */
+
+#define CC_MAX_IVGEN_DMA_ADDRESSES 3
+struct cc_crypto_req {
+ void (*user_cb)(struct device *dev, void *req, int err);
+ void *user_arg;
+ dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
+ /* The generated IV is placed by send_request() in the first
+ * 'ivgen_dma_addr_len' addresses of this array; the same IV is
+ * used for all of them.
+ */
+ /* Amount of 'ivgen_dma_addr' elements to be filled. */
+ unsigned int ivgen_dma_addr_len;
+ /* The generated IV size required, 8/16 B allowed. */
+ unsigned int ivgen_size;
+ struct completion seq_compl; /* request completion */
+};
+
+/**
+ * struct cc_drvdata - driver private data context
+ * @cc_base: virt address of the CC registers
+ * @irq: device IRQ number
+ * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @fw_ver: SeP loaded firmware version
+ */
+struct cc_drvdata {
+ void __iomem *cc_base;
+ int irq;
+ u32 irq_mask;
+ u32 fw_ver;
+ struct completion hw_queue_avail; /* wait for HW queue availability */
+ struct platform_device *plat_dev;
+ cc_sram_addr_t mlli_sram_addr;
+ void *buff_mgr_handle;
+ void *cipher_handle;
+ void *hash_handle;
+ void *aead_handle;
+ void *request_mgr_handle;
+ void *fips_handle;
+ void *ivgen_handle;
+ void *sram_mgr_handle;
+ void *debugfs;
+ struct clk *clk;
+ bool coherent;
+ char *hw_rev_name;
+ enum cc_hw_rev hw_rev;
+ u32 hash_len_sz;
+ u32 axim_mon_offset;
+};
+
+struct cc_crypto_alg {
+ struct list_head entry;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ unsigned int data_unit;
+ struct cc_drvdata *drvdata;
+ struct skcipher_alg skcipher_alg;
+ struct aead_alg aead_alg;
+};
+
+struct cc_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct skcipher_alg skcipher;
+ struct aead_alg aead;
+ } template_u;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ u32 min_hw_rev;
+ unsigned int data_unit;
+ struct cc_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+ dma_addr_t iv_dma_addr;
+ enum drv_crypto_direction op_type;
+};
+
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
+{
+ return &drvdata->plat_dev->dev;
+}
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+ size_t size)
+{
+ if (cc_dump_bytes)
+ __dump_byte_array(name, the_array, size);
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+int cc_clk_on(struct cc_drvdata *drvdata);
+void cc_clk_off(struct cc_drvdata *drvdata);
+
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
+{
+ return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+}
+
+static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
+ struct cc_hw_desc *pdesc)
+{
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ set_queue_last_ind_bit(pdesc);
+}
+
+#endif /*__CC_DRIVER_H__*/
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
new file mode 100644
index 000000000000..b4d0a6d983e0
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/fips.h>
+
+#include "cc_driver.h"
+#include "cc_fips.h"
+
+static void fips_dsr(unsigned long devarg);
+
+struct cc_fips_handle {
+ struct tasklet_struct tasklet;
+};
+
+/* Called once at driver entry to check whether a TEE FIPS error
+ * has occurred.
+ */
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
+{
+ u32 reg;
+
+ reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
+ return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+}
+
+/*
+ * Push the REE FIPS status to the TEE library by writing the
+ * state to the HOST_GPR0 register.
+ */
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
+{
+ int val = CC_FIPS_SYNC_REE_STATUS;
+
+ if (drvdata->hw_rev < CC_HW_REV_712)
+ return;
+
+ val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
+
+ cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
+}
+
+void cc_fips_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_fips_handle *fips_h = drvdata->fips_handle;
+
+ if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
+ return;
+
+ /* Kill tasklet */
+ tasklet_kill(&fips_h->tasklet);
+
+ kfree(fips_h);
+ drvdata->fips_handle = NULL;
+}
+
+void fips_handler(struct cc_drvdata *drvdata)
+{
+ struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
+
+ if (drvdata->hw_rev < CC_HW_REV_712)
+ return;
+
+ tasklet_schedule(&fips_handle_ptr->tasklet);
+}
+
+static inline void tee_fips_error(struct device *dev)
+{
+ if (fips_enabled)
+ panic("ccree: TEE reported cryptographic error in fips mode!\n");
+ else
+ dev_err(dev, "TEE reported error!\n");
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void fips_dsr(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 irq, state, val;
+
+ irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
+
+ if (irq) {
+ state = cc_ioread(drvdata, CC_REG(GPR_HOST));
+
+ if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
+ tee_fips_error(dev);
+ }
+
+ /* After verifying that there is nothing to do,
+ * unmask the AXI completion interrupt.
+ */
+ val = (cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
+}
+
+/* Called once at driver entry to set up FIPS support. */
+int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ struct cc_fips_handle *fips_h;
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (p_drvdata->hw_rev < CC_HW_REV_712)
+ return 0;
+
+ fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ if (!fips_h)
+ return -ENOMEM;
+
+ p_drvdata->fips_handle = fips_h;
+
+ dev_dbg(dev, "Initializing fips tasklet\n");
+ tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+
+ return 0;
+}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
new file mode 100644
index 000000000000..645e096a7a82
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_FIPS_H__
+#define __CC_FIPS_H__
+
+#ifdef CONFIG_CRYPTO_FIPS
+
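+/*
+ * Values exchanged with the TEE over the GPR registers: the TEE reports
+ * (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_{OK,ERROR}) in GPR_HOST
+ * and the REE mirrors its own state via CC_FIPS_SYNC_REE_STATUS in
+ * HOST_GPR0.
+ */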
+enum cc_fips_status {
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
+};
+
+int cc_fips_init(struct cc_drvdata *p_drvdata);
+void cc_fips_fini(struct cc_drvdata *drvdata);
+void fips_handler(struct cc_drvdata *drvdata);
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+
+#else /* CONFIG_CRYPTO_FIPS */
+
+static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
+ bool ok) {}
+static inline void fips_handler(struct cc_drvdata *drvdata) {}
+
+#endif /* CONFIG_CRYPTO_FIPS */
+
+#endif /*__CC_FIPS_H__*/
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
new file mode 100644
index 000000000000..96ff777474d7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -0,0 +1,2296 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/internal/hash.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define CC_MAX_HASH_SEQ_LEN 12
+#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+
+struct cc_hash_handle {
+ cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
+ cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+ struct list_head hash_list;
+};
+
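+/* Initial "bytes hashed" counters for HMAC flows: one full block
+ * (0x40 = 64 bytes, or 0x80 = 128 bytes for SHA-384/512) has already
+ * been consumed by hashing the ipad.
+ */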
+static const u32 digest_len_init[] = {
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
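+/* MD5's initial state A-D is identical to SHA-1's H0-H3; like the other
+ * larval digests here, the words are stored in reverse order for the HW
+ */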
+static const u32 md5_init[] = {
+ SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha1_init[] = {
+ SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha224_init[] = {
+ SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
+static const u32 sha256_init[] = {
+ SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+static const u32 digest_len_sha512_init[] = {
+ 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+static u64 sha384_init[] = {
+ SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
+static u64 sha512_init[] = {
+ SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static const void *cc_larval_digest(struct device *dev, u32 mode);
+
+struct cc_hash_alg {
+ struct list_head entry;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+ struct ahash_alg ahash_alg;
+};
+
+struct hash_key_req_ctx {
+ u32 keylen;
+ dma_addr_t key_dma_addr;
+};
+
+/* hash per-session context */
+struct cc_hash_ctx {
+ struct cc_drvdata *drvdata;
+ /* Holds the original digest: the digest after "setkey" for HMAC,
+ * or the initial (larval) digest for plain hash.
+ */
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
+
+ dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
+ dma_addr_t digest_buff_dma_addr;
+ /* used for HMAC with a key larger than the mode's block size */
+ struct hash_key_req_ctx key_params;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct completion setkey_comp;
+ bool is_hmac;
+};
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
+ unsigned int flow_mode, struct cc_hw_desc desc[],
+ bool is_not_last_data, unsigned int *seq_size);
+
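+/*
+ * Select how the HW writes the digest back: byte-swapped for MD5 and
+ * SHA-384/512, the little-endian result configuration otherwise.
+ */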
+static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
+{
+ if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+ mode == DRV_HASH_SHA512) {
+ set_bytes_swap(desc, 1);
+ } else {
+ set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+}
+
+static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize)
+{
+ state->digest_result_dma_addr =
+ dma_map_single(dev, state->digest_result_buff,
+ digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
+ dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
+ digestsize);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ digestsize, state->digest_result_buff,
+ &state->digest_result_dma_addr);
+
+ return 0;
+}
+
+static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ memset(state, 0, sizeof(*state));
+
+ if (is_hmac) {
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
+ ctx->hw_mode != DRV_CIPHER_CMAC) {
+ dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(state->digest_buff, ctx->digest_buff,
+ ctx->inter_digestsize);
+ if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ ctx->hash_mode == DRV_HASH_SHA384)
+ memcpy(state->digest_bytes_len,
+ digest_len_sha512_init,
+ ctx->drvdata->hash_len_sz);
+ else
+ memcpy(state->digest_bytes_len, digest_len_init,
+ ctx->drvdata->hash_len_sz);
+ }
+
+ if (ctx->hash_mode != DRV_HASH_NULL) {
+ dma_sync_single_for_cpu(dev,
+ ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ memcpy(state->opad_digest_buff,
+ ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+ }
+ } else { /* hash */
+ /* Copy the initial digests if hash flow. */
+ const void *larval = cc_larval_digest(dev, ctx->hash_mode);
+
+ memcpy(state->digest_buff, larval, ctx->inter_digestsize);
+ }
+}
+
+static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ state->digest_buff_dma_addr =
+ dma_map_single(dev, state->digest_buff,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
+ return -EINVAL;
+ }
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
+
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
+ state->digest_bytes_len_dma_addr =
+ dma_map_single(dev, state->digest_bytes_len,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
+ dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ HASH_MAX_LEN_SIZE, state->digest_bytes_len);
+ goto unmap_digest_buf;
+ }
+ dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ HASH_MAX_LEN_SIZE, state->digest_bytes_len,
+ &state->digest_bytes_len_dma_addr);
+ }
+
+ if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
+ state->opad_digest_dma_addr =
+ dma_map_single(dev, state->opad_digest_buff,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize,
+ state->opad_digest_buff);
+ goto unmap_digest_len;
+ }
+ dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ &state->opad_digest_dma_addr);
+ }
+
+ return 0;
+
+unmap_digest_len:
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+unmap_digest_buf:
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ state->digest_buff_dma_addr = 0;
+ }
+
+ return -EINVAL;
+}
+
+static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &state->digest_buff_dma_addr);
+ state->digest_buff_dma_addr = 0;
+ }
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ &state->digest_bytes_len_dma_addr);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+ if (state->opad_digest_dma_addr) {
+ dma_unmap_single(dev, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ &state->opad_digest_dma_addr);
+ state->opad_digest_dma_addr = 0;
+ }
+}
+
+static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize, u8 *result)
+{
+ if (state->digest_result_dma_addr) {
+ dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
+ DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ state->digest_result_buff,
+ &state->digest_result_dma_addr, digestsize);
+ memcpy(result, state->digest_result_buff, digestsize);
+ }
+ state->digest_result_dma_addr = 0;
+}
+
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ return idx;
+}
+
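+/*
+ * HMAC finalization: write out the inner digest, reload the
+ * (key XOR opad) state and the fixed one-block length, then hash the
+ * inner digest through it to produce H((K ^ opad) || inner digest).
+ */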
+static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* store the hash digest result in the context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx],
+ cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
+ ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ return idx;
+}
+
+static int cc_hash_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_digest_addr =
+ cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+ int idx = 0;
+ int rc = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
+
+ /* If HMAC, load the (key XOR ipad) hash state; if plain HASH,
+ * load the initial digest
+ */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ } else {
+ set_din_sram(&desc[idx], larval_digest_addr,
+ ctx->inter_digestsize);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI,
+ state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT);
+ } else {
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ if (nbytes)
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ else
+ set_cipher_do(&desc[idx], DO_PAD);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ if (is_hmac) {
+ /* HW last hash block padding (aka. "DO_PAD") */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_cipher_do(&desc[idx], DO_PAD);
+ idx++;
+
+ idx = cc_fin_hmac(desc, req, idx);
+ }
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
+ struct ahash_req_ctx *state, unsigned int idx)
+{
+ /* Restore hash digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ return idx;
+}
+
+static int cc_hash_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
+ "hmac" : "hash", nbytes);
+
+ if (nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+ block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+ dev_dbg(dev, " data size not require HW update %x\n",
+ nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ return -EINVAL;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* store current hash length in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* "DO-PAD" must be enabled only when writing current length to HW */
+ hw_desc_init(&desc[idx]);
+ set_cipher_do(&desc[idx], DO_PAD);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ idx++;
+
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_init(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ return 0;
+}
+
+static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = NULL;
+ int blocksize = 0;
+ int digestsize = 0;
+ int i, idx = 0, rc = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_addr;
+ struct device *dev;
+
+ ctx = crypto_ahash_ctx(ahash);
+ dev = drvdata_to_dev(ctx->drvdata);
+ dev_dbg(dev, "start keylen: %d", keylen);
+
+ blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ digestsize = crypto_ahash_digestsize(ahash);
+
+ larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+
+ /* A zero keylen selects the plain HASH flow; any non-zero keylen
+ * selects the HMAC flow
+ */
+ ctx->key_params.keylen = keylen;
+ ctx->key_params.key_dma_addr = 0;
+ ctx->is_hmac = true;
+
+ if (keylen) {
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr,
+ ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - digestsize));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ digestsize),
+ (blocksize - digestsize), NS_BIT, 0);
+ idx++;
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen)) {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0,
+ (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ keylen), (blocksize - keylen),
+ NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, blocksize);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
+ blocksize, NS_BIT, 0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ goto out;
+ }
+
+ /* Calculate the derived HMAC keys: iteration 0 hashes (key XOR ipad)
+ * into digest_buff (the inner state), iteration 1 hashes (key XOR
+ * opad) into opad_tmp_keys_buff (the outer state)
+ */
+ for (idx = 0, i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Load the ipad/opad constant for this iteration */
+ hw_desc_init(&desc[idx]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ blocksize, NS_BIT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_xor_active(&desc[idx]);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get the IPAD/OPAD xor key (note: the ipad result is the initial
+ * digest of the first HASH "update" state)
+ */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ if (i > 0) /* Not first iteration */
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ else /* First iteration */
+ set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+out:
+ if (rc)
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ if (ctx->key_params.key_dma_addr) {
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ }
+ return rc;
+}
+
+static int cc_xcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ int rc = 0;
+ unsigned int idx = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ ctx->is_hmac = true;
+ /* 1. Load the AES key */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
+ keylen, NS_BIT);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_key_size_aes(&desc[idx], keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+ if (rc)
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ return rc;
+}
+
+static int cc_cmac_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ ctx->is_hmac = true;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->opad_tmp_keys_buff, key, keylen);
+ if (keylen == 24) {
+ memset(ctx->opad_tmp_keys_buff + 24, 0,
+ CC_AES_KEY_SIZE_MAX - 24);
+ }
+
+ dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static void cc_free_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (ctx->digest_buff_dma_addr) {
+ dma_unmap_single(dev, ctx->digest_buff_dma_addr,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &ctx->digest_buff_dma_addr);
+ ctx->digest_buff_dma_addr = 0;
+ }
+ if (ctx->opad_tmp_keys_dma_addr) {
+ dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
+ &ctx->opad_tmp_keys_dma_addr);
+ ctx->opad_tmp_keys_dma_addr = 0;
+ }
+
+ ctx->key_params.keylen = 0;
+}
+
+static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ ctx->key_params.keylen = 0;
+
+ ctx->digest_buff_dma_addr =
+ dma_map_single(dev, (void *)ctx->digest_buff,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ &ctx->digest_buff_dma_addr);
+
+ ctx->opad_tmp_keys_dma_addr =
+ dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ &ctx->opad_tmp_keys_dma_addr);
+
+ ctx->is_hmac = false;
+ return 0;
+
+fail:
+ cc_free_ctx(ctx);
+ return -ENOMEM;
+}
+
+static int cc_cra_init(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct hash_alg_common *hash_alg_common =
+ container_of(tfm->__crt_alg, struct hash_alg_common, base);
+ struct ahash_alg *ahash_alg =
+ container_of(hash_alg_common, struct ahash_alg, halg);
+ struct cc_hash_alg *cc_alg =
+ container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_req_ctx));
+
+ ctx->hash_mode = cc_alg->hash_mode;
+ ctx->hw_mode = cc_alg->hw_mode;
+ ctx->inter_digestsize = cc_alg->inter_digestsize;
+ ctx->drvdata = cc_alg->drvdata;
+
+ return cc_alloc_ctx(ctx);
+}
+
+static void cc_cra_exit(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "cc_cra_exit");
+ cc_free_ctx(ctx);
+}
+
+static int cc_mac_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int rc;
+ u32 idx = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ if (req->nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ state->xcbc_count++;
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+ req->nbytes, block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+ dev_dbg(dev, " data size not require HW update %x\n",
+ req->nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ return -EINVAL;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_update_complete;
+ cc_req.user_arg = (void *)req;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_size, key_len;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+ u32 rem_cnt = *cc_hash_buf_cnt(state);
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_size = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ } else {
+ key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
+ }
+
+ dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (state->xcbc_count && rem_cnt == 0) {
+ /* Load key for ECB decryption */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ key_size, NS_BIT);
+ set_key_size_aes(&desc[idx], key_len);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Initiate decryption of block state to previous
+ * block_state-XOR-M[n]
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 0);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier: wait for axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
+ if (state->xcbc_count == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else if (rem_cnt > 0) {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_len = 0;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
+ if (state->xcbc_count > 0 && req->nbytes == 0) {
+ dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
+ return cc_mac_final(req);
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 key_len;
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_digest_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
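+/*
+ * Exported state layout (sized by CC_STATE_SIZE()): a u32 magic
+ * (CC_EXPORT_MAGIC), the intermediate digest, the running byte count,
+ * a u32 count of buffered bytes and the buffered data itself.
+ */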
+static int cc_hash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u8 *curr_buff = cc_hash_buf(state);
+ u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
+ const u32 tmp = CC_EXPORT_MAGIC;
+
+ memcpy(out, &tmp, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, state->digest_buff, ctx->inter_digestsize);
+ out += ctx->inter_digestsize;
+
+ memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
+ out += ctx->drvdata->hash_len_sz;
+
+ memcpy(out, &curr_buff_cnt, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, curr_buff, curr_buff_cnt);
+
+ return 0;
+}
+
+static int cc_hash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u32 tmp;
+
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp != CC_EXPORT_MAGIC)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ cc_init_req(dev, state, ctx);
+
+ memcpy(state->digest_buff, in, ctx->inter_digestsize);
+ in += ctx->inter_digestsize;
+
+ memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
+ in += ctx->drvdata->hash_len_sz;
+
+ /* Sanity check the data as much as possible */
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp > CC_MAX_HASH_BLCK_SIZE)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ state->buf_cnt[0] = tmp;
+ memcpy(state->buffers[0], in, tmp);
+
+ return 0;
+}
+
+struct cc_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char mac_name[CRYPTO_MAX_ALG_NAME];
+ char mac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ bool synchronize;
+ struct ahash_alg template_ahash;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+ u32 min_hw_rev;
+};
+
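+/* Size of the state serialized by cc_hash_export()/cc_hash_import() */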
+#define CC_STATE_SIZE(_x) \
+ ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
+
+/* hash descriptors */
+static struct cc_hash_template driver_hash[] = {
+ /* asynchronous hash templates */
+ {
+ .name = "sha1",
+ .driver_name = "sha1-ccree",
+ .mac_name = "hmac(sha1)",
+ .mac_driver_name = "hmac-sha1-ccree",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .synchronize = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA1,
+ .hw_mode = DRV_HASH_HW_SHA1,
+ .inter_digestsize = SHA1_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "sha256",
+ .driver_name = "sha256-ccree",
+ .mac_name = "hmac(sha256)",
+ .mac_driver_name = "hmac-sha256-ccree",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
+ },
+ },
+ .hash_mode = DRV_HASH_SHA256,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "sha224",
+ .driver_name = "sha224-ccree",
+ .mac_name = "hmac(sha224)",
+ .mac_driver_name = "hmac-sha224-ccree",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ },
+ },
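+ /* SHA-224 runs on the SHA-256 engine, hence the SHA-256 sized
+ * intermediate digest
+ */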
+ .hash_mode = DRV_HASH_SHA224,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "sha384",
+ .driver_name = "sha384-ccree",
+ .mac_name = "hmac(sha384)",
+ .mac_driver_name = "hmac-sha384-ccree",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA384,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "sha512",
+ .driver_name = "sha512-ccree",
+ .mac_name = "hmac(sha512)",
+ .mac_driver_name = "hmac-sha512-ccree",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA512,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "md5",
+ .driver_name = "md5-ccree",
+ .mac_name = "hmac(md5)",
+ .mac_driver_name = "hmac-md5-ccree",
+ .blocksize = MD5_HMAC_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_MD5,
+ .hw_mode = DRV_HASH_HW_MD5,
+ .inter_digestsize = MD5_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .mac_name = "xcbc(aes)",
+ .mac_driver_name = "xcbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_xcbc_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_XCBC_MAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .mac_name = "cmac(aes)",
+ .mac_driver_name = "cmac-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_cmac_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_CMAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+};
+
+static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
+ struct device *dev, bool keyed)
+{
+ struct cc_hash_alg *t_crypto_alg;
+ struct crypto_alg *alg;
+ struct ahash_alg *halg;
+
+ t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
+ if (!t_crypto_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_crypto_alg->ahash_alg = template->template_ahash;
+ halg = &t_crypto_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_driver_name);
+ } else {
+ halg->setkey = NULL;
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+ alg->cra_priority = CC_CRA_PRIO;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_exit = cc_cra_exit;
+
+ alg->cra_init = cc_cra_init;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->cra_type = &crypto_ahash_type;
+
+ t_crypto_alg->hash_mode = template->hash_mode;
+ t_crypto_alg->hw_mode = template->hw_mode;
+ t_crypto_alg->inter_digestsize = template->inter_digestsize;
+
+ return t_crypto_alg;
+}
+
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+ cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ unsigned int larval_seq_len = 0;
+ struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
+ int rc = 0;
+
+ /* Copy-to-sram digest-len */
+ cc_set_sram_desc(digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_init);
+ larval_seq_len = 0;
+
+ if (large_sha_supported) {
+ /* Copy-to-sram digest-len for sha384/512 */
+ cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_sha512_init),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_sha512_init);
+ larval_seq_len = 0;
+ }
+
+ /* The initial digests offset */
+ hash_handle->larval_digest_sram_addr = sram_buff_ofs;
+
+ /* Copy-to-sram initial SHA* digests */
+ cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(md5_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(sha1_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha1_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(sha224_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha224_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(sha256_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha256_init);
+ larval_seq_len = 0;
+
+ if (large_sha_supported) {
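+ /* sha384_init/sha512_init are u64 arrays but the SRAM copy
+ * works in u32 words, hence ARRAY_SIZE() * 2
+ */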
+ cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha384_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ }
+
+init_digest_const_err:
+ return rc;
+}
+
+static void __init cc_swap_dwords(u32 *buf, unsigned long size)
+{
+ unsigned long i;
+ u32 tmp;
+
+ for (i = 0; i < size; i += 2) {
+ tmp = buf[i];
+ buf[i] = buf[i + 1];
+ buf[i + 1] = tmp;
+ }
+}
+
+/*
+ * Due to the way the HW works we need to swap every
+ * double word in the SHA384 and SHA512 larval hashes
+ */
+void __init cc_hash_global_init(void)
+{
+ cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
+ cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle;
+ cc_sram_addr_t sram_buff;
+ u32 sram_size_to_alloc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+ int alg;
+
+ hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
+ if (!hash_handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hash_handle->hash_list);
+ drvdata->hash_handle = hash_handle;
+
+ sram_size_to_alloc = sizeof(digest_len_init) +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init);
+
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ sram_size_to_alloc += sizeof(digest_len_sha512_init) +
+ sizeof(sha384_init) + sizeof(sha512_init);
+
+ sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
+ if (sram_buff == NULL_SRAM_ADDR) {
+ dev_err(dev, "SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* The initial digest-len offset */
+ hash_handle->digest_len_sram_addr = sram_buff;
+
+ /* must be set before the alg registration, as it is used there */
+ rc = cc_init_hash_sram(drvdata);
+ if (rc) {
+ dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
+ goto fail;
+ }
+
+ /* ahash registration */
+ for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
+ struct cc_hash_alg *t_alg;
+ int hw_mode = driver_hash[alg].hw_mode;
+
+		/* We either support both HASH and MAC or neither */
+ if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
+ continue;
+
+ /* register hmac version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else {
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+
+ if (hw_mode == DRV_CIPHER_XCBC_MAC ||
+ hw_mode == DRV_CIPHER_CMAC)
+ continue;
+
+ /* register hash version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else {
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+ }
+
+ return 0;
+
+fail:
+ kfree(drvdata->hash_handle);
+ drvdata->hash_handle = NULL;
+ return rc;
+}
+
+int cc_hash_free(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_alg *t_hash_alg, *hash_n;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+
+ if (hash_handle) {
+ list_for_each_entry_safe(t_hash_alg, hash_n,
+ &hash_handle->hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+
+ kfree(hash_handle);
+ drvdata->hash_handle = NULL;
+ }
+ return 0;
+}
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup XCBC MAC K1 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Loading MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup CMAC Key */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen), NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Load MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
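[Editor's note: for orientation, these setup helpers are composed with cc_set_desc() below into one descriptor array before submission. A minimal sketch, assuming req/state/ctx come from the driver's update path; CC_MAX_HASH_SEQ_LEN is a hypothetical bound:]

	/* Sketch only - not part of the patch. Error handling elided. */
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];	/* hypothetical bound */
	unsigned int seq_len = 0;

	cc_setup_cmac(req, desc, &seq_len);		/* load CMAC key + state */
	cc_set_desc(state, ctx, DIN_AES_DOUT, desc,	/* stream one data chunk */
		    true /* more data follows */, &seq_len);
	/* desc[0..seq_len - 1] is then queued via the request manager. */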
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
+ struct cc_hash_ctx *ctx, unsigned int flow_mode,
+ struct cc_hw_desc desc[], bool is_not_last_data,
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ sg_dma_address(areq_ctx->curr_sg),
+ areq_ctx->curr_sg->length, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ } else {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+			dev_dbg(dev, "NULL mode\n");
+ /* nothing to build */
+ return;
+ }
+ /* bypass */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[idx], BYPASS);
+ idx++;
+ /* process */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI,
+ ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_nents, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ }
+ if (is_not_last_data)
+ set_din_not_last_indication(&desc[(idx - 1)]);
+ /* return updated desc sequence size */
+ *seq_size = idx;
+}
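[Editor's note: the MLLI branch above is a two-descriptor pattern; restated here as a comment sketch for readability:]

	/* MLLI data path in cc_set_desc(), step by step:
	 *  1. BYPASS descriptor: DMA the MLLI table itself (mlli_dma_addr,
	 *     mlli_len) from host memory into mlli_sram_addr in SRAM.
	 *  2. Flow-mode descriptor: walk the SRAM-resident table (DMA_MLLI,
	 *     mlli_nents entries) and feed the scattered buffers to the engine.
	 */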
+
+static const void *cc_larval_digest(struct device *dev, u32 mode)
+{
+ switch (mode) {
+ case DRV_HASH_MD5:
+ return md5_init;
+ case DRV_HASH_SHA1:
+ return sha1_init;
+ case DRV_HASH_SHA224:
+ return sha224_init;
+ case DRV_HASH_SHA256:
+ return sha256_init;
+ case DRV_HASH_SHA384:
+ return sha384_init;
+ case DRV_HASH_SHA512:
+ return sha512_init;
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ return md5_init;
+ }
+}
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ *	       MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct device *dev = drvdata_to_dev(_drvdata);
+
+ switch (mode) {
+ case DRV_HASH_NULL:
+		break; /* Ignore */
+ case DRV_HASH_MD5:
+ return (hash_handle->larval_digest_sram_addr);
+ case DRV_HASH_SHA1:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init));
+ case DRV_HASH_SHA224:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init));
+ case DRV_HASH_SHA256:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init));
+ case DRV_HASH_SHA384:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init));
+ case DRV_HASH_SHA512:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init) +
+ sizeof(sha384_init));
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ }
+
+	/* Return a valid (if incorrect) address to avoid a kernel crash */
+ return hash_handle->larval_digest_sram_addr;
+}
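[Editor's note: the switch above is just a running sum over the preceding larval arrays. A hedged, table-driven sketch of the same arithmetic; larval_addr_sketch is illustrative, not part of the driver:]

	static const size_t larval_sizes[] = {
		sizeof(md5_init), sizeof(sha1_init), sizeof(sha224_init),
		sizeof(sha256_init), sizeof(sha384_init),
	};

	/* idx = 0..5 maps MD5..SHA512, mirroring the switch above. */
	static cc_sram_addr_t larval_addr_sketch(cc_sram_addr_t base, int idx)
	{
		int i;

		for (i = 0; i < idx; i++)
			base += larval_sizes[i];
		return base;
	}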
+
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+
+ switch (mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA224:
+ case DRV_HASH_SHA256:
+ case DRV_HASH_MD5:
+ return digest_len_addr;
+#if (CC_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ case DRV_HASH_SHA512:
+ return digest_len_addr + sizeof(digest_len_init);
+#endif
+ default:
+		return digest_len_addr; /* to avoid a kernel crash */
+ }
+}
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
new file mode 100644
index 000000000000..2e5bf8b0bbb6
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_hash.h
+ * ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __CC_HASH_H__
+#define __CC_HASH_H__
+
+#include "cc_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST 0x36363636
+#define HMAC_OPAD_CONST 0x5C5C5C5C
+#define HASH_LEN_SIZE_712 16
+#define HASH_LEN_SIZE_630 8
+#define HASH_MAX_LEN_SIZE HASH_LEN_SIZE_712
+#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
+/* This struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and is used
+ * for the xcbc/cmac statesize
+ */
+struct aeshash_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+ u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
+ u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_bytes_len[HASH_MAX_LEN_SIZE] ____cacheline_aligned;
+ struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
+ enum cc_req_dma_buf_type data_dma_buf_type;
+ dma_addr_t opad_digest_dma_addr;
+ dma_addr_t digest_buff_dma_addr;
+ dma_addr_t digest_bytes_len_dma_addr;
+ dma_addr_t digest_result_dma_addr;
+ u32 buf_cnt[2];
+ u32 buff_index;
+	u32 xcbc_count; /* count xcbc update operations */
+ struct scatterlist buff_sg[2];
+ struct scatterlist *curr_sg;
+ u32 in_nents;
+ u32 mlli_nents;
+ struct mlli_params mlli_params;
+};
+
+static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index];
+}
+
+static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index];
+}
+
+static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index ^ 1];
+}
+
+static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index ^ 1];
+}
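[Editor's note: these four accessors implement a two-buffer ping-pong; buff_index selects the active staging buffer and XOR-ing with 1 addresses its partner. A minimal sketch of how an update path might flip buffers, assuming req, data and tail_len come from the caller:]

	/* Sketch only: stage a partial block, then swap buffers. */
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u8 *cur = cc_hash_buf(state);
	u32 *cur_cnt = cc_hash_buf_cnt(state);

	memcpy(cur + *cur_cnt, data, tail_len);	/* accumulate the remainder */
	*cur_cnt += tail_len;

	state->buff_index ^= 1;			/* spare buffer becomes current */
	*cc_hash_buf_cnt(state) = 0;		/* new current buffer starts empty */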
+
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
+
+/*!
+ * Gets the address of the initial digest length in SRAM
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest length in SRAM
+ */
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode);
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+
+void cc_hash_global_init(void);
+
+#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
new file mode 100644
index 000000000000..f51001898ca1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HOST_H__
+#define __CC_HOST_H__
+
+// --------------------------------------
+// BLOCK: HOST_P
+// --------------------------------------
+#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
+#define CC_HOST_IMR_REG_OFFSET 0xA04UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_REG_OFFSET 0xA08UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
+#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_GPR0_REG_OFFSET 0xA70UL
+#define CC_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
+#define CC_GPR_HOST_REG_OFFSET 0xA74UL
+#define CC_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
+#define CC_GPR_HOST_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: HOST_SRAM
+// --------------------------------------
+#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
+#define CC_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
+#define CC_SRAM_ADDR_REG_OFFSET 0xF04UL
+#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
+#define CC_SRAM_DATA_READY_REG_OFFSET 0xF08UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
+
+#endif //__CC_HOST_H__
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
new file mode 100644
index 000000000000..a091ae57f902
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -0,0 +1,576 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HW_QUEUE_DEFS_H__
+#define __CC_HW_QUEUE_DEFS_H__
+
+#include <linux/types.h>
+
+#include "cc_kernel_regs.h"
+#include <linux/bitfield.h>
+
+/******************************************************************************
+ * DEFINITIONS
+ ******************************************************************************/
+
+#define HW_DESC_SIZE_WORDS 6
+/* Define max. available slots in HW queue */
+#define HW_QUEUE_SLOTS_MAX 15
+
+#define CC_REG_LOW(word, name) \
+ (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
+
+#define CC_REG_HIGH(word, name) \
+ (CC_REG_LOW(word, name) + \
+ CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
+
+#define CC_GENMASK(word, name) \
+ GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
+
+#define WORD0_VALUE CC_GENMASK(0, VALUE)
+#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
+#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
+#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
+#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
+#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
+#define WORD2_VALUE CC_GENMASK(2, VALUE)
+#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
+#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
+#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE)
+#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT)
+#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT)
+#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
+#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
+#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
+#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
+#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
+#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2)
+#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO)
+#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE)
+#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0)
+#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
+#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
+#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
+#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
+#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
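[Editor's note: to make the token pasting concrete, here is one expansion worked out against the register definitions in cc_kernel_regs.h further down (DIN_SIZE sits at shift 0x2 with size 0x18 in word 1):]

	/* Worked expansion:
	 *   CC_REG_LOW(1, DIN_SIZE)  -> CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT = 2
	 *   CC_REG_HIGH(1, DIN_SIZE) -> 2 + 0x18 - 1                              = 25
	 *   WORD1_DIN_SIZE           -> GENMASK(25, 2) == 0x03fffffc
	 */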
+
+/******************************************************************************
+ * TYPE DEFINITIONS
+ ******************************************************************************/
+
+struct cc_hw_desc {
+ union {
+ u32 word[HW_DESC_SIZE_WORDS];
+ u16 hword[HW_DESC_SIZE_WORDS * 2];
+ };
+};
+
+enum cc_axi_sec {
+ AXI_SECURE = 0,
+ AXI_NOT_SECURE = 1
+};
+
+enum cc_desc_direction {
+ DESC_DIRECTION_ILLEGAL = -1,
+ DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
+ DESC_DIRECTION_DECRYPT_DECRYPT = 1,
+ DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DESC_DIRECTION_END = S32_MAX,
+};
+
+enum cc_dma_mode {
+ DMA_MODE_NULL = -1,
+ NO_DMA = 0,
+ DMA_SRAM = 1,
+ DMA_DLLI = 2,
+ DMA_MLLI = 3,
+ DMA_MODE_END = S32_MAX,
+};
+
+enum cc_flow_mode {
+ FLOW_MODE_NULL = -1,
+ /* data flows */
+ BYPASS = 0,
+ DIN_AES_DOUT = 1,
+ AES_to_HASH = 2,
+ AES_and_HASH = 3,
+ DIN_DES_DOUT = 4,
+ DES_to_HASH = 5,
+ DES_and_HASH = 6,
+ DIN_HASH = 7,
+ DIN_HASH_and_BYPASS = 8,
+ AESMAC_and_BYPASS = 9,
+ AES_to_HASH_and_DOUT = 10,
+ DIN_RC4_DOUT = 11,
+ DES_to_HASH_and_DOUT = 12,
+ AES_to_AES_to_HASH_and_DOUT = 13,
+ AES_to_AES_to_HASH = 14,
+ AES_to_HASH_and_AES = 15,
+ DIN_AES_AESMAC = 17,
+ HASH_to_DOUT = 18,
+ /* setup flows */
+ S_DIN_to_AES = 32,
+ S_DIN_to_AES2 = 33,
+ S_DIN_to_DES = 34,
+ S_DIN_to_RC4 = 35,
+ S_DIN_to_HASH = 37,
+ S_AES_to_DOUT = 38,
+ S_AES2_to_DOUT = 39,
+ S_RC4_to_DOUT = 41,
+ S_DES_to_DOUT = 42,
+ S_HASH_to_DOUT = 43,
+ SET_FLOW_ID = 44,
+ FLOW_MODE_END = S32_MAX,
+};
+
+enum cc_setup_op {
+ SETUP_LOAD_NOP = 0,
+ SETUP_LOAD_STATE0 = 1,
+ SETUP_LOAD_STATE1 = 2,
+ SETUP_LOAD_STATE2 = 3,
+ SETUP_LOAD_KEY0 = 4,
+ SETUP_LOAD_XEX_KEY = 5,
+ SETUP_WRITE_STATE0 = 8,
+ SETUP_WRITE_STATE1 = 9,
+ SETUP_WRITE_STATE2 = 10,
+ SETUP_WRITE_STATE3 = 11,
+ SETUP_OP_END = S32_MAX,
+};
+
+enum cc_hash_conf_pad {
+ HASH_PADDING_DISABLED = 0,
+ HASH_PADDING_ENABLED = 1,
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+ HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
+};
+
+enum cc_aes_mac_selector {
+ AES_SK = 1,
+ AES_CMAC_INIT = 2,
+ AES_CMAC_SIZE0 = 3,
+ AES_MAC_END = S32_MAX,
+};
+
+#define HW_KEY_MASK_CIPHER_DO 0x3
+#define HW_KEY_SHIFT_CIPHER_CFG2 2
+
+/* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
+/* HwCryptoKey[3:2] is mapped to cipher_config2[1:0] */
+enum cc_hw_crypto_key {
+ USER_KEY = 0, /* 0x0000 */
+ ROOT_KEY = 1, /* 0x0001 */
+ PROVISIONING_KEY = 2, /* 0x0010 */ /* ==KCP */
+ SESSION_KEY = 3, /* 0x0011 */
+ RESERVED_KEY = 4, /* NA */
+ PLATFORM_KEY = 5, /* 0x0101 */
+ CUSTOMER_KEY = 6, /* 0x0110 */
+ KFDE0_KEY = 7, /* 0x0111 */
+ KFDE1_KEY = 9, /* 0x1001 */
+ KFDE2_KEY = 10, /* 0x1010 */
+ KFDE3_KEY = 11, /* 0x1011 */
+ END_OF_KEYS = S32_MAX,
+};
+
+enum cc_hw_aes_key_size {
+ AES_128_KEY = 0,
+ AES_192_KEY = 1,
+ AES_256_KEY = 2,
+ END_OF_AES_KEYS = S32_MAX,
+};
+
+enum cc_hash_cipher_pad {
+ DO_NOT_PAD = 0,
+ DO_PAD = 1,
+ HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
+};
+
+/*****************************/
+/* Descriptor packing macros */
+/*****************************/
+
+/*
+ * Init a HW descriptor struct
+ * @pdesc: pointer to HW descriptor struct
+ */
+static inline void hw_desc_init(struct cc_hw_desc *pdesc)
+{
+ memset(pdesc, 0, sizeof(struct cc_hw_desc));
+}
+
+/*
+ * Indicate the end of the current HW descriptor flow and release
+ * the HW engines.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ */
+static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
+}
+
+/*
+ * Set the DIN field of a HW descriptor
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DIN address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_din_type(struct cc_hw_desc *pdesc,
+ enum cc_dma_mode dma_mode, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec)
+{
+ pdesc->word[0] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
+#endif
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
+ FIELD_PREP(WORD1_DIN_SIZE, size) |
+ FIELD_PREP(WORD1_NS_BIT, axi_sec);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+ pdesc->word[0] = addr;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to SRAM mode.
+ * Note: No need to check SRAM alignment since host requests do not use SRAM
+ * and the adaptor will enforce the alignment check.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
+ u32 size)
+{
+ pdesc->word[0] = (u32)addr;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
+ FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to CONST mode
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @val: DIN const value
+ * @size: Data size in bytes
+ */
+static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
+{
+ pdesc->word[0] = val;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_CONST_VALUE, 1) |
+ FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM) |
+ FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/*
+ * Set the DIN not last input data indicator
+ *
+ * @pdesc: pointer to HW descriptor struct
+ */
+static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_type(struct cc_hw_desc *pdesc,
+ enum cc_dma_mode dma_mode, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec)
+{
+ pdesc->word[2] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
+#endif
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
+ FIELD_PREP(WORD3_DOUT_SIZE, size) |
+ FIELD_PREP(WORD3_NS_BIT, axi_sec);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to DLLI type.
+ * The LAST INDICATION is provided by the user.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec,
+ u32 last_ind)
+{
+ set_dout_type(pdesc, DMA_DLLI, addr, size, axi_sec);
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to MLLI type.
+ * The LAST INDICATION is provided by the user.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec,
+ bool last_ind)
+{
+ set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @write_enable: Enables a write operation to a register
+ */
+static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
+ u32 size, bool write_enable)
+{
+ pdesc->word[2] = addr;
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_SIZE, size) |
+ FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
+}
+
+/*
+ * Set the word for the XOR operation.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @val: xor data value
+ */
+static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
+{
+ pdesc->word[2] = val;
+}
+
+/*
+ * Sets the XOR indicator bit in the descriptor
+ *
+ * @pdesc: pointer to HW descriptor struct
+ */
+static inline void set_xor_active(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
+}
+
+/*
+ * Select the AES engine instead of HASH engine when setting up combined mode
+ * with AES XCBC MAC
+ *
+ * @pdesc: pointer to HW descriptor struct
+ */
+static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to SRAM mode.
+ * Note: No need to check SRAM alignment since host requests do not use SRAM
+ * and the adaptor will enforce the alignment check.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ */
+static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+ pdesc->word[2] = addr;
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, DMA_SRAM) |
+ FIELD_PREP(WORD3_DOUT_SIZE, size);
+}
+
+/*
+ * Sets the data unit size for XEX mode in data_out_addr[15:0]
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @size: data unit size for XEX mode
+ */
+static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
+{
+ pdesc->word[2] = size;
+}
+
+/*
+ * Set the number of rounds for Multi2 in data_out_addr[15:0]
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @num: number of rounds for Multi2
+ */
+static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
+{
+ pdesc->word[2] = num;
+}
+
+/*
+ * Set the flow mode.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_flow_mode(struct cc_hw_desc *pdesc,
+ enum cc_flow_mode mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
+}
+
+/*
+ * Set the cipher mode.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
+ enum drv_cipher_mode mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
+ enum drv_crypto_direction mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
+ enum cc_hash_conf_pad config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
+}
+
+/*
+ * Set HW key configuration fields.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
+ */
+static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
+ enum cc_hw_crypto_key hw_key)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+ (hw_key & HW_KEY_MASK_CIPHER_DO)) |
+ FIELD_PREP(WORD4_CIPHER_CONF2,
+ (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
+}
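[Editor's note: a worked example of the two-field split, using the enum values above:]

	/* Worked example for set_hw_crypto_key(pdesc, KFDE1_KEY):
	 *   KFDE1_KEY = 9 = 0b1001
	 *   cipher_do      = 9 & HW_KEY_MASK_CIPHER_DO     = 0b01
	 *   cipher_config2 = 9 >> HW_KEY_SHIFT_CIPHER_CFG2 = 0b10
	 */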
+
+/*
+ * Set byte order of all setup-finalize descriptors.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
+}
+
+/*
+ * Set CMAC_SIZE0 mode.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ */
+static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
+}
+
+/*
+ * Set key size descriptor field.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
+}
+
+/*
+ * Set AES key size.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
+{
+ set_key_size(pdesc, ((size >> 3) - 2));
+}
+
+/*
+ * Set DES key size.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
+{
+ set_key_size(pdesc, ((size >> 3) - 1));
+}
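[Editor's note: the size-code arithmetic in the two helpers above maps key bytes onto the HW size codes (cf. enum cc_hw_aes_key_size); spelled out:]

	/* Worked mapping (key size in bytes -> HW size code):
	 *   AES: (16 >> 3) - 2 = 0 (AES_128_KEY)
	 *        (24 >> 3) - 2 = 1 (AES_192_KEY)
	 *        (32 >> 3) - 2 = 2 (AES_256_KEY)
	 *   DES: ( 8 >> 3) - 1 = 0 (single DES)
	 *        (24 >> 3) - 1 = 2 (triple DES, EDE keying)
	 */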
+
+/*
+ * Set the descriptor setup mode
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @mode: Any one of the setup modes defined in [CC7x-DESC]
+ */
+static inline void set_setup_mode(struct cc_hw_desc *pdesc,
+ enum cc_setup_op mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
+}
+
+/*
+ * Set the descriptor cipher DO
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @config: Any one of the cipher do defined in [CC7x-DESC]
+ */
+static inline void set_cipher_do(struct cc_hw_desc *pdesc,
+ enum cc_hash_cipher_pad config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+ (config & HW_KEY_MASK_CIPHER_DO));
+}
+
+#endif /*__CC_HW_QUEUE_DEFS_H__*/
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
new file mode 100644
index 000000000000..769458323394
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <crypto/ctr.h>
+#include "cc_driver.h"
+#include "cc_ivgen.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_buffer_mgr.h"
+
+/* The max. size of pool *MUST* be <= SRAM total size */
+#define CC_IVPOOL_SIZE 1024
+/* The first 32 B of the pool are dedicated to the
+ * next encryption "key" & "IV" for pool regeneration
+ */
+#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
+#define CC_IVPOOL_GEN_SEQ_LEN 4
+
+/**
+ * struct cc_ivgen_ctx -IV pool generation context
+ * @pool: the start address of the iv-pool resides in internal RAM
+ * @ctr_key: address of pool's encryption key material in internal RAM
+ * @ctr_iv: address of pool's counter iv in internal RAM
+ * @next_iv_ofs: the offset to the next available IV in pool
+ * @pool_meta: virt. address of the initial enc. key/IV
+ * @pool_meta_dma: phys. address of the initial enc. key/IV
+ */
+struct cc_ivgen_ctx {
+ cc_sram_addr_t pool;
+ cc_sram_addr_t ctr_key;
+ cc_sram_addr_t ctr_iv;
+ u32 next_iv_ofs;
+ u8 *pool_meta;
+ dma_addr_t pool_meta_dma;
+};
+
+/*!
+ * Generates CC_IVPOOL_SIZE random bytes by
+ * encrypting 0's using AES128-CTR.
+ *
+ * \param ivgen iv-pool context
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ */
+static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
+{
+ unsigned int idx = *iv_seq_len;
+
+ if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+ /* Setup key */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
+ set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+ set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
+ idx++;
+
+ /* Setup cipher state */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
+ set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+ set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
+ set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
+ idx++;
+
+ /* Perform dummy encrypt to skip first block */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
+ set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
+ set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Generate IV pool */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
+ set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
+ set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
+ idx++;
+
+ *iv_seq_len = idx; /* Update sequence length */
+
+ /* queue ordering assures pool readiness */
+ ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
+
+ return 0;
+}
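[Editor's note: numerically, with CC_IVPOOL_SIZE = 1024 and CC_IVPOOL_META_SIZE = 32, each generation yields 992 bytes of usable IV material; layout as set up in cc_init_iv_sram() below:]

	/* Pool layout after cc_gen_iv_pool() (offsets within the SRAM pool):
	 *   [0, 16)    AES-128 key for the next regeneration (ctr_key)
	 *   [16, 32)   CTR IV for the next regeneration (ctr_iv)
	 *   [32, 1024) IV material: 992 bytes = 62 IVs of CC_AES_IV_SIZE (16)
	 * next_iv_ofs starts at CC_IVPOOL_META_SIZE and advances per
	 * handed-out IV.
	 */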
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata)
+{
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+ unsigned int iv_seq_len = 0;
+ int rc;
+
+ /* Generate initial enc. key/iv */
+ get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
+
+	/* The first 32 B are reserved for the enc. key/IV */
+ ivgen_ctx->ctr_key = ivgen_ctx->pool;
+ ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
+
+	/* Copy the initial enc. key and IV to SRAM in a single descriptor */
+ hw_desc_init(&iv_seq[iv_seq_len]);
+ set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
+ CC_IVPOOL_META_SIZE, NS_BIT);
+ set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
+ CC_IVPOOL_META_SIZE);
+ set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
+ iv_seq_len++;
+
+ /* Generate initial pool */
+ rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
+ if (rc)
+ return rc;
+
+ /* Fire-and-forget */
+ return send_request_init(drvdata, iv_seq, iv_seq_len);
+}
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct device *device = &drvdata->plat_dev->dev;
+
+ if (!ivgen_ctx)
+ return;
+
+ if (ivgen_ctx->pool_meta) {
+ memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
+ dma_free_coherent(device, CC_IVPOOL_META_SIZE,
+ ivgen_ctx->pool_meta,
+ ivgen_ctx->pool_meta_dma);
+ }
+
+ ivgen_ctx->pool = NULL_SRAM_ADDR;
+
+ /* release "this" context */
+ kfree(ivgen_ctx);
+}
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata)
+{
+ struct cc_ivgen_ctx *ivgen_ctx;
+ struct device *device = &drvdata->plat_dev->dev;
+ int rc;
+
+ /* Allocate "this" context */
+ ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ if (!ivgen_ctx)
+ return -ENOMEM;
+
+ /* Allocate pool's header for initial enc. key/IV */
+ ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
+ &ivgen_ctx->pool_meta_dma,
+ GFP_KERNEL);
+ if (!ivgen_ctx->pool_meta) {
+ dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
+ CC_IVPOOL_META_SIZE);
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* Allocate IV pool in SRAM */
+ ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
+ if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
+ dev_err(device, "SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ drvdata->ivgen_handle = ivgen_ctx;
+
+ return cc_init_iv_sram(drvdata);
+
+out:
+ cc_ivgen_fini(drvdata);
+ return rc;
+}
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements
+ *			of iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
+{
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ unsigned int idx = *iv_seq_len;
+ struct device *dev = drvdata_to_dev(drvdata);
+ unsigned int t;
+
+ if (iv_out_size != CC_AES_IV_SIZE &&
+ iv_out_size != CTR_RFC3686_IV_SIZE) {
+ return -EINVAL;
+ }
+ if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+
+	/* Check that the number of generated IVs does not exceed the
+	 * maximum number of DMA IV addresses
+ */
+ if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+
+ for (t = 0; t < iv_out_dma_len; t++) {
+ /* Acquire IV from pool */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
+ ivgen_ctx->next_iv_ofs),
+ iv_out_size);
+ set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
+ NS_BIT, 0);
+ set_flow_mode(&iv_seq[idx], BYPASS);
+ idx++;
+ }
+
+	/* The bypass operation is followed by a crypto sequence, so a
+	 * memory barrier must ensure the bypass write has completed
+ */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
+ set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
+ idx++;
+
+ *iv_seq_len = idx; /* update seq length */
+
+ /* Update iv index */
+ ivgen_ctx->next_iv_ofs += iv_out_size;
+
+ if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
+ dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
+		/* Pool is drained - regenerate it! */
+ return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
+ }
+
+ return 0;
+}
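[Editor's note: a minimal usage sketch, assuming iv_dma is an already-mapped DMA address owned by the caller; error handling elided:]

	/* Sketch: acquire one 16-byte IV into a caller-owned DMA buffer. */
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	unsigned int iv_seq_len = 0;
	dma_addr_t iv_dma_arr[] = { iv_dma };
	int rc;

	rc = cc_get_iv(drvdata, iv_dma_arr, 1, CC_AES_IV_SIZE,
		       iv_seq, &iv_seq_len);
	/* On success, iv_seq[0..iv_seq_len - 1] is prepended to the
	 * crypto sequence that consumes the IV.
	 */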
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
new file mode 100644
index 000000000000..b6ac16903dda
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_IVGEN_H__
+#define __CC_IVGEN_H__
+
+#include "cc_hw_queue_defs.h"
+
+#define CC_IVPOOL_SEQ_LEN 8
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata);
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
+ *			iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
+
+#endif /*__CC_IVGEN_H__*/
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
new file mode 100644
index 000000000000..8d7262a35156
--- /dev/null
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_CRYS_KERNEL_H__
+#define __CC_CRYS_KERNEL_H__
+
+// --------------------------------------
+// BLOCK: DSCRPTR
+// --------------------------------------
+#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
+// --------------------------------------
+// BLOCK: AXI_P
+// --------------------------------------
+#define CC_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_COMP_REG_OFFSET 0xB80UL
+#define CC_AXIM_MON_COMP8_REG_OFFSET 0xBA0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
+#define CC_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
+#define CC_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
+#define CC_AXIM_CFG_REG_OFFSET 0xBE8UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
+#endif // __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
new file mode 100644
index 000000000000..64b15ac9f1d3
--- /dev/null
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef _CC_LLI_DEFS_H_
+#define _CC_LLI_DEFS_H_
+
+#include <linux/types.h>
+
+/* Max DLLI size
+ * AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+ */
+#define DLLI_SIZE_BIT_SIZE 0x18
+
+#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
+
+#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
+#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
+#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
+ (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
+
+/* Size of entry */
+#define LLI_ENTRY_WORD_SIZE 2
+#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* Word0[31:0] = ADDR[31:0] */
+#define LLI_WORD0_OFFSET 0
+#define LLI_LADDR_BIT_OFFSET 0
+#define LLI_LADDR_BIT_SIZE 32
+/* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */
+#define LLI_WORD1_OFFSET 1
+#define LLI_SIZE_BIT_OFFSET 0
+#define LLI_SIZE_BIT_SIZE 16
+#define LLI_HADDR_BIT_OFFSET 16
+#define LLI_HADDR_BIT_SIZE 16
+
+#define LLI_SIZE_MASK GENMASK((LLI_SIZE_BIT_SIZE - 1), LLI_SIZE_BIT_OFFSET)
+#define LLI_HADDR_MASK GENMASK( \
+ (LLI_HADDR_BIT_OFFSET + LLI_HADDR_BIT_SIZE - 1),\
+ LLI_HADDR_BIT_OFFSET)
+
+static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
+{
+ lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+}
+
+static inline void cc_lli_set_size(u32 *lli_p, u16 size)
+{
+ lli_p[LLI_WORD1_OFFSET] &= ~LLI_SIZE_MASK;
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_SIZE_MASK, size);
+}
+
+#endif /*_CC_LLI_DEFS_H_*/
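Word0 carries ADDR[31:0] while Word1 packs ADDR[47:32] above a 16-bit SIZE field, as the accessors above show. A small userspace sketch of the same packing, with the kernel's GENMASK/FIELD_PREP open-coded as an assumption since those macros live in the tree:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define SIZE_MASK  0x0000FFFFu	/* Word1[15:0]  = SIZE        */
#define HADDR_MASK 0xFFFF0000u	/* Word1[31:16] = ADDR[47:32] */

static void lli_set(uint32_t lli[2], uint64_t addr, uint16_t size)
{
	lli[0] = (uint32_t)(addr & 0xFFFFFFFFu);	/* ADDR[31:0] */
	lli[1] = (lli[1] & ~HADDR_MASK) |
		 (((uint32_t)(addr >> 32) << 16) & HADDR_MASK);
	lli[1] = (lli[1] & ~SIZE_MASK) | (size & SIZE_MASK);
}

int main(void)
{
	uint32_t lli[2] = { 0, 0 };

	lli_set(lli, 0x123456789ABCull, 0x1000);
	printf("word0=0x%08" PRIX32 " word1=0x%08" PRIX32 "\n",
	       lli[0], lli[1]);	/* word0=0x56789ABC word1=0x12341000 */
	return 0;
}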
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
new file mode 100644
index 000000000000..d990f472e89f
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_hash.h"
+#include "cc_pm.h"
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+const struct dev_pm_ops ccree_pm = {
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
+
+int cc_pm_suspend(struct device *dev)
+{
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ rc = cc_suspend_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
+ return rc;
+ }
+ fini_cc_regs(drvdata);
+ cc_clk_off(drvdata);
+ return 0;
+}
+
+int cc_pm_resume(struct device *dev)
+{
+ int rc;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+
+ rc = cc_clk_on(drvdata);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ rc = init_cc_regs(drvdata, false);
+ if (rc) {
+ dev_err(dev, "init_cc_regs (%x)\n", rc);
+ return rc;
+ }
+
+ rc = cc_resume_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
+ return rc;
+ }
+
+ /* must be after the queue is resumed, as it uses the HW queue */
+ cc_init_hash_sram(drvdata);
+
+ cc_init_iv_sram(drvdata);
+ return 0;
+}
+
+int cc_pm_get(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (cc_req_queue_suspended(drvdata))
+ rc = pm_runtime_get_sync(dev);
+ else
+ pm_runtime_get_noresume(dev);
+
+ return rc;
+}
+
+int cc_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (!cc_req_queue_suspended(drvdata)) {
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ } else {
+ /* Something went wrong */
+ dev_err(dev, "request to suspend already suspended queue");
+ rc = -EBUSY;
+ }
+ return rc;
+}
+
+int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ int rc = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* must be set before enabling PM to avoid redundant suspends */
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* activate the PM module */
+ rc = pm_runtime_set_active(dev);
+ if (rc)
+ return rc;
+ /* enable the PM module*/
+ pm_runtime_enable(dev);
+
+ return rc;
+}
+
+void cc_pm_fini(struct cc_drvdata *drvdata)
+{
+ pm_runtime_disable(drvdata_to_dev(drvdata));
+}
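The suspend/resume pair above follows the standard runtime-PM autosuspend idiom: take a reference before touching the hardware, then mark-busy and put with autosuspend once done. A hedged sketch of that idiom in a generic driver context; the my_dev_* names are hypothetical:

#include <linux/pm_runtime.h>

/* Hypothetical consumer of the autosuspend scheme configured in
 * cc_pm_init() above.
 */
static int my_dev_begin_io(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);	/* resumes HW if suspended */
	if (rc < 0) {
		pm_runtime_put_noidle(dev);	/* drop the failed reference */
		return rc;
	}
	/* ... program the hardware here ... */
	return 0;
}

static void my_dev_end_io(struct device *dev)
{
	/* suspend only after the autosuspend delay passes with no I/O */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}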
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
new file mode 100644
index 000000000000..020a5403c58b
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_pm.h
+ */
+
+#ifndef __CC_POWER_MGR_H__
+#define __CC_POWER_MGR_H__
+
+#include "cc_driver.h"
+
+#define CC_SUSPEND_TIMEOUT 3000
+
+#if defined(CONFIG_PM)
+
+extern const struct dev_pm_ops ccree_pm;
+
+int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_fini(struct cc_drvdata *drvdata);
+int cc_pm_suspend(struct device *dev);
+int cc_pm_resume(struct device *dev);
+int cc_pm_get(struct device *dev);
+int cc_pm_put_suspend(struct device *dev);
+
+#else
+
+static inline int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
+
+static inline int cc_pm_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_resume(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_get(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_put_suspend(struct device *dev)
+{
+ return 0;
+}
+
+#endif
+
+#endif /*__CC_POWER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
new file mode 100644
index 000000000000..83a8aaae61c7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_pm.h"
+
+#define CC_MAX_POLL_ITER 10
+/* The highest descriptor count in use */
+#define CC_MAX_DESC_SEQ_LEN 23
+
+struct cc_req_mgr_handle {
+ /* Request manager resources */
+ unsigned int hw_queue_size; /* HW capability */
+ unsigned int min_free_hw_slots;
+ unsigned int max_used_sw_slots;
+ struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+ u32 req_queue_head;
+ u32 req_queue_tail;
+ u32 axi_completed;
+ u32 q_free_slots;
+ /* This lock protects access to the HW registers
+ * that must be accessed by a single request at a time
+ */
+ spinlock_t hw_lock;
+ struct cc_hw_desc compl_desc;
+ u8 *dummy_comp_buff;
+ dma_addr_t dummy_comp_buff_dma;
+
+ /* backlog queue */
+ struct list_head backlog;
+ unsigned int bl_len;
+ spinlock_t bl_lock; /* protect backlog queue */
+
+#ifdef COMP_IN_WQ
+ struct workqueue_struct *workq;
+ struct delayed_work compwork;
+#else
+ struct tasklet_struct comptask;
+#endif
+ bool is_runtime_suspended;
+};
+
+struct cc_bl_item {
+ struct cc_crypto_req creq;
+ struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
+ unsigned int len;
+ struct list_head list;
+ bool notif;
+};
+
+static void comp_handler(unsigned long devarg);
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work);
+#endif
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (!req_mgr_h)
+ return; /* Not allocated */
+
+ if (req_mgr_h->dummy_comp_buff_dma) {
+ dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
+ req_mgr_h->dummy_comp_buff_dma);
+ }
+
+ dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ req_mgr_h->min_free_hw_slots));
+ dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+
+#ifdef COMP_IN_WQ
+ flush_workqueue(req_mgr_h->workq);
+ destroy_workqueue(req_mgr_h->workq);
+#else
+ /* Kill tasklet */
+ tasklet_kill(&req_mgr_h->comptask);
+#endif
+ kzfree(req_mgr_h);
+ drvdata->request_mgr_handle = NULL;
+}
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+
+ req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
+ if (!req_mgr_h) {
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ drvdata->request_mgr_handle = req_mgr_h;
+
+ spin_lock_init(&req_mgr_h->hw_lock);
+ spin_lock_init(&req_mgr_h->bl_lock);
+ INIT_LIST_HEAD(&req_mgr_h->backlog);
+
+#ifdef COMP_IN_WQ
+ dev_dbg(dev, "Initializing completion workqueue\n");
+ req_mgr_h->workq = create_singlethread_workqueue("ccree");
+ if (!req_mgr_h->workq) {
+ dev_err(dev, "Failed creating work queue\n");
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
+#else
+ dev_dbg(dev, "Initializing completion tasklet\n");
+ tasklet_init(&req_mgr_h->comptask, comp_handler,
+ (unsigned long)drvdata);
+#endif
+ req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+ CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+ dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
+ dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
+ req_mgr_h->max_used_sw_slots = 0;
+
+ /* Allocate DMA word for "dummy" completion descriptor use */
+ req_mgr_h->dummy_comp_buff =
+ dma_alloc_coherent(dev, sizeof(u32),
+ &req_mgr_h->dummy_comp_buff_dma,
+ GFP_KERNEL);
+ if (!req_mgr_h->dummy_comp_buff) {
+ dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+ sizeof(u32));
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ /* Init. "dummy" completion descriptor */
+ hw_desc_init(&req_mgr_h->compl_desc);
+ set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+ set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+ sizeof(u32), NS_BIT, 1);
+ set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+ set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
+
+ return 0;
+
+req_mgr_init_err:
+ cc_req_mgr_fini(drvdata);
+ return rc;
+}
+
+static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
+ unsigned int seq_len)
+{
+ int i, w;
+ void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /*
+ * We do indeed write all 6 command words to the same
+ * register. The HW supports this.
+ */
+
+ for (i = 0; i < seq_len; i++) {
+ for (w = 0; w <= 5; w++)
+ writel_relaxed(seq[i].word[w], reg);
+
+ if (cc_dump_desc)
+ dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, seq[i].word[0], seq[i].word[1],
+ seq[i].word[2], seq[i].word[3],
+ seq[i].word[4], seq[i].word[5]);
+ }
+}
+
+/*!
+ * Completion will take place if and only if user requested completion
+ * by cc_send_sync_request().
+ *
+ * \param dev
+ * \param dx_compl_h The completion event to signal
+ */
+static void request_mgr_complete(struct device *dev, void *dx_compl_h,
+ int dummy)
+{
+ struct completion *this_compl = dx_compl_h;
+
+ complete(this_compl);
+}
+
+static int cc_queues_status(struct cc_drvdata *drvdata,
+ struct cc_req_mgr_handle *req_mgr_h,
+ unsigned int total_seq_len)
+{
+ unsigned long poll_queue;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* The SW queue is checked only once, as it cannot
+ * change during the poll because the spinlock_bh
+ * is held by the thread
+ */
+ if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ req_mgr_h->req_queue_tail) {
+ dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ return -ENOSPC;
+ }
+
+ if (req_mgr_h->q_free_slots >= total_seq_len)
+ return 0;
+
+ /* Wait for space in HW queue. Poll constant num of iterations. */
+ for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+ if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
+ req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
+
+ if (req_mgr_h->q_free_slots >= total_seq_len) {
+ /* If there is enough room, return */
+ return 0;
+ }
+
+ dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
+ }
+ /* No room in the HW queue; try again later */
+ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots, total_seq_len);
+ return -ENOSPC;
+}
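The head/tail test above works because MAX_REQUEST_QUEUE_SIZE is a power of two, so masking with SIZE - 1 wraps indices without a modulo, at the cost of one sacrificial slot to distinguish full from empty. A minimal userspace sketch; QSIZE is an assumed stand-in:

#include <assert.h>
#include <stdbool.h>

#define QSIZE 8u	/* must be a power of two for the mask trick */

static bool ring_full(unsigned head, unsigned tail)
{
	/* one slot is sacrificed so full != empty */
	return ((head + 1) & (QSIZE - 1)) == tail;
}

static unsigned ring_used(unsigned head, unsigned tail)
{
	return (head - tail) & (QSIZE - 1);
}

int main(void)
{
	assert(!ring_full(0, 0));	/* empty                    */
	assert(ring_used(3, 1) == 2);	/* two requests in flight   */
	assert(ring_full(7, 0));	/* 7 used of the 8 - 1 slots */
	return 0;
}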
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ * Must be called with the HW lock held and PM running.
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param add_comp If "true": add an artificial dout DMA to mark completion
+ *
+ * \return int Returns -EINPROGRESS or error code
+ */
+static int cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp, bool ivgen)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int used_sw_slots;
+ unsigned int iv_seq_len = 0;
+ unsigned int total_seq_len = len; /* initial sequence length */
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ if (ivgen) {
+ dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ cc_req->ivgen_dma_addr_len,
+ &cc_req->ivgen_dma_addr[0],
+ &cc_req->ivgen_dma_addr[1],
+ &cc_req->ivgen_dma_addr[2],
+ cc_req->ivgen_size);
+
+ /* Acquire IV from pool */
+ rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
+ cc_req->ivgen_dma_addr_len,
+ cc_req->ivgen_size, iv_seq, &iv_seq_len);
+
+ if (rc) {
+ dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
+ return rc;
+ }
+
+ total_seq_len += iv_seq_len;
+ }
+
+ used_sw_slots = ((req_mgr_h->req_queue_head -
+ req_mgr_h->req_queue_tail) &
+ (MAX_REQUEST_QUEUE_SIZE - 1));
+ if (used_sw_slots > req_mgr_h->max_used_sw_slots)
+ req_mgr_h->max_used_sw_slots = used_sw_slots;
+
+ /* Enqueue request - must be locked with HW lock*/
+ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
+ req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
+ (MAX_REQUEST_QUEUE_SIZE - 1);
+ /* TODO: Use circ_buf.h ? */
+
+ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+
+ /*
+ * We are about to push a command to the HW via the command registers
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
+ */
+ wmb();
+
+ /* STAT_PHASE_4: Push sequence */
+ if (ivgen)
+ enqueue_seq(drvdata, iv_seq, iv_seq_len);
+
+ enqueue_seq(drvdata, desc, len);
+
+ if (add_comp) {
+ enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
+ total_seq_len++;
+ }
+
+ if (req_mgr_h->q_free_slots < total_seq_len) {
+ /* This situation should never occur. It may indicate a problem
+ * with power resume. Set the free slot count to 0 and hope
+ * for the best.
+ */
+ dev_err(dev, "HW free slot count mismatch.");
+ req_mgr_h->q_free_slots = 0;
+ } else {
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
+ }
+
+ /* Operation still in process */
+ return -EINPROGRESS;
+}
+
+static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
+ struct cc_bl_item *bli)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ spin_lock_bh(&mgr->bl_lock);
+ list_add_tail(&bli->list, &mgr->backlog);
+ ++mgr->bl_len;
+ spin_unlock_bh(&mgr->bl_lock);
+ tasklet_schedule(&mgr->comptask);
+}
+
+static void cc_proc_backlog(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct cc_bl_item *bli;
+ struct cc_crypto_req *creq;
+ struct crypto_async_request *req;
+ bool ivgen;
+ unsigned int total_len;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ spin_lock(&mgr->bl_lock);
+
+ while (mgr->bl_len) {
+ bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ spin_unlock(&mgr->bl_lock);
+
+ creq = &bli->creq;
+ req = (struct crypto_async_request *)creq->user_arg;
+
+ /*
+ * Notify the request that we're moving it out of the backlog,
+ * but only if we haven't done so already.
+ */
+ if (!bli->notif) {
+ req->complete(req, -EINPROGRESS);
+ bli->notif = true;
+ }
+
+ ivgen = !!creq->ivgen_dma_addr_len;
+ total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+
+ spin_lock(&mgr->hw_lock);
+
+ rc = cc_queues_status(drvdata, mgr, total_len);
+ if (rc) {
+ /*
+ * There is still no room in the FIFO for
+ * this request. Bail out. We'll return here
+ * on the next completion irq.
+ */
+ spin_unlock(&mgr->hw_lock);
+ return;
+ }
+
+ rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
+ bli->len, false, ivgen);
+
+ spin_unlock(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ creq->user_cb(dev, req, rc);
+ }
+
+ /* Remove ourselves from the backlog list */
+ spin_lock(&mgr->bl_lock);
+ list_del(&bli->list);
+ --mgr->bl_len;
+ }
+
+ spin_unlock(&mgr->bl_lock);
+}
+
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req)
+{
+ int rc;
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ bool ivgen = !!cc_req->ivgen_dma_addr_len;
+ unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+ struct device *dev = drvdata_to_dev(drvdata);
+ bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+ gfp_t flags = cc_gfp_flags(req);
+ struct cc_bl_item *bli;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, total_len);
+
+#ifdef CC_DEBUG_FORCE_BACKLOG
+ if (backlog_ok)
+ rc = -ENOSPC;
+#endif /* CC_DEBUG_FORCE_BACKLOG */
+
+ if (rc == -ENOSPC && backlog_ok) {
+ spin_unlock_bh(&mgr->hw_lock);
+
+ bli = kmalloc(sizeof(*bli), flags);
+ if (!bli) {
+ cc_pm_put_suspend(dev);
+ return -ENOMEM;
+ }
+
+ memcpy(&bli->creq, cc_req, sizeof(*cc_req));
+ memcpy(&bli->desc, desc, len * sizeof(*desc));
+ bli->len = len;
+ bli->notif = false;
+ cc_enqueue_backlog(drvdata, bli);
+ return -EBUSY;
+ }
+
+ if (!rc)
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
+ ivgen);
+
+ spin_unlock_bh(&mgr->hw_lock);
+ return rc;
+}
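From the caller's side, -EBUSY from a CRYPTO_TFM_REQ_MAY_BACKLOG request means "accepted into the backlog", not failure; the generic crypto_wait_req() helper already encodes this convention. A hedged sketch of a caller relying on it; do_encrypt is a hypothetical wrapper:

#include <crypto/skcipher.h>
#include <linux/crypto.h>

static int do_encrypt(struct skcipher_request *req, struct crypto_wait *wait)
{
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	/* -EBUSY here means "queued on the driver backlog", not failure;
	 * crypto_wait_req() sleeps until the final completion in that case.
	 */
	return crypto_wait_req(crypto_skcipher_encrypt(req), wait);
}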
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ int rc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ init_completion(&cc_req->seq_compl);
+ cc_req->user_cb = request_mgr_complete;
+ cc_req->user_arg = &cc_req->seq_compl;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ while (true) {
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, len + 1);
+
+ if (!rc)
+ break;
+
+ spin_unlock_bh(&mgr->hw_lock);
+ if (rc != -EAGAIN) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+ wait_for_completion_interruptible(&drvdata->hw_queue_avail);
+ reinit_completion(&drvdata->hw_queue_avail);
+ }
+
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+ spin_unlock_bh(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+
+ wait_for_completion(&cc_req->seq_compl);
+ return 0;
+}
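cc_send_sync_request() is the usual sync-over-async wrapper: park a completion in the request, let the completion callback fire it, and sleep until it does. A minimal sketch of the pattern; struct my_req is a hypothetical stand-in for struct cc_crypto_req:

#include <linux/completion.h>

struct my_req {
	void (*done)(void *arg, int err);
	void *arg;
};

static void sync_done(void *arg, int err)
{
	complete(arg);	/* arg points at the on-stack completion */
}

static int run_sync(struct my_req *req)
{
	DECLARE_COMPLETION_ONSTACK(done);

	req->done = sync_done;
	req->arg = &done;
	/* ... hand req to the asynchronous engine here ... */
	wait_for_completion(&done);
	return 0;
}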
+
+/*!
+ * Enqueue caller request to crypto hardware during init process.
+ * Assumes this function is not called in the middle of a flow,
+ * since we set QUEUE_LAST_IND flag in the last descriptor.
+ *
+ * \param drvdata
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ *
+ * \return int Returns "0" upon success
+ */
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int total_seq_len = len; /* initial sequence length */
+ int rc = 0;
+
+ /* Wait for space in the HW and SW FIFOs. Polls at most
+ * CC_MAX_POLL_ITER times.
+ */
+ rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
+ if (rc)
+ return rc;
+
+ set_queue_last_ind(drvdata, &desc[(len - 1)]);
+
+ /*
+ * We are about to push a command to the HW via the command registers
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
+ */
+ wmb();
+ enqueue_seq(drvdata, desc, len);
+
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+
+ return 0;
+}
+
+void complete_request(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ complete(&drvdata->hw_queue_avail);
+#ifdef COMP_IN_WQ
+ queue_delayed_work(request_mgr_handle->workq,
+ &request_mgr_handle->compwork, 0);
+#else
+ tasklet_schedule(&request_mgr_handle->comptask);
+#endif
+}
+
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work)
+{
+ struct cc_drvdata *drvdata =
+ container_of(work, struct cc_drvdata, compwork.work);
+
+ comp_handler((unsigned long)drvdata);
+}
+#endif
+
+static void proc_completions(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_req *cc_req;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+ unsigned int *tail = &request_mgr_handle->req_queue_tail;
+ unsigned int *head = &request_mgr_handle->req_queue_head;
+
+ while (request_mgr_handle->axi_completed) {
+ request_mgr_handle->axi_completed--;
+
+ /* Dequeue request */
+ if (*head == *tail) {
+ /* We are supposed to handle a completion but our
+ * queue is empty. This is not normal. Return and
+ * hope for the best.
+ */
+ dev_err(dev, "Request queue is empty head == tail %u\n",
+ *head);
+ break;
+ }
+
+ cc_req = &request_mgr_handle->req_queue[*tail];
+
+ if (cc_req->user_cb)
+ cc_req->user_cb(dev, cc_req->user_arg, 0);
+ *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+ dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
+ dev_dbg(dev, "Request completed. axi_completed=%d\n",
+ request_mgr_handle->axi_completed);
+ cc_pm_put_suspend(dev);
+ }
+}
+
+static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
+{
+ return FIELD_GET(AXIM_MON_COMP_VALUE,
+ cc_ioread(drvdata, drvdata->axim_mon_offset));
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void comp_handler(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ u32 irq;
+
+ irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+
+ if (irq & CC_COMP_IRQ_MASK) {
+ /* To avoid the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+
+ /* Avoid race with above clear: Test completion counter
+ * once more
+ */
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
+ request_mgr_handle->axi_completed =
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR),
+ CC_COMP_IRQ_MASK);
+
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+ }
+ }
+ /* after verifying that there is nothing to do,
+ * unmask AXI completion interrupt
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR),
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+
+ cc_proc_backlog(drvdata);
+}
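The nested loop above closes two races: completions that land while a batch is being processed, and an interrupt that re-fires between processing and the ack. A userspace simulation of that clear-then-recheck discipline; all names and counts below are made up:

#include <stdio.h>

static unsigned hw_count = 3;	/* pretend 3 completions are pending */
static unsigned refills = 2;	/* pretend 2 more batches race in    */

static unsigned read_comp_count(void)
{
	unsigned c = hw_count;

	hw_count = 0;	/* reading drains the monitor counter */
	return c;
}

int main(void)
{
	unsigned pending = read_comp_count();

	while (pending) {
		do {
			printf("processing %u completion(s)\n", pending);
			if (refills) {	/* simulate completions racing in */
				refills--;
				hw_count = 1;
			}
			pending = read_comp_count();
		} while (pending);
		/* ack the IRQ here, then re-read to close the ack race */
		pending = read_comp_count();
	}
	printf("idle, unmasking IRQ\n");
	return 0;
}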
+
+/*
+ * resume the queue configuration - callers need no extra locking, as the
+ * flag update below is done under the HW lock
+ */
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ request_mgr_handle->is_runtime_suspended = false;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+/*
+ * suspend the queue configuration. Since it is used only for runtime
+ * suspend, just verify that the queue can be suspended.
+ */
+int cc_suspend_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ /* lock the send_request */
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ if (request_mgr_handle->req_queue_head !=
+ request_mgr_handle->req_queue_tail) {
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+ return -EBUSY;
+ }
+ request_mgr_handle->is_runtime_suspended = true;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ return request_mgr_handle->is_runtime_suspended;
+}
+
+#endif
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
new file mode 100644
index 000000000000..573cb97af085
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_request_mgr.h
+ * Request Manager
+ */
+
+#ifndef __REQUEST_MGR_H__
+#define __REQUEST_MGR_H__
+
+#include "cc_hw_queue_defs.h"
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param req The base crypto async request
+ *
+ * \return int Returns -EINPROGRESS or error
+ */
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req);
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len);
+
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len);
+
+void complete_request(struct cc_drvdata *drvdata);
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata);
+
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata);
+
+int cc_suspend_req_queue(struct cc_drvdata *drvdata);
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
+#endif
+
+#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
new file mode 100644
index 000000000000..c8c276f6dee9
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include "cc_driver.h"
+#include "cc_sram_mgr.h"
+
+/**
+ * struct cc_sram_ctx - Internal RAM context manager
+ * @sram_free_offset: the offset to the non-allocated area
+ */
+struct cc_sram_ctx {
+ cc_sram_addr_t sram_free_offset;
+};
+
+/**
+ * cc_sram_mgr_fini() - Cleanup SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
+{
+ /* Free "this" context */
+ kfree(drvdata->sram_mgr_handle);
+}
+
+/**
+ * cc_sram_mgr_init() - Initializes SRAM pool.
+ * The pool starts right at the beginning of SRAM.
+ * Returns zero for success, negative value otherwise.
+ *
+ * @drvdata: Associated device driver context
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct cc_sram_ctx *ctx;
+ dma_addr_t start = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (drvdata->hw_rev < CC_HW_REV_712) {
+ /* Pool starts after ROM bytes */
+ start = (dma_addr_t)cc_ioread(drvdata,
+ CC_REG(HOST_SEP_SRAM_THRESHOLD));
+
+ if ((start & 0x3) != 0) {
+ dev_err(dev, "Invalid SRAM offset %pad\n", &start);
+ return -EINVAL;
+ }
+ }
+
+ /* Allocate "this" context */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->sram_free_offset = start;
+ drvdata->sram_mgr_handle = ctx;
+
+ return 0;
+}
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: the caller is responsible for freeing the LAST allocated buffer.
+ * This function does not handle any fragmentation that may occur
+ * due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
+{
+ struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+ cc_sram_addr_t p;
+
+ if ((size & 0x3)) {
+ dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
+ size);
+ return NULL_SRAM_ADDR;
+ }
+ if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
+ dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
+ size, smgr_ctx->sram_free_offset);
+ return NULL_SRAM_ADDR;
+ }
+
+ p = smgr_ctx->sram_free_offset;
+ smgr_ctx->sram_free_offset += size;
+ dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
+ return p;
+}
+
+/**
+ * cc_set_sram_desc() - Create const descriptors sequence to
+ * set values in given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src: A pointer to array of words to set as consts.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in the "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len)
+{
+ u32 i;
+ unsigned int idx = *seq_len;
+
+ for (i = 0; i < nelement; i++, idx++) {
+ hw_desc_init(&seq[idx]);
+ set_din_const(&seq[idx], src[i], sizeof(u32));
+ set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
+ set_flow_mode(&seq[idx], BYPASS);
+ }
+
+ *seq_len = idx;
+}
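cc_sram_alloc() above is a plain bump allocator: one cursor, 4-byte-aligned requests only, and no general free. A userspace sketch of the same scheme; POOL_SIZE stands in for CC_CC_SRAM_SIZE:

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 4096u
#define NULL_ADDR ((uint64_t)-1)

static uint64_t free_offset;	/* the only allocator state */

static uint64_t sram_alloc(uint32_t size)
{
	uint64_t p;

	if (size & 0x3)			/* must be a multiple of 4 */
		return NULL_ADDR;
	if (size > POOL_SIZE - free_offset)
		return NULL_ADDR;	/* pool exhausted */

	p = free_offset;		/* bump the cursor */
	free_offset += size;
	return p;
}

int main(void)
{
	printf("a=%llu\n", (unsigned long long)sram_alloc(64));	/* 0  */
	printf("b=%llu\n", (unsigned long long)sram_alloc(128));	/* 64 */
	printf("c=%llu\n", (unsigned long long)sram_alloc(3));	/* NULL_ADDR: unaligned */
	return 0;
}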
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
new file mode 100644
index 000000000000..d48649fb3323
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_SRAM_MGR_H__
+#define __CC_SRAM_MGR_H__
+
+#ifndef CC_CC_SRAM_SIZE
+#define CC_CC_SRAM_SIZE 4096
+#endif
+
+struct cc_drvdata;
+
+/**
+ * Address (offset) within CC internal SRAM
+ */
+
+typedef u64 cc_sram_addr_t;
+
+#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
+
+/*!
+ * Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage; hence, the pool
+ * starts right after those X bytes.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Uninitializes the SRAM pool.
+ *
+ * \param drvdata
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: the caller is responsible for freeing the LAST allocated buffer.
+ * This function does not handle any fragmentation that may occur
+ * due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+
+/**
+ * cc_set_sram_desc() - Create const descriptors sequence to
+ * set values in given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src: A pointer to array of words to set as consts.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in the "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len);
+
+#endif /*__CC_SRAM_MGR_H__*/
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 34a02d690548..59fe6631e73e 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -131,6 +131,11 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
return (skb->len <= SGE_MAX_WR_LEN);
}
+static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
+{
+ memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
+}
+
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
unsigned int entlen,
unsigned int skip)
@@ -160,41 +165,6 @@ static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
return nents;
}
-static inline void chcr_handle_ahash_resp(struct ahash_request *req,
- unsigned char *input,
- int err)
-{
- struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
- int digestsize, updated_digestsize;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
-
- if (input == NULL)
- goto out;
- digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- if (reqctx->is_sg_map)
- chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
- if (reqctx->dma_addr)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
- reqctx->dma_len, DMA_TO_DEVICE);
- reqctx->dma_addr = 0;
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- if (reqctx->result == 1) {
- reqctx->result = 0;
- memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
- digestsize);
- } else {
- memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
- updated_digestsize);
- }
-out:
- req->base.complete(&req->base, err);
-}
-
static inline int get_aead_subtype(struct crypto_aead *aead)
{
struct aead_alg *alg = crypto_aead_alg(aead);
@@ -247,34 +217,6 @@ static inline void chcr_handle_aead_resp(struct aead_request *req,
req->base.complete(&req->base, err);
}
-/*
- * chcr_handle_resp - Unmap the DMA buffers associated with the request
- * @req: crypto request
- */
-int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int err)
-{
- struct crypto_tfm *tfm = req->tfm;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct adapter *adap = padap(ctx->dev);
-
- switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AEAD:
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
- break;
-
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
- input, err);
- break;
-
- case CRYPTO_ALG_TYPE_AHASH:
- chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
- }
- atomic_inc(&adap->chcr_stats.complete);
- return err;
-}
-
static void get_aes_decrypt_key(unsigned char *dec_key,
const unsigned char *key,
unsigned int keylength)
@@ -563,7 +505,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
if (!len)
return;
-
while (sg && skip) {
if (sg_dma_len(sg) <= skip) {
skip -= sg_dma_len(sg);
@@ -653,6 +594,35 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
}
return 0;
}
+
+static int chcr_hash_ent_in_wr(struct scatterlist *src,
+ unsigned int minsg,
+ unsigned int space,
+ unsigned int srcskip)
+{
+ int srclen = 0;
+ int srcsg = minsg;
+ int soffset = 0, sless;
+
+ if (sg_dma_len(src) == srcskip) {
+ src = sg_next(src);
+ srcskip = 0;
+ }
+ while (src && space > (sgl_ent_len[srcsg + 1])) {
+ sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
+ CHCR_SRC_SG_SIZE);
+ srclen += sless;
+ soffset += sless;
+ srcsg++;
+ if (sg_dma_len(src) == (soffset + srcskip)) {
+ src = sg_next(src);
+ soffset = 0;
+ srcskip = 0;
+ }
+ }
+ return srclen;
+}
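chcr_hash_ent_in_wr() walks the source scatterlist accumulating bytes until the work-request slot budget runs out, capping each entry's contribution at CHCR_SRC_SG_SIZE. A userspace sketch of that walk; the entry lengths, per-entry cap, and slot budget are illustrative only:

#include <stdio.h>

#define ENT_CAP 2048u	/* stand-in for CHCR_SRC_SG_SIZE */

int main(void)
{
	unsigned sg_len[] = { 4096, 1024, 512 };	/* fake sg entries */
	unsigned space = 5;	/* fake slot budget     */
	unsigned used = 1;	/* minsg equivalent     */
	unsigned total = 0, off = 0, i = 0;

	while (i < 3 && used + 1 <= space) {
		unsigned take = sg_len[i] - off;

		if (take > ENT_CAP)
			take = ENT_CAP;	/* one slot per ENT_CAP bytes */
		total += take;
		off += take;
		used++;
		if (off == sg_len[i]) {	/* entry consumed, advance */
			i++;
			off = 0;
		}
	}
	printf("can carry %u bytes in %u slots\n", total, used);
	return 0;
}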
+
static int chcr_sg_ent_in_wr(struct scatterlist *src,
struct scatterlist *dst,
unsigned int minsg,
@@ -662,7 +632,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
{
int srclen = 0, dstlen = 0;
int srcsg = minsg, dstsg = minsg;
- int offset = 0, less;
+ int offset = 0, soffset = 0, less, sless = 0;
if (sg_dma_len(src) == srcskip) {
src = sg_next(src);
@@ -676,7 +646,9 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
while (src && dst &&
space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
- srclen += (sg_dma_len(src) - srcskip);
+ sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
+ CHCR_SRC_SG_SIZE);
+ srclen += sless;
srcsg++;
offset = 0;
while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
@@ -687,15 +659,20 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
- if (offset == sg_dma_len(dst)) {
+ if ((offset + dstskip) == sg_dma_len(dst)) {
dst = sg_next(dst);
offset = 0;
}
dstsg++;
dstskip = 0;
}
- src = sg_next(src);
- srcskip = 0;
+ soffset += sless;
+ if ((soffset + srcskip) == sg_dma_len(src)) {
+ src = sg_next(src);
+ srcskip = 0;
+ soffset = 0;
+ }
+
}
return min(srclen, dstlen);
}
@@ -784,14 +761,14 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
dst_size = get_space_for_phys_dsgl(nents + 1);
- kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
CHCR_SRC_SG_SIZE, reqctx->src_ofst);
- temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
- * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
+ temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
+ (sgl_len(nents + MIN_CIPHER_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
@@ -847,6 +824,13 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
transhdr_len, temp,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
reqctx->skb = skb;
+
+ if (reqctx->op && (ablkctx->ciph_mode ==
+ CHCR_SCMD_CIPHER_MODE_AES_CBC))
+ sg_pcopy_to_buffer(wrparam->req->src,
+ sg_nents(wrparam->req->src), wrparam->req->info, 16,
+ reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
+
return skb;
err:
return ERR_PTR(error);
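The recurring change in these hunks swaps open-coded DIV_ROUND_UP(x, 16) * 16 and ROUND_16() for the kernel's roundup()/rounddown() helpers. The substitution is behavior-preserving, which a quick userspace check confirms; the macros below are simplified from their kernel definitions:

#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define roundup(x, y)		((((x) + (y) - 1) / (y)) * (y))
#define rounddown(x, y)		((x) - ((x) % (y)))

int main(void)
{
	for (unsigned x = 0; x < 1024; x++) {
		/* ceil to a 16-byte boundary, both spellings agree */
		assert(DIV_ROUND_UP(x, 16) * 16 == roundup(x, 16));
		/* floor to a 16-byte boundary == masking off low bits */
		assert(rounddown(x, 16) == (x & ~15u));
	}
	return 0;
}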
@@ -1070,9 +1054,8 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
- 16,
- reqctx->processed - AES_BLOCK_SIZE);
+ /* Updated before sending the last WR */
+ memcpy(iv, req->info, AES_BLOCK_SIZE);
else
memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
}
@@ -1100,11 +1083,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
- if (reqctx->op)
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
- 16,
- reqctx->processed - AES_BLOCK_SIZE);
- else
+ /* Already updated for decrypt */
+ if (!reqctx->op)
memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
}
@@ -1143,12 +1123,12 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
}
if (!reqctx->imm) {
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
- SPACE_LEFT(ablkctx->enckey_len),
+ CIP_SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
- bytes = ROUND_16(bytes);
+ bytes = rounddown(bytes, 16);
} else {
/* CTR mode counter overflow */
bytes = req->nbytes - reqctx->processed;
@@ -1234,7 +1214,7 @@ static int process_cipher(struct ablkcipher_request *req,
CHCR_DST_SG_SIZE, 0);
dnents += 1; // IV
phys_dsgl = get_space_for_phys_dsgl(dnents);
- kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
reqctx->imm = (transhdr_len + IV + req->nbytes) <=
SGE_MAX_WR_LEN;
@@ -1247,12 +1227,12 @@ static int process_cipher(struct ablkcipher_request *req,
if (!reqctx->imm) {
bytes = chcr_sg_ent_in_wr(req->src, req->dst,
MIN_CIPHER_SG,
- SPACE_LEFT(ablkctx->enckey_len),
+ CIP_SPACE_LEFT(ablkctx->enckey_len),
0, 0);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
- bytes = ROUND_16(bytes);
+ bytes = rounddown(bytes, 16);
} else {
bytes = req->nbytes;
}
@@ -1282,7 +1262,7 @@ static int process_cipher(struct ablkcipher_request *req,
req->src,
req->dst,
req->nbytes,
- req->info,
+ reqctx->iv,
op_type);
goto error;
}
@@ -1503,35 +1483,24 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
struct chcr_wr *chcr_req;
struct ulptx_sgl *ulptx;
- unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
- unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = 0, temp = 0;
- u8 hash_size_in_response = 0;
+ unsigned int nents = 0, transhdr_len;
+ unsigned int temp = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(h_ctx(tfm)->dev);
int error = 0;
- iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len = param->alg_prm.result_size + iopad_alignment;
- if (param->opad_needed)
- kctx_len += param->alg_prm.result_size + iopad_alignment;
-
- if (req_ctx->result)
- hash_size_in_response = digestsize;
- else
- hash_size_in_response = param->alg_prm.result_size;
- transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
- SGE_MAX_WR_LEN;
- nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+ transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+ req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+ param->sg_len) <= SGE_MAX_WR_LEN;
+ nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
+ CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
nents += param->bfr_len ? 1 : 0;
- transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
- param->sg_len), 16) * 16) :
- (sgl_len(nents) * 8);
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
+ param->sg_len, 16) : (sgl_len(nents) * 8);
+ transhdr_len = roundup(transhdr_len, 16);
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb)
return ERR_PTR(-ENOMEM);
chcr_req = __skb_put_zero(skb, transhdr_len);
@@ -1563,33 +1532,33 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- ((kctx_len +
+ ((param->kctx_len +
sizeof(chcr_req->key_ctx)) >> 4));
chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
- ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+ ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
DUMMY_BYTES);
if (param->bfr_len != 0) {
- req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
- req_ctx->reqbfr, param->bfr_len,
- DMA_TO_DEVICE);
+ req_ctx->hctx_wr.dma_addr =
+ dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
+ param->bfr_len, DMA_TO_DEVICE);
if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
- req_ctx->dma_addr)) {
+ req_ctx->hctx_wr. dma_addr)) {
error = -ENOMEM;
goto err;
}
- req_ctx->dma_len = param->bfr_len;
+ req_ctx->hctx_wr.dma_len = param->bfr_len;
} else {
- req_ctx->dma_addr = 0;
+ req_ctx->hctx_wr.dma_addr = 0;
}
chcr_add_hash_src_ent(req, ulptx, param);
/* Request up to max WR size */
- temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
- + param->bfr_len) : 0);
+ temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
+ (param->sg_len + param->bfr_len) : 0);
atomic_inc(&adap->chcr_stats.digest_rqst);
- create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
- hash_size_in_response, transhdr_len,
+ create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
+ param->hash_size, transhdr_len,
temp, 0);
- req_ctx->skb = skb;
+ req_ctx->hctx_wr.skb = skb;
return skb;
err:
kfree_skb(skb);
@@ -1608,7 +1577,6 @@ static int chcr_ahash_update(struct ahash_request *req)
int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
@@ -1625,17 +1593,26 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
+ chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
if (error)
return -ENOMEM;
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len > req->nbytes)
+ params.sg_len = req->nbytes;
+ params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
+ req_ctx->reqlen;
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->reqlen;
params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- req_ctx->result = 0;
+ req_ctx->hctx_wr.srcsg = req->src;
+
+ params.hash_size = params.alg_prm.result_size;
req_ctx->data_len += params.sg_len + params.bfr_len;
skb = create_hash_wr(req, &params);
if (IS_ERR(skb)) {
@@ -1643,6 +1620,7 @@ static int chcr_ahash_update(struct ahash_request *req)
goto unmap;
}
+ req_ctx->hctx_wr.processed += params.sg_len;
if (remainder) {
/* Swap buffers */
swap(req_ctx->reqbfr, req_ctx->skbfr);
@@ -1680,16 +1658,27 @@ static int chcr_ahash_final(struct ahash_request *req)
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ chcr_init_hctx_per_wr(req_ctx);
u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
params.opad_needed = 0;
params.sg_len = 0;
+ req_ctx->hctx_wr.isfinal = 1;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- req_ctx->result = 1;
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.opad_needed = 1;
+ params.kctx_len *= 2;
+ } else {
+ params.opad_needed = 0;
+ }
+
+ req_ctx->hctx_wr.result = 1;
params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
+ req_ctx->hctx_wr.srcsg = req->src;
if (req_ctx->reqlen == 0) {
create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
@@ -1702,10 +1691,11 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
+ params.hash_size = crypto_ahash_digestsize(rtfm);
skb = create_hash_wr(req, &params);
if (IS_ERR(skb))
return PTR_ERR(skb);
-
+ req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
@@ -1730,37 +1720,59 @@ static int chcr_ahash_finup(struct ahash_request *req)
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
- if (is_hmac(crypto_ahash_tfm(rtfm)))
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
params.opad_needed = 1;
- else
+ } else {
params.opad_needed = 0;
+ }
- params.sg_len = req->nbytes;
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len < req->nbytes) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
+ - req_ctx->reqlen;
+ params.hash_size = params.alg_prm.result_size;
+ params.scmd1 = 0;
+ } else {
+ params.last = 1;
+ params.more = 0;
+ params.sg_len = req->nbytes;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
+ params.sg_len;
+ }
params.bfr_len = req_ctx->reqlen;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
- req_ctx->result = 1;
+ req_ctx->hctx_wr.result = 1;
+ req_ctx->hctx_wr.srcsg = req->src;
if ((req_ctx->reqlen + req->nbytes) == 0) {
create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
params.bfr_len = bs;
- } else {
- params.scmd1 = req_ctx->data_len;
- params.last = 1;
- params.more = 0;
}
- error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
-
skb = create_hash_wr(req, &params);
if (IS_ERR(skb)) {
error = PTR_ERR(skb);
goto unmap;
}
+ req_ctx->reqlen = 0;
+ req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
@@ -1791,21 +1803,42 @@ static int chcr_ahash_digest(struct ahash_request *req)
return -EBUSY;
}
- if (is_hmac(crypto_ahash_tfm(rtfm)))
- params.opad_needed = 1;
- else
- params.opad_needed = 0;
+ chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
if (error)
return -ENOMEM;
- params.last = 0;
- params.more = 0;
- params.sg_len = req->nbytes;
- params.bfr_len = 0;
- params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- req_ctx->result = 1;
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
+ params.opad_needed = 1;
+ } else {
+ params.opad_needed = 0;
+ }
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len < req->nbytes) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.scmd1 = 0;
+ params.sg_len = rounddown(params.sg_len, bs);
+ params.hash_size = params.alg_prm.result_size;
+ } else {
+ params.sg_len = req->nbytes;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.last = 1;
+ params.more = 0;
+ params.scmd1 = req->nbytes + req_ctx->data_len;
+ }
+ params.bfr_len = 0;
+ req_ctx->hctx_wr.result = 1;
+ req_ctx->hctx_wr.srcsg = req->src;
req_ctx->data_len += params.bfr_len + params.sg_len;
if (req->nbytes == 0) {
@@ -1819,6 +1852,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
error = PTR_ERR(skb);
goto unmap;
}
+ req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
@@ -1828,6 +1862,151 @@ unmap:
return error;
}
+static int chcr_ahash_continue(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = NULL;
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+ u8 bs;
+ int error;
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ u_ctx = ULD_CTX(h_ctx(rtfm));
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
+ params.opad_needed = 1;
+ } else {
+ params.opad_needed = 0;
+ }
+ params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
+ HASH_SPACE_LEFT(params.kctx_len),
+ hctx_wr->src_ofst);
+ if ((params.sg_len + hctx_wr->processed) > req->nbytes)
+ params.sg_len = req->nbytes - hctx_wr->processed;
+ if (!hctx_wr->result ||
+ ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.sg_len = rounddown(params.sg_len, bs);
+ params.hash_size = params.alg_prm.result_size;
+ params.scmd1 = 0;
+ } else {
+ params.last = 1;
+ params.more = 0;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.scmd1 = reqctx->data_len + params.sg_len;
+ }
+ params.bfr_len = 0;
+ reqctx->data_len += params.sg_len;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
+ hctx_wr->processed += params.sg_len;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return 0;
+err:
+ return error;
+}
+
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+ int digestsize, updated_digestsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+ if (input == NULL)
+ goto out;
+ digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+
+ if (hctx_wr->dma_addr) {
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
+ hctx_wr->dma_len, DMA_TO_DEVICE);
+ hctx_wr->dma_addr = 0;
+ }
+ if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
+ req->nbytes)) {
+ if (hctx_wr->result == 1) {
+ hctx_wr->result = 0;
+ memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+ digestsize);
+ } else {
+ memcpy(reqctx->partial_hash,
+ input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+
+ }
+ goto unmap;
+ }
+ memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+
+ err = chcr_ahash_continue(req);
+ if (err)
+ goto unmap;
+ return;
+unmap:
+ if (hctx_wr->is_sg_map)
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+
+out:
+ req->base.complete(&req->base, err);
+}
+
+/*
+ * chcr_handle_resp - Unmap the DMA buffers associated with the request
+ * @req: crypto request
+ */
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+ int err)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct adapter *adap = padap(ctx->dev);
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ break;
+
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ input, err);
+ break;
+
+ case CRYPTO_ALG_TYPE_AHASH:
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
+ }
+ atomic_inc(&adap->chcr_stats.complete);
+ return err;
+}
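The continue/response pair above turns a single large hash request into a chain of work requests: each completion either re-submits the next chunk (carrying the partial hash forward) or, on the final chunk, copies out the digest. A userspace sketch of that state machine; WR_BUDGET and struct hctx are assumptions standing in for HASH_SPACE_LEFT() and chcr_hctx_per_wr:

#include <stdio.h>

#define WR_BUDGET 64u	/* pretend one WR can carry 64 bytes */

struct hctx {
	unsigned processed;	/* bytes already sent to "hardware" */
	unsigned total;		/* req->nbytes equivalent           */
};

static void submit_chunk(struct hctx *h)
{
	unsigned len = h->total - h->processed;
	int last = len <= WR_BUDGET;

	if (!last)
		len = WR_BUDGET;	/* fill the WR, keep partial state */
	h->processed += len;
	printf("WR: %u bytes, %s\n", len,
	       last ? "final digest" : "partial hash");
}

int main(void)
{
	struct hctx h = { .processed = 0, .total = 150 };

	while (h.processed < h.total)	/* the completion handler would  */
		submit_chunk(&h);	/* re-invoke this in the driver  */
	return 0;
}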
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
@@ -1835,11 +2014,10 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- state->is_sg_map = 0;
- state->result = 0;
memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
+ chcr_init_hctx_per_wr(state);
return 0;
}
@@ -1852,11 +2030,10 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
req_ctx->data_len = state->data_len;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
- req_ctx->is_sg_map = 0;
- req_ctx->result = 0;
memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
+ chcr_init_hctx_per_wr(req_ctx);
return 0;
}
@@ -1953,10 +2130,8 @@ static int chcr_sha_init(struct ahash_request *areq)
req_ctx->reqlen = 0;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
- req_ctx->skb = NULL;
- req_ctx->result = 0;
- req_ctx->is_sg_map = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
+
return 0;
}
@@ -2124,11 +2299,11 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
- * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
+ : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ MIN_GCM_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
transhdr_len, op_type)) {
@@ -2187,9 +2362,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
- 4), actx->h_iopad, kctx_len -
- (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
@@ -2398,22 +2572,26 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
- if (reqctx->imm) {
+ if (reqctx->hctx_wr.imm) {
u8 *buf = (u8 *)ulptx;
if (param->bfr_len) {
memcpy(buf, reqctx->reqbfr, param->bfr_len);
buf += param->bfr_len;
}
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, param->sg_len, 0);
+
+ sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
+ sg_nents(reqctx->hctx_wr.srcsg), buf,
+ param->sg_len, 0);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (param->bfr_len)
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
- &reqctx->dma_addr);
- ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
- 0);
+ &reqctx->hctx_wr.dma_addr);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
+ param->sg_len, reqctx->hctx_wr.src_ofst);
+ reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
+ reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
ulptx_walk_end(&ulp_walk);
}
}
@@ -2430,7 +2608,7 @@ int chcr_hash_dma_map(struct device *dev,
DMA_TO_DEVICE);
if (!error)
return -ENOMEM;
- req_ctx->is_sg_map = 1;
+ req_ctx->hctx_wr.is_sg_map = 1;
return 0;
}
@@ -2444,7 +2622,7 @@ void chcr_hash_dma_unmap(struct device *dev,
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
- req_ctx->is_sg_map = 0;
+ req_ctx->hctx_wr.is_sg_map = 0;
}
@@ -2636,10 +2814,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
0, dst_size);
}
-int aead_ccm_validate_input(unsigned short op_type,
- struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
- unsigned int sub_type)
+static int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
{
if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
if (crypto_ccm_check_iv(req->iv)) {
@@ -2696,16 +2874,16 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
CHCR_DST_SG_SIZE, req->assoclen);
dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
reqctx->b0_len) <= SGE_MAX_WR_LEN;
- temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
- reqctx->b0_len), 16) * 16) :
+ temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+ reqctx->b0_len, 16) :
(sgl_len(reqctx->src_nents + reqctx->aad_nents +
MIN_CCM_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
reqctx->b0_len, transhdr_len, op_type)) {
@@ -2727,8 +2905,8 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
- 16), aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
@@ -2798,16 +2976,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
CHCR_DST_SG_SIZE, req->assoclen);
dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
- AEAD_H_SIZE;
+ kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
- req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
- reqctx->aad_nents + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
+ (sgl_len(reqctx->src_nents +
+ reqctx->aad_nents + MIN_GCM_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
@@ -2846,8 +3023,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
- 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
/* prepare a 16 byte iv */
/* S A L T | IV | 0x00000001 */
@@ -3067,11 +3244,10 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
unsigned char ck_size, mk_size;
int key_ctx_size = 0;
- key_ctx_size = sizeof(struct _key_ctx) +
- ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
if (keylen == AES_KEYSIZE_128) {
- mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
@@ -3178,10 +3354,9 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
memcpy(aeadctx->key, key, keylen);
aeadctx->enckey_len = keylen;
- key_ctx_size = sizeof(struct _key_ctx) +
- ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
AEAD_H_SIZE;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
CHCR_KEYCTX_MAC_KEY_SIZE_128,
0, 0,
key_ctx_size >> 4);
@@ -3281,6 +3456,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
{
@@ -3325,17 +3501,19 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
chcr_change_order(actx->h_iopad, param.result_size);
chcr_change_order(o_ptr, param.result_size);
key_ctx_len = sizeof(struct _key_ctx) +
- ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ roundup(keys.enckeylen, 16) +
(param.result_size + align) * 2;
aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
0, 1, key_ctx_len >> 4);
actx->auth_mode = param.auth_mode;
chcr_free_shash(base_hash);
+ memzero_explicit(&keys, sizeof(keys));
return 0;
}
out:
aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
if (!IS_ERR(base_hash))
chcr_free_shash(base_hash);
return -EINVAL;
@@ -3393,15 +3571,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
aeadctx->enckey_len << 3);
}
- key_ctx_len = sizeof(struct _key_ctx)
- + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+ key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
0, key_ctx_len >> 4);
actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ memzero_explicit(&keys, sizeof(keys));
return 0;
out:
aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
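
A recurring cleanup throughout the chcr hunks above replaces the open-coded
DIV_ROUND_UP(x, 16) * 16 (and its "<< 4" spelling) with roundup(x, 16). A
minimal userspace sketch of the equivalence, with the kernel macros stubbed
in locally:

	#include <assert.h>
	#include <stdio.h>

	/* Local stand-ins for the macros in <linux/kernel.h>. */
	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
	#define roundup(x, y)       ((((x) + ((y) - 1)) / (y)) * (y))

	int main(void)
	{
		unsigned int len;

		/* All three spellings agree for every length with a step of 16. */
		for (len = 0; len <= 4096; len++) {
			assert(roundup(len, 16) == DIV_ROUND_UP(len, 16) * 16);
			assert(roundup(len, 16) == (DIV_ROUND_UP(len, 16) << 4));
		}
		printf("roundup(x, 16) matches both open-coded forms\n");
		return 0;
	}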
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f263cd42a84f..dba3dff1e209 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -258,15 +258,16 @@
#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8
-#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32
#define MIN_CIPHER_SG 1 /* IV */
#define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
#define MIN_CCM_SG 2 /*IV+B0*/
-#define SPACE_LEFT(len) \
- ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
+#define CIP_SPACE_LEFT(len) \
+ ((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
+#define HASH_SPACE_LEFT(len) \
+ ((SGE_MAX_WR_LEN - HASH_WR_MIN_LEN - (len)))
struct algo_param {
unsigned int auth_mode;
@@ -275,12 +276,14 @@ struct algo_param {
};
struct hash_wr_param {
+ struct algo_param alg_prm;
unsigned int opad_needed;
unsigned int more;
unsigned int last;
- struct algo_param alg_prm;
+ unsigned int kctx_len;
unsigned int sg_len;
unsigned int bfr_len;
+ unsigned int hash_size;
u64 scmd1;
};
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 77056a90c8e1..1a20424e18c6 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -54,10 +54,14 @@
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
#define MAX_SALT 4
-#define WR_MIN_LEN (sizeof(struct chcr_wr) + \
+#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
sizeof(struct cpl_rx_phys_dsgl) + \
sizeof(struct ulptx_sgl))
+#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+ DUMMY_BYTES + \
+ sizeof(struct ulptx_sgl))
+
#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
struct uld_ctx;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 7daf0a17a7d2..c8e8972af283 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -258,21 +258,32 @@ struct chcr_context {
struct __crypto_ctx crypto_ctx[0];
};
-struct chcr_ahash_req_ctx {
+struct chcr_hctx_per_wr {
+ struct scatterlist *srcsg;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 dma_len;
+ unsigned int src_ofst;
+ unsigned int processed;
u32 result;
- u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
- u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 is_sg_map;
+ u8 imm;
+ /* Final callback called. The driver cannot rely on nbytes to
+ * decide whether this is the final call.
+ */
+ u8 isfinal;
+};
+
+struct chcr_ahash_req_ctx {
+ struct chcr_hctx_per_wr hctx_wr;
u8 *reqbfr;
u8 *skbfr;
- dma_addr_t dma_addr;
- u32 dma_len;
+ u64 data_len; /* total data length processed so far */
u8 reqlen;
- u8 imm;
- u8 is_sg_map;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
- u64 data_len; /* Data len till time */
- /* SKB which is being sent to the hardware for processing */
- struct sk_buff *skb;
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
struct chcr_blkcipher_req_ctx {
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index db1e241104ed..8e0aa3f175c9 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -360,8 +360,7 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
cpl = (struct cpl_tx_pkt_core *)pos;
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
TXPKT_PF_V(adap->pf);
if (skb_vlan_tag_present(skb)) {
@@ -475,7 +474,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
/* Sub-command */
- wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
sizeof(wr->req.key_ctx) +
kctx_len +
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 225e74a7f724..d4a81be0d7d2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -235,7 +235,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
+ val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
writel(val,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
@@ -332,7 +332,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
- val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
+ val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
/* FIXME: instability issues can occur for EIP97 but disabling it
* impacts performance.
*/
@@ -354,7 +354,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
- val |= EIP197_ALG_SHA2;
+ val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
@@ -432,20 +432,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
}
/* Called with ring's lock taken */
-static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
- int ring, int reqs)
+static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring)
{
- int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+ int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
if (!coal)
- return 0;
+ return;
/* Configure when we want an interrupt */
writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
-
- return coal;
}
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
@@ -490,6 +488,15 @@ handle_req:
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
+ /* In case the send() helper did not issue any command to push
+ * to the engine because the input data was cached, continue to
+ * dequeue other requests as this is valid and not an error.
+ */
+ if (!commands && !results) {
+ kfree(request);
+ continue;
+ }
+
spin_lock_bh(&priv->ring[ring].egress_lock);
list_add_tail(&request->list, &priv->ring[ring].list);
spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -512,14 +519,13 @@ finalize:
spin_lock_bh(&priv->ring[ring].egress_lock);
+ priv->ring[ring].requests += nreq;
+
if (!priv->ring[ring].busy) {
- nreq -= safexcel_try_push_requests(priv, ring, nreq);
- if (nreq)
- priv->ring[ring].busy = true;
+ safexcel_try_push_requests(priv, ring);
+ priv->ring[ring].busy = true;
}
- priv->ring[ring].requests_left += nreq;
-
spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
@@ -531,25 +537,6 @@ finalize:
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
- struct crypto_async_request *req,
- int result_sz)
-{
- struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
-
- if (ctx->result_dma)
- dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
- DMA_FROM_DEVICE);
-
- if (ctx->cache) {
- dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
- DMA_TO_DEVICE);
- kfree(ctx->cache);
- ctx->cache = NULL;
- ctx->cache_sz = 0;
- }
-}
-
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -623,7 +610,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc, tot_descs, done;
+ int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
handle_results:
@@ -659,6 +646,7 @@ handle_results:
kfree(sreq);
tot_descs += ndesc;
+ handled++;
}
acknowledge:
@@ -677,11 +665,10 @@ acknowledge:
requests_left:
spin_lock_bh(&priv->ring[ring].egress_lock);
- done = safexcel_try_push_requests(priv, ring,
- priv->ring[ring].requests_left);
+ priv->ring[ring].requests -= handled;
+ safexcel_try_push_requests(priv, ring);
- priv->ring[ring].requests_left -= done;
- if (!done && !priv->ring[ring].requests_left)
+ if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -781,6 +768,8 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_sha224,
&safexcel_alg_sha256,
&safexcel_alg_hmac_sha1,
+ &safexcel_alg_hmac_sha224,
+ &safexcel_alg_hmac_sha256,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -894,29 +883,44 @@ static int safexcel_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
- priv->clk = of_clk_get(dev->of_node, 0);
- if (!IS_ERR(priv->clk)) {
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ ret = PTR_ERR_OR_ZERO(priv->clk);
+ /* The clock isn't mandatory */
+ if (ret != -ENOENT) {
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "unable to enable clk (%d)\n", ret);
return ret;
}
- } else {
- /* The clock isn't mandatory */
- if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ }
+
+ priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
+ ret = PTR_ERR_OR_ZERO(priv->reg_clk);
+ /* The clock isn't mandatory */
+ if (ret != -ENOENT) {
+ if (ret)
+ goto err_core_clk;
+
+ ret = clk_prepare_enable(priv->reg_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable reg clk (%d)\n", ret);
+ goto err_core_clk;
+ }
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
- goto err_clk;
+ goto err_reg_clk;
priv->context_pool = dmam_pool_create("safexcel-context", dev,
sizeof(struct safexcel_context_record),
1, 0);
if (!priv->context_pool) {
ret = -ENOMEM;
- goto err_clk;
+ goto err_reg_clk;
}
safexcel_configure(priv);
@@ -931,12 +935,12 @@ static int safexcel_probe(struct platform_device *pdev)
&priv->ring[i].cdr,
&priv->ring[i].rdr);
if (ret)
- goto err_clk;
+ goto err_reg_clk;
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
- goto err_clk;
+ goto err_reg_clk;
}
ring_irq->priv = priv;
@@ -948,7 +952,7 @@ static int safexcel_probe(struct platform_device *pdev)
ring_irq);
if (irq < 0) {
ret = irq;
- goto err_clk;
+ goto err_reg_clk;
}
priv->ring[i].work_data.priv = priv;
@@ -959,10 +963,10 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
if (!priv->ring[i].workqueue) {
ret = -ENOMEM;
- goto err_clk;
+ goto err_reg_clk;
}
- priv->ring[i].requests_left = 0;
+ priv->ring[i].requests = 0;
priv->ring[i].busy = false;
crypto_init_queue(&priv->ring[i].queue,
@@ -980,18 +984,20 @@ static int safexcel_probe(struct platform_device *pdev)
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "EIP h/w init failed (%d)\n", ret);
- goto err_clk;
+ goto err_reg_clk;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- goto err_clk;
+ goto err_reg_clk;
}
return 0;
-err_clk:
+err_reg_clk:
+ clk_disable_unprepare(priv->reg_clk);
+err_core_clk:
clk_disable_unprepare(priv->clk);
return ret;
}
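
The probe rework above treats the core and register clocks as optional:
devm_clk_get() returning -ENOENT means no clock was wired up, while any
other error (notably -EPROBE_DEFER) must abort the probe. A condensed,
hedged sketch of that idiom outside the driver (names hypothetical):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_enable_optional_clk(struct platform_device *pdev,
					       const char *id, struct clk **out)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, id);
		int ret = PTR_ERR_OR_ZERO(clk);

		if (ret == -ENOENT)
			return 0;	/* the clock isn't mandatory */
		if (ret)
			return ret;	/* real error, incl. -EPROBE_DEFER */

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		*out = clk;
		return 0;
	}

Later kernels wrap this pattern as devm_clk_get_optional(); at the time of
this patch the -ENOENT check is spelled out by hand.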
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 4e219c21608b..b470a849721f 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -135,7 +135,7 @@
/* EIP197_HIA_xDR_DMA_CFG */
#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
-#define EIP197_HIA_xDR_WR_CTRL_BUG BIT(23)
+#define EIP197_HIA_xDR_WR_CTRL_BUF BIT(23)
#define EIP197_HIA_xDR_WR_OWN_BUF BIT(24)
#define EIP197_HIA_xDR_CFG_WR_CACHE(n) (((n) & 0x7) << 25)
#define EIP197_HIA_xDR_CFG_RD_CACHE(n) (((n) & 0x7) << 29)
@@ -179,7 +179,7 @@
#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n) ((n) << 0)
#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n) (((n) & 0x7) << 4)
#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n) ((n) << 8)
-#define EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE GENMASK(15, 14)
+#define EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE GENMASK(15, 14)
#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
@@ -525,6 +525,7 @@ struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
+ struct clk *reg_clk;
struct safexcel_config config;
enum safexcel_eip_version version;
@@ -551,10 +552,8 @@ struct safexcel_crypto_priv {
struct crypto_queue queue;
spinlock_t queue_lock;
- /* Number of requests in the engine that needs the threshold
- * interrupt to be set up.
- */
- int requests_left;
+ /* Number of requests in the engine. */
+ int requests;
/* The ring is currently handling at least one request */
bool busy;
@@ -580,12 +579,6 @@ struct safexcel_context {
int ring;
bool needs_inv;
bool exit_inv;
-
- /* Used for ahash requests */
- dma_addr_t result_dma;
- void *cache;
- dma_addr_t cache_dma;
- unsigned int cache_sz;
};
/*
@@ -609,9 +602,6 @@ struct safexcel_inv_result {
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
- struct crypto_async_request *req,
- int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
@@ -643,5 +633,7 @@ extern struct safexcel_alg_template safexcel_alg_sha1;
extern struct safexcel_alg_template safexcel_alg_sha224;
extern struct safexcel_alg_template safexcel_alg_sha256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 63a8768ed2ae..bafb60505fab 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -58,7 +58,8 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
+ token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
+ EIP197_TOKEN_STAT_LAST_HASH;
token[0].instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_CRYTO |
EIP197_TOKEN_INS_TYPE_OUTPUT;
@@ -456,7 +457,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
if (result.error) {
dev_warn(priv->dev,
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 122a2a58e98f..317b9e480312 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -21,10 +21,9 @@ struct safexcel_ahash_ctx {
struct safexcel_crypto_priv *priv;
u32 alg;
- u32 digest;
- u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
@@ -34,6 +33,9 @@ struct safexcel_ahash_req {
bool needs_inv;
int nents;
+ dma_addr_t result_dma;
+
+ u32 digest;
u8 state_sz; /* expected state size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
@@ -42,6 +44,9 @@ struct safexcel_ahash_req {
u64 processed;
u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ dma_addr_t cache_dma;
+ unsigned int cache_sz;
+
u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
@@ -49,6 +54,8 @@ struct safexcel_ahash_export_state {
u64 len;
u64 processed;
+ u32 digest;
+
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
u8 cache[SHA256_BLOCK_SIZE];
};
@@ -82,9 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
cdesc->control_data.control0 |= ctx->alg;
- cdesc->control_data.control0 |= ctx->digest;
+ cdesc->control_data.control0 |= req->digest;
- if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
+ if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
if (req->processed) {
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
@@ -112,12 +119,12 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
if (req->finish)
ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
}
- } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
- cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);
+ } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
- memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
- memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
- ctx->opad, digestsize);
+ memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
+ memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
+ ctx->opad, req->state_sz);
}
}
@@ -149,16 +156,26 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);
spin_unlock_bh(&priv->ring[ring].egress_lock);
- if (sreq->finish)
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
-
if (sreq->nents) {
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
sreq->nents = 0;
}
- safexcel_free_context(priv, async, sreq->state_sz);
+ if (sreq->result_dma) {
+ dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+ DMA_FROM_DEVICE);
+ sreq->result_dma = 0;
+ }
+
+ if (sreq->cache_dma) {
+ dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+ DMA_TO_DEVICE);
+ sreq->cache_dma = 0;
+ }
+
+ if (sreq->finish)
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
cache_len = sreq->len - sreq->processed;
if (cache_len)
@@ -184,7 +201,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
- if (queued < crypto_ahash_blocksize(ahash))
+ if (queued <= crypto_ahash_blocksize(ahash))
cache_len = queued;
else
cache_len = queued - areq->nbytes;
@@ -198,7 +215,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
/* If this is not the last request and the queued data
* is a multiple of a block, cache the last one for now.
*/
- extra = queued - crypto_ahash_blocksize(ahash);
+ extra = crypto_ahash_blocksize(ahash);
if (extra) {
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
@@ -220,24 +237,17 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
- ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
- if (!ctx->base.cache) {
- ret = -ENOMEM;
- goto unlock;
- }
- memcpy(ctx->base.cache, req->cache, cache_len);
- ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
- cache_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
- ret = -EINVAL;
- goto free_cache;
+ req->cache_dma = dma_map_single(priv->dev, req->cache,
+ cache_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, req->cache_dma)) {
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
+ return -EINVAL;
}
- ctx->base.cache_sz = cache_len;
+ req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
- ctx->base.cache_dma,
- cache_len, len,
+ req->cache_dma, cache_len, len,
ctx->base.ctxr_dma);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
@@ -271,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
sglen, len, ctx->base.ctxr_dma);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
- goto cdesc_rollback;
+ goto unmap_sg;
}
n_cdesc++;
@@ -291,19 +301,19 @@ send_command:
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
- ctx->base.result_dma = dma_map_single(priv->dev, req->state,
- req->state_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
+ req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, req->result_dma)) {
ret = -EINVAL;
- goto cdesc_rollback;
+ goto unmap_sg;
}
/* Add a result descriptor */
- rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
+ rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
req->state_sz);
if (IS_ERR(rdesc)) {
ret = PTR_ERR(rdesc);
- goto cdesc_rollback;
+ goto unmap_result;
}
spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -315,20 +325,21 @@ send_command:
*results = 1;
return 0;
+unmap_result:
+ dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+ DMA_FROM_DEVICE);
+unmap_sg:
+ dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
- if (ctx->base.cache_dma) {
- dma_unmap_single(priv->dev, ctx->base.cache_dma,
- ctx->base.cache_sz, DMA_TO_DEVICE);
- ctx->base.cache_sz = 0;
+ if (req->cache_dma) {
+ dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+ DMA_TO_DEVICE);
+ req->cache_sz = 0;
}
-free_cache:
- kfree(ctx->base.cache);
- ctx->base.cache = NULL;
-unlock:
spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
@@ -493,7 +504,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
if (result.error) {
dev_warn(priv->dev, "hash: completion error (%d)\n",
@@ -550,7 +561,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (ctx->base.ctxr) {
if (priv->version == EIP197 &&
!ctx->base.needs_inv && req->processed &&
- ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
/* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
* set in other functions and we want to keep the same
@@ -585,7 +596,6 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
static int safexcel_ahash_update(struct ahash_request *areq)
{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -601,7 +611,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
* We're not doing partial updates when performing an hmac request.
* Everything will be handled by the final() call.
*/
- if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
return 0;
if (req->hmac)
@@ -660,6 +670,8 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->len = req->len;
export->processed = req->processed;
+ export->digest = req->digest;
+
memcpy(export->state, req->state, req->state_sz);
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
@@ -680,6 +692,8 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
req->len = export->len;
req->processed = export->processed;
+ req->digest = export->digest;
+
memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
memcpy(req->state, export->state, req->state_sz);
@@ -716,7 +730,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
req->state[4] = SHA1_H4;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
return 0;
@@ -783,10 +797,10 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
safexcel_sha1_init(areq);
- ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
return 0;
}
@@ -839,7 +853,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
init_completion(&result.completion);
ret = crypto_ahash_digest(areq);
- if (ret == -EINPROGRESS) {
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
wait_for_completion_interruptible(&result.completion);
ret = result.error;
}
@@ -949,20 +963,21 @@ free_ahash:
return ret;
}
-static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
- ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
+ ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
if (ret)
return ret;
if (priv->version == EIP197 && ctx->base.ctxr) {
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ for (i = 0; i < state_sz / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
ctx->base.needs_inv = true;
@@ -971,12 +986,19 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
}
}
- memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
- memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+ memcpy(ctx->ipad, &istate.state, state_sz);
+ memcpy(ctx->opad, &ostate.state, state_sz);
return 0;
}
+static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
+ SHA1_DIGEST_SIZE);
+}
+
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.alg.ahash = {
@@ -1024,7 +1046,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
req->state[7] = SHA256_H7;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
return 0;
@@ -1086,7 +1108,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
req->state[7] = SHA224_H7;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
return 0;
@@ -1130,3 +1152,115 @@ struct safexcel_alg_template safexcel_alg_sha224 = {
},
},
};
+
+static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
+ SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha224_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha224_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha224_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha224_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha224_digest,
+ .setkey = safexcel_hmac_sha224_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "safexcel-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
+ SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha256_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha256_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha256_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha256_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha256_digest,
+ .setkey = safexcel_hmac_sha256_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "safexcel-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
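
With safexcel_alg_hmac_sha224 and safexcel_alg_hmac_sha256 registered,
userspace can exercise the new "hmac(sha256)" implementation through the
AF_ALG socket interface; the crypto core picks the highest-priority
provider, which may be this driver (priority 300) on EIP97/EIP197 hardware.
A minimal sketch, error checking omitted for brevity:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "hmac(sha256)",
		};
		unsigned char key[32] = { 0 };	/* demo key only */
		unsigned char digest[32];
		int tfmfd, opfd, i;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

		opfd = accept(tfmfd, NULL, 0);
		write(opfd, "hello", 5);	/* message to authenticate */
		read(opfd, digest, sizeof(digest));

		for (i = 0; i < 32; i++)
			printf("%02x", digest[i]);
		printf("\n");
		close(opfd);
		close(tfmfd);
		return 0;
	}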
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 717a26607bdb..27f7dad2d45d 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1167,9 +1167,11 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->authkey_len = keys.authkeylen;
ctx->enckey_len = keys.enckeylen;
+ memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
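
The ixp4xx change above (mirrored in the picoxcell, qat and chelsio hunks)
zeroes the on-stack crypto_authenc_keys copy on every exit path.
memzero_explicit() is used rather than memset() because the compiler is
free to elide a memset() of an object about to go out of scope as a dead
store. A hedged userspace approximation of the guarantee:

	#include <string.h>

	/* Sketch only: the kernel's memzero_explicit() is memset() plus
	 * barrier_data(), which this compiler barrier approximates.
	 */
	static void memzero_explicit_sketch(void *s, size_t count)
	{
		memset(s, 0, count);
		__asm__ __volatile__("" : : "r"(s) : "memory");
	}

	int example_setkey(const unsigned char *key, size_t keylen)
	{
		unsigned char split[64];  /* stand-in for crypto_authenc_keys */

		if (keylen > sizeof(split))
			return -1;
		memcpy(split, key, keylen);
		/* ... derive the enc/auth halves from the copy ... */

		/* A plain memset() here may be optimized away; this won't be. */
		memzero_explicit_sketch(split, sizeof(split));
		return 0;
	}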
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index aca2373fa1de..f81fa4a3e66b 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -25,7 +25,6 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 764be3e6933c..a10c418d4e5c 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -759,6 +759,16 @@ static int dcp_sha_digest(struct ahash_request *req)
return dcp_sha_finup(req);
}
+static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int dcp_sha_noexport(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -829,6 +839,8 @@ static struct ahash_alg dcp_sha1_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
+ .import = dcp_sha_noimport,
+ .export = dcp_sha_noexport,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.base = {
@@ -853,6 +865,8 @@ static struct ahash_alg dcp_sha256_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
+ .import = dcp_sha_noimport,
+ .export = dcp_sha_noexport,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.base = {
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 662e709812cc..80e9c842aad4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -359,6 +359,16 @@ static int n2_hash_async_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->fallback_req);
}
+static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int n2_hash_async_noexport(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
@@ -1467,6 +1477,8 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
ahash->final = n2_hash_async_final;
ahash->finup = n2_hash_async_finup;
ahash->digest = n2_hash_async_digest;
+ ahash->export = n2_hash_async_noexport;
+ ahash->import = n2_hash_async_noimport;
halg = &ahash->halg;
halg->digestsize = tmpl->digest_size;
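
The mxs-dcp and n2 hunks above close the same gap: the ahash API now
expects .export/.import to be populated, so drivers that cannot serialize
partial hash state register stubs returning -ENOSYS instead of leaving the
hooks NULL (which would crash any caller of crypto_ahash_export()). The
resulting shape, with hypothetical names:

	#include <crypto/internal/hash.h>
	#include <linux/errno.h>

	/* Partial-state save/restore is not supported by this hardware. */
	static int myhash_noexport(struct ahash_request *req, void *out)
	{
		return -ENOSYS;
	}

	static int myhash_noimport(struct ahash_request *req, const void *in)
	{
		return -ENOSYS;
	}

	static struct ahash_alg myhash_alg = {
		/* .init/.update/.final/.finup/.digest elided */
		.export = myhash_noexport,
		.import = myhash_noimport,
	};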
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index bf52cd1d7fca..66869976cfa2 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -1105,10 +1105,9 @@ static int __init nx842_pseries_init(void)
RCU_INIT_POINTER(devdata, NULL);
new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
- if (!new_devdata) {
- pr_err("Could not allocate memory for device data\n");
+ if (!new_devdata)
return -ENOMEM;
- }
+
RCU_INIT_POINTER(devdata, new_devdata);
ret = vio_register_driver(&nx842_vio_driver);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index fbec0a2e76dd..9019f6b67986 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -47,6 +47,8 @@
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);
+static int aes_fallback_sz = 200;
+
#ifdef DEBUG
#define omap_aes_read(dd, offset) \
({ \
@@ -388,7 +390,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- crypto_finalize_cipher_request(dd->engine, req, err);
+ crypto_finalize_ablkcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -408,14 +410,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
struct ablkcipher_request *req)
{
if (req)
- return crypto_transfer_cipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
return 0;
}
static int omap_aes_prepare_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
@@ -468,8 +471,9 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
}
static int omap_aes_crypt_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
@@ -517,7 +521,7 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (req->nbytes < 200) {
+ if (req->nbytes < aes_fallback_sz) {
SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_tfm(subreq, ctx->fallback);
@@ -601,6 +605,11 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
return omap_aes_crypt(req, FLAGS_CTR);
}
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+ void *req);
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+ void *req);
+
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
@@ -616,6 +625,10 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+ ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
+
return 0;
}
@@ -1029,6 +1042,87 @@ err:
return err;
}
+static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", aes_fallback_sz);
+}
+
+static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t status;
+ long value;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ /* HW accelerator only works with buffers >= 9 */
+ if (value < 9) {
+ dev_err(dev, "minimum fallback size 9\n");
+ return -EINVAL;
+ }
+
+ aes_fallback_sz = value;
+
+ return size;
+}
+
+static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct omap_aes_dev *dd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
+}
+
+static ssize_t queue_len_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct omap_aes_dev *dd;
+ ssize_t status;
+ long value;
+ unsigned long flags;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ if (value < 1)
+ return -EINVAL;
+
+ /*
+ * Changing the queue size on the fly is safe: if the size becomes
+ * smaller than the current size, the queue simply stops accepting
+ * new entries until it has shrunk enough.
+ */
+ spin_lock_bh(&list_lock);
+ list_for_each_entry(dd, &dev_list, list) {
+ spin_lock_irqsave(&dd->lock, flags);
+ dd->engine->queue.max_qlen = value;
+ dd->aead_queue.base.max_qlen = value;
+ spin_unlock_irqrestore(&dd->lock, flags);
+ }
+ spin_unlock_bh(&list_lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(queue_len);
+static DEVICE_ATTR_RW(fallback);
+
+static struct attribute *omap_aes_attrs[] = {
+ &dev_attr_queue_len.attr,
+ &dev_attr_fallback.attr,
+ NULL,
+};
+
+static struct attribute_group omap_aes_attr_group = {
+ .attrs = omap_aes_attrs,
+};
+
static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1119,8 +1213,6 @@ static int omap_aes_probe(struct platform_device *pdev)
goto err_engine;
}
- dd->engine->prepare_cipher_request = omap_aes_prepare_req;
- dd->engine->cipher_one_request = omap_aes_crypt_req;
err = crypto_engine_start(dd->engine);
if (err)
goto err_engine;
@@ -1159,6 +1251,12 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
+ err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
+ if (err) {
+ dev_err(dev, "could not create sysfs device attrs\n");
+ goto err_aead_algs;
+ }
+
return 0;
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
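
The fallback and queue_len knobs added to omap-aes above follow the
standard DEVICE_ATTR_RW() + attribute_group pattern; once
sysfs_create_group() succeeds in probe, both thresholds become runtime
tunable from userspace. A hedged usage sketch (the device path is
illustrative and platform dependent):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical path; locate the real one under /sys/devices */
		const char *path = "/sys/devices/platform/53500000.aes/fallback";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* route requests smaller than 512 bytes to the SW fallback */
		fprintf(f, "512\n");
		fclose(f);
		return 0;
	}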
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 8906342e2b9a..fc3b46a85809 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -13,6 +13,8 @@
#ifndef __OMAP_AES_H__
#define __OMAP_AES_H__
+#include <crypto/engine.h>
+
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -95,6 +97,7 @@ struct omap_aes_gcm_result {
};
struct omap_aes_ctx {
+ struct crypto_engine_ctx enginectx;
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
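
As the omap-aes and omap-des hunks show, the crypto_engine API moved from
engine-wide prepare_cipher_request/cipher_one_request hooks to per-tfm
callbacks: struct crypto_engine_ctx is placed as the first member of the
tfm context so the engine can locate it via crypto_tfm_ctx(), and the
hooks are wired up in cra_init. A condensed sketch of the new binding
(driver names hypothetical):

	#include <crypto/engine.h>
	#include <linux/crypto.h>
	#include <linux/kernel.h>

	struct mydrv_ctx {
		struct crypto_engine_ctx enginectx;	/* must come first */
		/* ... driver key material ... */
	};

	static int mydrv_do_one_request(struct crypto_engine *engine, void *areq)
	{
		struct ablkcipher_request *req =
			container_of(areq, struct ablkcipher_request, base);

		pr_debug("%u bytes queued\n", req->nbytes);
		/* Program the hardware here; on completion (e.g. from the
		 * IRQ path) the driver calls
		 * crypto_finalize_ablkcipher_request(engine, req, err).
		 */
		return 0;
	}

	static int mydrv_cra_init(struct crypto_tfm *tfm)
	{
		struct mydrv_ctx *ctx = crypto_tfm_ctx(tfm);

		ctx->enginectx.op.prepare_request = NULL;
		ctx->enginectx.op.unprepare_request = NULL;
		ctx->enginectx.op.do_one_request = mydrv_do_one_request;
		return 0;
	}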
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 23e37779317e..2c42e4b4a6e9 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -104,6 +104,10 @@ static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
return OMAP_CRYPTO_NOT_ALIGNED;
if (!IS_ALIGNED(sg->length, bs))
return OMAP_CRYPTO_NOT_ALIGNED;
+#ifdef CONFIG_ZONE_DMA
+ if (page_zonenum(sg_page(sg)) != ZONE_DMA)
+ return OMAP_CRYPTO_NOT_ALIGNED;
+#endif
len += sg->length;
sg = sg_next(sg);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index ebc5c0f11f03..eb95b0d7f184 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -86,6 +86,7 @@
#define FLAGS_OUT_DATA_ST_SHIFT 10
struct omap_des_ctx {
+ struct crypto_engine_ctx enginectx;
struct omap_des_dev *dd;
int keylen;
@@ -498,7 +499,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
pr_debug("err: %d\n", err);
- crypto_finalize_cipher_request(dd->engine, req, err);
+ crypto_finalize_ablkcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -520,14 +521,15 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
struct ablkcipher_request *req)
{
if (req)
- return crypto_transfer_cipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
return 0;
}
static int omap_des_prepare_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
@@ -582,8 +584,9 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
}
static int omap_des_crypt_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
@@ -695,12 +698,23 @@ static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
return omap_des_crypt(req, FLAGS_CBC);
}
+static int omap_des_prepare_req(struct crypto_engine *engine,
+ void *areq);
+static int omap_des_crypt_req(struct crypto_engine *engine,
+ void *areq);
+
static int omap_des_cra_init(struct crypto_tfm *tfm)
{
+ struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
pr_debug("enter\n");
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
+ ctx->enginectx.op.prepare_request = omap_des_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_des_crypt_req;
+
return 0;
}
@@ -1046,8 +1060,6 @@ static int omap_des_probe(struct platform_device *pdev)
goto err_engine;
}
- dd->engine->prepare_cipher_request = omap_des_prepare_req;
- dd->engine->cipher_one_request = omap_des_crypt_req;
err = crypto_engine_start(dd->engine);
if (err)
goto err_engine;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 86b89ace836f..ad02aa63b519 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -229,6 +229,7 @@ struct omap_sham_dev {
u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
unsigned long flags;
+ int fallback_sz;
struct crypto_queue queue;
struct ahash_request *req;
@@ -759,6 +760,13 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
while (nbytes > 0 && sg_tmp) {
n++;
+#ifdef CONFIG_ZONE_DMA
+ if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
+ aligned = false;
+ break;
+ }
+#endif
+
if (offset < sg_tmp->length) {
if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
aligned = false;
@@ -809,9 +817,6 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
bool final = rctx->flags & BIT(FLAGS_FINUP);
int xmit_len, hash_later;
- if (!req)
- return 0;
-
bs = get_block_size(rctx);
if (update)
@@ -1002,7 +1007,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
if (ctx->total < get_block_size(ctx) ||
- ctx->total < OMAP_SHA_DMA_THRESHOLD)
+ ctx->total < dd->fallback_sz)
ctx->flags |= BIT(FLAGS_CPU);
if (ctx->flags & BIT(FLAGS_CPU))
@@ -1258,11 +1263,11 @@ static int omap_sham_final(struct ahash_request *req)
/*
* OMAP HW accel works only with buffers >= 9.
* HMAC is always >= 9 because ipad == block size.
- * If buffersize is less than DMA_THRESHOLD, we use fallback
+ * If buffersize is less than fallback_sz, we use fallback
* SW encoding, as using DMA + HW in this case doesn't provide
* any benefit.
*/
- if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD)
+ if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
return omap_sham_final_shash(req);
else if (ctx->bufcnt)
return omap_sham_enqueue(req, OP_FINAL);
@@ -1761,7 +1766,7 @@ static void omap_sham_done_task(unsigned long data)
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
/* hash or semi-hash ready */
clear_bit(FLAGS_DMA_READY, &dd->flags);
- goto finish;
+ goto finish;
}
}
@@ -2013,6 +2018,85 @@ err:
return err;
}
+static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dd->fallback_sz);
+}
+
+static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+ ssize_t status;
+ long value;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ /* HW accelerator only works with buffers >= 9 */
+ if (value < 9) {
+ dev_err(dev, "minimum fallback size 9\n");
+ return -EINVAL;
+ }
+
+ dd->fallback_sz = value;
+
+ return size;
+}
+
+static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dd->queue.max_qlen);
+}
+
+static ssize_t queue_len_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+ ssize_t status;
+ long value;
+ unsigned long flags;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ if (value < 1)
+ return -EINVAL;
+
+ /*
+ * Changing the queue size on the fly is safe: if the size becomes
+ * smaller than the current size, the queue simply stops accepting
+ * new entries until it has shrunk enough.
+ */
+ spin_lock_irqsave(&dd->lock, flags);
+ dd->queue.max_qlen = value;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(queue_len);
+static DEVICE_ATTR_RW(fallback);
+
+static struct attribute *omap_sham_attrs[] = {
+ &dev_attr_queue_len.attr,
+ &dev_attr_fallback.attr,
+ NULL,
+};
+
+static struct attribute_group omap_sham_attr_group = {
+ .attrs = omap_sham_attrs,
+};
+
static int omap_sham_probe(struct platform_device *pdev)
{
struct omap_sham_dev *dd;
@@ -2074,6 +2158,8 @@ static int omap_sham_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+ dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
+
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
@@ -2111,6 +2197,12 @@ static int omap_sham_probe(struct platform_device *pdev)
}
}
+ err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
+ if (err) {
+ dev_err(dev, "could not create sysfs device attrs\n");
+ goto err_algs;
+ }
+
return 0;
err_algs:
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 4ef52c9d72fc..a4df966adbf6 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -499,10 +499,12 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
ctx->hash_key_len = keys.authkeylen;
+ memzero_explicit(&keys, sizeof(keys));
return 0;
badkey:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index baffae817259..1138e41d6805 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -546,11 +546,14 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
goto error;
+ memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
+ memzero_explicit(&keys, sizeof(keys));
return -EFAULT;
}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 13c52d6bf630..320e7854b4ee 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -969,7 +969,8 @@ unmap_src:
return ret;
}
-int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
@@ -1000,7 +1001,8 @@ err:
return ret;
}
-int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
@@ -1024,7 +1026,8 @@ int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
return 0;
}
-int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 5d64c08b7f47..bf7163042569 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -404,29 +404,31 @@ static const struct of_device_id s5p_sss_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
-static inline struct samsung_aes_variant *find_s5p_sss_version
- (struct platform_device *pdev)
+static inline const struct samsung_aes_variant *find_s5p_sss_version
+ (const struct platform_device *pdev)
{
if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
const struct of_device_id *match;
match = of_match_node(s5p_sss_dt_match,
pdev->dev.of_node);
- return (struct samsung_aes_variant *)match->data;
+ return (const struct samsung_aes_variant *)match->data;
}
- return (struct samsung_aes_variant *)
+ return (const struct samsung_aes_variant *)
platform_get_device_id(pdev)->driver_data;
}
static struct s5p_aes_dev *s5p_dev;
-static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
+ const struct scatterlist *sg)
{
SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}
-static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
+ const struct scatterlist *sg)
{
SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
@@ -619,7 +621,7 @@ static inline void s5p_hash_write(struct s5p_aes_dev *dd,
* @sg: scatterlist ready to DMA transmit
*/
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
- struct scatterlist *sg)
+ const struct scatterlist *sg)
{
dev->hash_sg_cnt--;
SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
@@ -792,9 +794,9 @@ static void s5p_hash_read_msg(struct ahash_request *req)
* @ctx: request context
*/
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
- struct s5p_hash_reqctx *ctx)
+ const struct s5p_hash_reqctx *ctx)
{
- u32 *hash = (u32 *)ctx->digest;
+ const u32 *hash = (const u32 *)ctx->digest;
unsigned int i;
for (i = 0; i < ctx->nregs; i++)
@@ -818,7 +820,7 @@ static void s5p_hash_write_iv(struct ahash_request *req)
*/
static void s5p_hash_copy_result(struct ahash_request *req)
{
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
if (!req->result)
return;
@@ -1210,9 +1212,6 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
int xmit_len, hash_later, nbytes;
int ret;
- if (!req)
- return 0;
-
if (update)
nbytes = req->nbytes;
else
@@ -1293,7 +1292,7 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
*/
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
@@ -1720,7 +1719,7 @@ static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
*/
static int s5p_hash_export(struct ahash_request *req, void *out)
{
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
@@ -1834,7 +1833,8 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
};
static void s5p_set_aes(struct s5p_aes_dev *dev,
- uint8_t *key, uint8_t *iv, unsigned int keylen)
+ const uint8_t *key, const uint8_t *iv,
+ unsigned int keylen)
{
void __iomem *keystart;
@@ -2153,7 +2153,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int i, j, err = -ENODEV;
- struct samsung_aes_variant *variant;
+ const struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
unsigned int hash_i;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 08e7bdcaa6e3..0f2245e1af2b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1397,11 +1397,9 @@ static int sahara_probe(struct platform_device *pdev)
int err;
int i;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&pdev->dev, "unable to alloc data struct.\n");
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
- }
dev->device = &pdev->dev;
platform_set_drvdata(pdev, dev);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 4a06a7a665ee..c5d3efc54a4f 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -17,6 +17,7 @@
#include <crypto/des.h>
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
#define DRIVER_NAME "stm32-cryp"
@@ -29,8 +30,12 @@
#define FLG_ECB BIT(4)
#define FLG_CBC BIT(5)
#define FLG_CTR BIT(6)
+#define FLG_GCM BIT(7)
+#define FLG_CCM BIT(8)
/* Mode mask = bits [15..0] */
#define FLG_MODE_MASK GENMASK(15, 0)
+/* Bits [31..16] status */
+#define FLG_CCM_PADDED_WA BIT(16)
/* Registers */
#define CRYP_CR 0x00000000
@@ -53,6 +58,8 @@
#define CRYP_IV0RR 0x00000044
#define CRYP_IV1LR 0x00000048
#define CRYP_IV1RR 0x0000004C
+#define CRYP_CSGCMCCM0R 0x00000050
+#define CRYP_CSGCM0R 0x00000070
/* Registers values */
#define CR_DEC_NOT_ENC 0x00000004
@@ -64,6 +71,8 @@
#define CR_AES_CBC 0x00000028
#define CR_AES_CTR 0x00000030
#define CR_AES_KP 0x00000038
+#define CR_AES_GCM 0x00080000
+#define CR_AES_CCM 0x00080008
#define CR_AES_UNKNOWN 0xFFFFFFFF
#define CR_ALGO_MASK 0x00080038
#define CR_DATA32 0x00000000
@@ -75,6 +84,12 @@
#define CR_KEY256 0x00000200
#define CR_FFLUSH 0x00004000
#define CR_CRYPEN 0x00008000
+#define CR_PH_INIT 0x00000000
+#define CR_PH_HEADER 0x00010000
+#define CR_PH_PAYLOAD 0x00020000
+#define CR_PH_FINAL 0x00030000
+#define CR_PH_MASK 0x00030000
+#define CR_NBPBL_SHIFT 20
#define SR_BUSY 0x00000010
#define SR_OFNE 0x00000004
@@ -87,10 +102,17 @@
/* Misc */
#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define GCM_CTR_INIT 2
#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+struct stm32_cryp_caps {
+ bool swap_final;
+ bool padding_wa;
+};
+
struct stm32_cryp_ctx {
+ struct crypto_engine_ctx enginectx;
struct stm32_cryp *cryp;
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
@@ -108,13 +130,16 @@ struct stm32_cryp {
struct clk *clk;
unsigned long flags;
u32 irq_status;
+ const struct stm32_cryp_caps *caps;
struct stm32_cryp_ctx *ctx;
struct crypto_engine *engine;
- struct mutex lock; /* protects req */
+ struct mutex lock; /* protects req / areq */
struct ablkcipher_request *req;
+ struct aead_request *areq;
+ size_t authsize;
size_t hw_blocksize;
size_t total_in;
@@ -137,6 +162,7 @@ struct stm32_cryp {
struct scatter_walk out_walk;
u32 last_ctr[4];
+ u32 gcm_ctr;
};
struct stm32_cryp_list {
@@ -179,6 +205,16 @@ static inline bool is_ctr(struct stm32_cryp *cryp)
return cryp->flags & FLG_CTR;
}
+static inline bool is_gcm(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_GCM;
+}
+
+static inline bool is_ccm(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CCM;
+}
+
static inline bool is_encrypt(struct stm32_cryp *cryp)
{
return cryp->flags & FLG_ENCRYPT;
@@ -207,6 +243,24 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
!(status & SR_BUSY), 10, 100000);
}
+static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_CR, status,
+ !(status & CR_CRYPEN), 10, 100000);
+}
+
+static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ status & SR_OFNE, 10, 100000);
+}
+
+static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
+
static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
{
struct stm32_cryp *tmp, *cryp = NULL;
@@ -365,6 +419,12 @@ static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
if (is_aes(cryp) && is_ctr(cryp))
return CR_AES_CTR;
+ if (is_aes(cryp) && is_gcm(cryp))
+ return CR_AES_GCM;
+
+ if (is_aes(cryp) && is_ccm(cryp))
+ return CR_AES_CCM;
+
if (is_des(cryp) && is_ecb(cryp))
return CR_DES_ECB;
@@ -381,6 +441,79 @@ static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
return CR_AES_UNKNOWN;
}
+static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp)
+{
+ return is_encrypt(cryp) ? cryp->areq->cryptlen :
+ cryp->areq->cryptlen - cryp->authsize;
+}
+
+static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
+{
+ int ret;
+ u32 iv[4];
+
+ /* Phase 1 : init */
+ memcpy(iv, cryp->areq->iv, 12);
+ iv[3] = cpu_to_be32(GCM_CTR_INIT);
+ cryp->gcm_ctr = GCM_CTR_INIT;
+ stm32_cryp_hw_write_iv(cryp, iv);
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_enable(cryp);
+ if (ret)
+ dev_err(cryp->dev, "Timeout (gcm init)\n");
+
+ return ret;
+}
+
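For context on GCM_CTR_INIT: with a 96-bit IV, GCM forms the pre-counter block as IV || 0x00000001 and reserves counter value 1 for encrypting the final GHASH result (the tag), so payload encryption starts at counter 2, which is exactly what the init phase above programs. A standalone restatement of that setup (htonl() standing in for cpu_to_be32()):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    static void gcm_init_ctr(const uint8_t iv96[12], uint32_t ctr[4])
    {
            memcpy(ctr, iv96, 12);
            ctr[3] = htonl(2); /* counter 1 is consumed by the tag block */
    }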
+static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
+{
+ int ret;
+ u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ u32 *d;
+ unsigned int i, textlen;
+
+ /* Phase 1 : init. First set the CTR value to 1 (not 0) */
+ memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
+ memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+ iv[AES_BLOCK_SIZE - 1] = 1;
+ stm32_cryp_hw_write_iv(cryp, (u32 *)iv);
+
+ /* Build B0 */
+ memcpy(b0, iv, AES_BLOCK_SIZE);
+
+ b0[0] |= (8 * ((cryp->authsize - 2) / 2));
+
+ if (cryp->areq->assoclen)
+ b0[0] |= 0x40;
+
+ textlen = stm32_cryp_get_input_text_len(cryp);
+
+ b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
+ b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
+
+ /* Enable HW */
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+
+ /* Write B0 */
+ d = (u32 *)b0;
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (!cryp->caps->padding_wa)
+ *d = cpu_to_be32(*d);
+ stm32_cryp_write(cryp, CRYP_DIN, *d++);
+ }
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_enable(cryp);
+ if (ret)
+ dev_err(cryp->dev, "Timeout (ccm init)\n");
+
+ return ret;
+}
+
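As a sanity check on the B0 construction above (RFC 3610): `8 * ((authsize - 2) / 2)` places the encoded tag length in bits 3..5 of the flags byte, bit 6 is set when associated data is present, and the trailing two bytes carry the payload length (the low bits of b0[0] come from the IV's length field and are untouched by the ORs). A tiny standalone computation with illustrative values, a 16-byte tag and a 64-byte payload with AAD present:

    #include <stdio.h>

    int main(void)
    {
            unsigned int authsize = 16, textlen = 64;
            unsigned char flags = 0;

            flags |= 8 * ((authsize - 2) / 2); /* (16 - 2) / 2 = 7 -> 0x38 */
            flags |= 0x40;                     /* assoclen != 0 */

            printf("b0[0] |= 0x%02x, b0[14] = 0x%02x, b0[15] = 0x%02x\n",
                   flags, textlen >> 8, textlen & 0xFF);
            return 0;
    }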
static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
{
int ret;
@@ -436,6 +569,29 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
switch (hw_mode) {
+ case CR_AES_GCM:
+ case CR_AES_CCM:
+ /* Phase 1 : init */
+ if (hw_mode == CR_AES_CCM)
+ ret = stm32_cryp_ccm_init(cryp, cfg);
+ else
+ ret = stm32_cryp_gcm_init(cryp, cfg);
+
+ if (ret)
+ return ret;
+
+ /* Phase 2 : header (authenticated data) */
+ if (cryp->areq->assoclen) {
+ cfg |= CR_PH_HEADER;
+ } else if (stm32_cryp_get_input_text_len(cryp)) {
+ cfg |= CR_PH_PAYLOAD;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ cfg |= CR_PH_INIT;
+ }
+
+ break;
+
case CR_DES_CBC:
case CR_TDES_CBC:
case CR_AES_CBC:
@@ -452,12 +608,16 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
+ cryp->flags &= ~FLG_CCM_PADDED_WA;
+
return 0;
}
-static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
{
- int err = 0;
+ if (!err && (is_gcm(cryp) || is_ccm(cryp)))
+ /* Phase 4 : output tag */
+ err = stm32_cryp_read_auth_tag(cryp);
if (cryp->sgs_copied) {
void *buf_in, *buf_out;
@@ -478,8 +638,14 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
free_pages((unsigned long)buf_out, pages);
}
- crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
- cryp->req = NULL;
+ if (is_gcm(cryp) || is_ccm(cryp)) {
+ crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
+ cryp->areq = NULL;
+ } else {
+ crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
+ err);
+ cryp->req = NULL;
+ }
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
@@ -494,10 +660,36 @@ static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
return 0;
}
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ void *areq);
+
static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
{
+ struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+ ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
+ ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ return 0;
+}
+
+static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
+static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine,
+ void *areq);
+
+static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
+{
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req;
+ ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+
return 0;
}
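Both init callbacks above publish the engine hooks through ctx->enginectx.op instead of the old per-engine function pointers (whose removal shows up further down in probe). This works because the engine recovers the ops from the transform context, which is why struct crypto_engine_ctx sits first in the driver context; a sketch of the layout this relies on, member names as in the hunk above:

    struct stm32_cryp_ctx {
            struct crypto_engine_ctx enginectx; /* must stay the first member */
            struct stm32_cryp *cryp;            /* driver-private state follows */
            int keylen;
            /* ... */
    };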
@@ -513,7 +705,21 @@ static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
- return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = aead_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -555,6 +761,46 @@ static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return stm32_cryp_setkey(tfm, key, keylen);
}
+static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
+}
+
+static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
@@ -585,6 +831,26 @@ static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
}
+static int stm32_cryp_aes_gcm_encrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
+}
+
+static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
+}
+
static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
@@ -625,18 +891,19 @@ static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
-static int stm32_cryp_prepare_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
+ struct aead_request *areq)
{
struct stm32_cryp_ctx *ctx;
struct stm32_cryp *cryp;
struct stm32_cryp_reqctx *rctx;
int ret;
- if (!req)
+ if (!req && !areq)
return -EINVAL;
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
+ crypto_aead_ctx(crypto_aead_reqtfm(areq));
cryp = ctx->cryp;
@@ -645,7 +912,7 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
mutex_lock(&cryp->lock);
- rctx = ablkcipher_request_ctx(req);
+ rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
ctx->cryp = cryp;
@@ -654,15 +921,48 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
cryp->ctx = ctx;
- cryp->req = req;
- cryp->total_in = req->nbytes;
- cryp->total_out = cryp->total_in;
+ if (req) {
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+ } else {
+ /*
+ * Length of input and output data:
+ * Encryption case:
+ * INPUT = AssocData || PlainText
+ * <- assoclen -> <- cryptlen ->
+ * <------- total_in ----------->
+ *
+ * OUTPUT = AssocData || CipherText || AuthTag
+ * <- assoclen -> <- cryptlen -> <- authsize ->
+ * <---------------- total_out ----------------->
+ *
+ * Decryption case:
+ * INPUT = AssocData || CipherText || AuthTag
+ * <- assoclen -> <--------- cryptlen --------->
+ * <- authsize ->
+ * <---------------- total_in ------------------>
+ *
+ * OUTPUT = AssocData || PlainText
+ * <- assoclen -> <- crypten - authsize ->
+ * <---------- total_out ----------------->
+ */
+ cryp->areq = areq;
+ cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
+ cryp->total_in = areq->assoclen + areq->cryptlen;
+ if (is_encrypt(cryp))
+ /* Append auth tag to output */
+ cryp->total_out = cryp->total_in + cryp->authsize;
+ else
+ /* No auth tag in output */
+ cryp->total_out = cryp->total_in - cryp->authsize;
+ }
cryp->total_in_save = cryp->total_in;
cryp->total_out_save = cryp->total_out;
- cryp->in_sg = req->src;
- cryp->out_sg = req->dst;
+ cryp->in_sg = req ? req->src : areq->src;
+ cryp->out_sg = req ? req->dst : areq->dst;
cryp->out_sg_save = cryp->out_sg;
cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
@@ -686,6 +986,12 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ if (is_gcm(cryp) || is_ccm(cryp)) {
+ /* In output, jump after assoc data */
+ scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen);
+ cryp->total_out -= cryp->areq->assoclen;
+ }
+
ret = stm32_cryp_hw_init(cryp);
out:
if (ret)
@@ -695,14 +1001,20 @@ out:
}
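To make the length diagram above concrete, a worked example with illustrative numbers: GCM encryption with assoclen = 20, cryptlen = 64 and authsize = 16 gives total_in = 84 and total_out = 100; the matching decryption sees cryptlen = 80 (ciphertext plus tag), hence total_in = 100 and total_out = 84. The same arithmetic as a standalone check:

    #include <assert.h>

    int main(void)
    {
            unsigned int assoclen = 20, cryptlen = 64, authsize = 16;

            /* encryption: no tag on input, tag appended on output */
            assert(assoclen + cryptlen == 84);             /* total_in  */
            assert(assoclen + cryptlen + authsize == 100); /* total_out */

            /* decryption: cryptlen covers ciphertext + tag */
            cryptlen = 64 + authsize;
            assert(assoclen + cryptlen == 100);            /* total_in  */
            assert(assoclen + cryptlen - authsize == 84);  /* total_out */
            return 0;
    }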
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
- return stm32_cryp_prepare_req(engine, req);
+ struct ablkcipher_request *req = container_of(areq,
+ struct ablkcipher_request,
+ base);
+
+ return stm32_cryp_prepare_req(req, NULL);
}
-static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
+ struct ablkcipher_request *req = container_of(areq,
+ struct ablkcipher_request,
+ base);
struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
@@ -713,6 +1025,34 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
return stm32_cryp_cpu_start(cryp);
}
+static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+
+ return stm32_cryp_prepare_req(NULL, req);
+}
+
+static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ if (unlikely(!cryp->areq->assoclen &&
+ !stm32_cryp_get_input_text_len(cryp))) {
+ /* No input data to process: get tag and finish */
+ stm32_cryp_finish_req(cryp, 0);
+ return 0;
+ }
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
unsigned int n)
{
@@ -745,6 +1085,111 @@ static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
return (u32 *)((u8 *)src + n);
}
+static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
+{
+ u32 cfg, size_bit, *dst, d32;
+ u8 *d8;
+ unsigned int i, j;
+ int ret = 0;
+
+ /* Update Config */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_FINAL;
+ cfg &= ~CR_DEC_NOT_ENC;
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ if (is_gcm(cryp)) {
+ /* GCM: write aad and payload size (in bits) */
+ size_bit = cryp->areq->assoclen * 8;
+ if (cryp->caps->swap_final)
+ size_bit = cpu_to_be32(size_bit);
+
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+
+ size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
+ cryp->areq->cryptlen - AES_BLOCK_SIZE;
+ size_bit *= 8;
+ if (cryp->caps->swap_final)
+ size_bit = cpu_to_be32(size_bit);
+
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+ } else {
+ /* CCM: write CTR0 */
+ u8 iv[AES_BLOCK_SIZE];
+ u32 *iv32 = (u32 *)iv;
+
+ memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
+ memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (!cryp->caps->padding_wa)
+ *iv32 = cpu_to_be32(*iv32);
+ stm32_cryp_write(cryp, CRYP_DIN, *iv32++);
+ }
+ }
+
+ /* Wait for output data */
+ ret = stm32_cryp_wait_output(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (read tag)\n");
+ return ret;
+ }
+
+ if (is_encrypt(cryp)) {
+ /* Get and write tag */
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (cryp->total_out >= sizeof(u32)) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst,
+ sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+ /* Empty fifo out (data from input padding) */
+ stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+ /* Read less than a u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+ } else {
+ /* Get and check tag */
+ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
+
+ scatterwalk_map_and_copy(in_tag, cryp->in_sg,
+ cryp->total_in_save - cryp->authsize,
+ cryp->authsize, 0);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ if (crypto_memneq(in_tag, out_tag, cryp->authsize))
+ ret = -EBADMSG;
+ }
+
+ /* Disable cryp */
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return ret;
+}
+
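One detail worth noting in the decrypt branch above: the tag comparison uses crypto_memneq() rather than memcmp(), since memcmp() returns at the first mismatching byte and thereby leaks, through timing, how long a forged tag's correct prefix is. The kernel helper is the one to use; a minimal sketch of a comparison in the same constant-time spirit:

    static int tags_differ(const unsigned char *a, const unsigned char *b,
                           unsigned int len)
    {
            unsigned char acc = 0;
            unsigned int i;

            for (i = 0; i < len; i++)
                    acc |= a[i] ^ b[i]; /* no data-dependent branches */

            return acc != 0;
    }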
static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
{
u32 cr;
@@ -777,17 +1222,24 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
unsigned int i, j;
u32 d32, *dst;
u8 *d8;
+ size_t tag_size;
+
+ /* Do not read the tag now (if any) */
+ if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
+ tag_size = cryp->authsize;
+ else
+ tag_size = 0;
dst = sg_virt(cryp->out_sg) + _walked_out;
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_out >= sizeof(u32))) {
+ if (likely(cryp->total_out - tag_size >= sizeof(u32))) {
/* Read a full u32 */
*dst = stm32_cryp_read(cryp, CRYP_DOUT);
dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
cryp->total_out -= sizeof(u32);
- } else if (!cryp->total_out) {
+ } else if (cryp->total_out == tag_size) {
/* Empty fifo out (data from input padding) */
d32 = stm32_cryp_read(cryp, CRYP_DOUT);
} else {
@@ -795,15 +1247,15 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
d32 = stm32_cryp_read(cryp, CRYP_DOUT);
d8 = (u8 *)&d32;
- for (j = 0; j < cryp->total_out; j++) {
+ for (j = 0; j < cryp->total_out - tag_size; j++) {
*((u8 *)dst) = *(d8++);
dst = stm32_cryp_next_out(cryp, dst, 1);
}
- cryp->total_out = 0;
+ cryp->total_out = tag_size;
}
}
- return !cryp->total_out || !cryp->total_in;
+ return !(cryp->total_out - tag_size) || !cryp->total_in;
}
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
@@ -811,33 +1263,219 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
unsigned int i, j;
u32 *src;
u8 d8[4];
+ size_t tag_size;
+
+ /* Do not write the tag (if any) */
+ if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
+ tag_size = cryp->authsize;
+ else
+ tag_size = 0;
src = sg_virt(cryp->in_sg) + _walked_in;
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_in >= sizeof(u32))) {
+ if (likely(cryp->total_in - tag_size >= sizeof(u32))) {
/* Write a full u32 */
stm32_cryp_write(cryp, CRYP_DIN, *src);
src = stm32_cryp_next_in(cryp, src, sizeof(u32));
cryp->total_in -= sizeof(u32);
- } else if (!cryp->total_in) {
+ } else if (cryp->total_in == tag_size) {
/* Write padding data */
stm32_cryp_write(cryp, CRYP_DIN, 0);
} else {
/* Write less than an u32 */
memset(d8, 0, sizeof(u32));
- for (j = 0; j < cryp->total_in; j++) {
+ for (j = 0; j < cryp->total_in - tag_size; j++) {
d8[j] = *((u8 *)src);
src = stm32_cryp_next_in(cryp, src, 1);
}
stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- cryp->total_in = 0;
+ cryp->total_in = tag_size;
}
}
}
+static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
+{
+ int err;
+ u32 cfg, tmp[AES_BLOCK_32];
+ size_t total_in_ori = cryp->total_in;
+ struct scatterlist *out_sg_ori = cryp->out_sg;
+ unsigned int i;
+
+ /* 'Special workaround' procedure described in the datasheet */
+
+ /* a) disable ip */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) Update IV1R */
+ stm32_cryp_write(cryp, CRYP_IV1RR, cryp->gcm_ctr - 2);
+
+ /* c) change mode to CTR */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_CTR;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* a) enable IP */
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) pad and write the last block */
+ stm32_cryp_irq_write_block(cryp);
+ cryp->total_in = total_in_ori;
+ err = stm32_cryp_wait_output(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (write gcm header)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ /* c) get and store encrypted data */
+ stm32_cryp_irq_read_data(cryp);
+ scatterwalk_map_and_copy(tmp, out_sg_ori,
+ cryp->total_in_save - total_in_ori,
+ total_in_ori, 0);
+
+ /* d) change mode back to AES GCM */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_GCM;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* e) change phase to Final */
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_FINAL;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* f) write padded data */
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (cryp->total_in)
+ stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+ else
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+ cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+ }
+
+ /* g) Empty fifo out */
+ err = stm32_cryp_wait_output(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (write gcm header)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_read(cryp, CRYP_DOUT);
+
+ /* h) run the normal Final phase */
+ stm32_cryp_finish_req(cryp, 0);
+}
+
+static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
+{
+ u32 cfg, payload_bytes;
+
+ /* disable IP, set NPBLB and re-enable IP */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
+ cryp->total_in;
+ cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+}
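The NPBLB field written above tells the peripheral how many trailing bytes of the final block are padding, so a short last block can be handled without the software workaround. A quick worked example with illustrative numbers: a 16-byte hardware block with 13 payload bytes left yields 3 padding bytes shifted into position:

    #include <stdio.h>

    #define CR_NBPBL_SHIFT 20

    int main(void)
    {
            unsigned int hw_blocksize = 16, payload_bytes = 13;
            unsigned int cfg = 0;

            cfg |= (hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
            printf("cfg = 0x%08x\n", cfg); /* 3 << 20 = 0x00300000 */
            return 0;
    }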
+
+static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
+{
+ int err = 0;
+ u32 cfg, iv1tmp;
+ u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
+ size_t last_total_out, total_in_ori = cryp->total_in;
+ struct scatterlist *out_sg_ori = cryp->out_sg;
+ unsigned int i;
+
+ /* 'Special workaround' procedure described in the datasheet */
+ cryp->flags |= FLG_CCM_PADDED_WA;
+
+ /* a) disable ip */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) get IV1 from CRYP_CSGCMCCM7 */
+ iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4);
+
+ /* c) Load CRYP_CSGCMCCMxR */
+ for (i = 0; i < ARRAY_SIZE(cstmp1); i++)
+ cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
+
+ /* d) Write IV1R */
+ stm32_cryp_write(cryp, CRYP_IV1RR, iv1tmp);
+
+ /* e) change mode to CTR */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_CTR;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* a) enable IP */
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) pad and write the last block */
+ stm32_cryp_irq_write_block(cryp);
+ cryp->total_in = total_in_ori;
+ err = stm32_cryp_wait_output(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ /* c) get and store decrypted data */
+ last_total_out = cryp->total_out;
+ stm32_cryp_irq_read_data(cryp);
+
+ memset(tmp, 0, sizeof(tmp));
+ scatterwalk_map_and_copy(tmp, out_sg_ori,
+ cryp->total_out_save - last_total_out,
+ last_total_out, 0);
+
+ /* d) Load again CRYP_CSGCMCCMxR */
+ for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
+ cstmp2[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
+
+ /* e) change mode back to AES CCM */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_CCM;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* f) change phase to header */
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_HEADER;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* g) XOR and write padded data */
+ for (i = 0; i < ARRAY_SIZE(tmp); i++) {
+ tmp[i] ^= cstmp1[i];
+ tmp[i] ^= cstmp2[i];
+ stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+ }
+
+ /* h) wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err)
+ dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
+
+ /* i) run the normal Final phase */
+ stm32_cryp_finish_req(cryp, err);
+}
+
static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
{
if (unlikely(!cryp->total_in)) {
@@ -845,28 +1483,220 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
return;
}
+ if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
+ (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
+ is_encrypt(cryp))) {
+ /* Padding for AES GCM encryption */
+ if (cryp->caps->padding_wa)
+ /* Special case 1 */
+ return stm32_cryp_irq_write_gcm_padded_data(cryp);
+
+ /* Setting padding bytes (NPBLB) */
+ stm32_cryp_irq_set_npblb(cryp);
+ }
+
+ if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
+ (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
+ is_decrypt(cryp))) {
+ /* Padding for AES CCM decryption */
+ if (cryp->caps->padding_wa)
+ /* Special case 2 */
+ return stm32_cryp_irq_write_ccm_padded_data(cryp);
+
+ /* Setting padding bytes (NPBLB) */
+ stm32_cryp_irq_set_npblb(cryp);
+ }
+
if (is_aes(cryp) && is_ctr(cryp))
stm32_cryp_check_ctr_counter(cryp);
stm32_cryp_irq_write_block(cryp);
}
+static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
+{
+ int err;
+ unsigned int i, j;
+ u32 cfg, *src;
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+
+ /* Check if whole header written */
+ if ((cryp->total_in_save - cryp->total_in) ==
+ cryp->areq->assoclen) {
+ /* Write padding if needed */
+ for (j = i + 1; j < AES_BLOCK_32; j++)
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+ /* Wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (gcm header)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ if (stm32_cryp_get_input_text_len(cryp)) {
+ /* Phase 3 : payload */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_PAYLOAD;
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ /* Phase 4 : tag */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp, 0);
+ }
+
+ break;
+ }
+
+ if (!cryp->total_in)
+ break;
+ }
+}
+
+static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
+{
+ int err;
+ unsigned int i = 0, j, k;
+ u32 alen, cfg, *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+ alen = cryp->areq->assoclen;
+
+ if (!_walked_in) {
+ if (cryp->areq->assoclen <= 65280) {
+ /* Write first u32 of B1 */
+ d8[0] = (alen >> 8) & 0xFF;
+ d8[1] = alen & 0xFF;
+ d8[2] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ d8[3] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ i++;
+
+ cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+ } else {
+ /* Build the two first u32 of B1 */
+ d8[0] = 0xFF;
+ d8[1] = 0xFE;
+ d8[2] = alen & 0xFF000000;
+ d8[3] = alen & 0x00FF0000;
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ i++;
+
+ d8[0] = alen & 0x0000FF00;
+ d8[1] = alen & 0x000000FF;
+ d8[2] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ d8[3] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ i++;
+
+ cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+ }
+ }
+
+ /* Write next u32 */
+ for (; i < AES_BLOCK_32; i++) {
+ /* Build a u32 */
+ memset(d8, 0, sizeof(u32));
+ for (k = 0; k < sizeof(u32); k++) {
+ d8[k] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+
+ cryp->total_in -= min_t(size_t, 1, cryp->total_in);
+ if ((cryp->total_in_save - cryp->total_in) == alen)
+ break;
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+
+ if ((cryp->total_in_save - cryp->total_in) == alen) {
+ /* Write padding if needed */
+ for (j = i + 1; j < AES_BLOCK_32; j++)
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+ /* Wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (ccm header)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ if (stm32_cryp_get_input_text_len(cryp)) {
+ /* Phase 3 : payload */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_PAYLOAD;
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ /* Phase 4 : tag */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp, 0);
+ }
+
+ break;
+ }
+ }
+}
+
static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
{
struct stm32_cryp *cryp = arg;
+ u32 ph;
if (cryp->irq_status & MISR_OUT)
/* Output FIFO IRQ: read data */
if (unlikely(stm32_cryp_irq_read_data(cryp))) {
/* All bytes processed, finish */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp);
+ stm32_cryp_finish_req(cryp, 0);
return IRQ_HANDLED;
}
if (cryp->irq_status & MISR_IN) {
- /* Input FIFO IRQ: write data */
- stm32_cryp_irq_write_data(cryp);
+ if (is_gcm(cryp)) {
+ ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+ if (unlikely(ph == CR_PH_HEADER))
+ /* Write Header */
+ stm32_cryp_irq_write_gcm_header(cryp);
+ else
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ cryp->gcm_ctr++;
+ } else if (is_ccm(cryp)) {
+ ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+ if (unlikely(ph == CR_PH_HEADER))
+ /* Write Header */
+ stm32_cryp_irq_write_ccm_header(cryp);
+ else
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ } else {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
}
return IRQ_HANDLED;
@@ -1028,8 +1858,62 @@ static struct crypto_alg crypto_algs[] = {
},
};
+static struct aead_alg aead_algs[] = {
+{
+ .setkey = stm32_cryp_aes_aead_setkey,
+ .setauthsize = stm32_cryp_aes_gcm_setauthsize,
+ .encrypt = stm32_cryp_aes_gcm_encrypt,
+ .decrypt = stm32_cryp_aes_gcm_decrypt,
+ .init = stm32_cryp_aes_aead_init,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "stm32-gcm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+},
+{
+ .setkey = stm32_cryp_aes_aead_setkey,
+ .setauthsize = stm32_cryp_aes_ccm_setauthsize,
+ .encrypt = stm32_cryp_aes_ccm_encrypt,
+ .decrypt = stm32_cryp_aes_ccm_decrypt,
+ .init = stm32_cryp_aes_aead_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "stm32-ccm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+},
+};
+
+static const struct stm32_cryp_caps f7_data = {
+ .swap_final = true,
+ .padding_wa = true,
+};
+
+static const struct stm32_cryp_caps mp1_data = {
+ .swap_final = false,
+ .padding_wa = false,
+};
+
static const struct of_device_id stm32_dt_ids[] = {
- { .compatible = "st,stm32f756-cryp", },
+ { .compatible = "st,stm32f756-cryp", .data = &f7_data},
+ { .compatible = "st,stm32mp1-cryp", .data = &mp1_data},
{},
};
MODULE_DEVICE_TABLE(of, stm32_dt_ids);
@@ -1046,6 +1930,10 @@ static int stm32_cryp_probe(struct platform_device *pdev)
if (!cryp)
return -ENOMEM;
+ cryp->caps = of_device_get_match_data(dev);
+ if (!cryp->caps)
+ return -ENODEV;
+
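The caps pointer recovered here comes straight from the .data members added to the of_device_id table further down, so each compatible string selects its capability set and a match without data is rejected early. Sketch of the pairing, names as used in this file:

    static const struct of_device_id stm32_dt_ids[] = {
            { .compatible = "st,stm32f756-cryp", .data = &f7_data },
            { .compatible = "st,stm32mp1-cryp",  .data = &mp1_data },
            { /* sentinel */ },
    };

    /* in probe: */
    cryp->caps = of_device_get_match_data(dev);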
cryp->dev = dev;
mutex_init(&cryp->lock);
@@ -1102,9 +1990,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_engine1;
}
- cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
- cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
-
ret = crypto_engine_start(cryp->engine);
if (ret) {
dev_err(dev, "Could not start crypto engine\n");
@@ -1117,10 +2002,16 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_algs;
}
+ ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ goto err_aead_algs;
+
dev_info(dev, "Initialized\n");
return 0;
+err_aead_algs:
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
err_algs:
err_engine2:
crypto_engine_exit(cryp->engine);
@@ -1141,6 +2032,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
if (!cryp)
return -ENODEV;
+ crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4ca4a264a833..981e45692695 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -122,6 +122,7 @@ enum stm32_hash_data_format {
#define HASH_DMA_THRESHOLD 50
struct stm32_hash_ctx {
+ struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
unsigned long flags;
@@ -626,7 +627,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
writesl(hdev->io_base + HASH_DIN, buffer,
DIV_ROUND_UP(ncp, sizeof(u32)));
}
- stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
+ stm32_hash_set_nblw(hdev, ncp);
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
stm32_hash_write(hdev, HASH_STR, reg);
@@ -743,13 +744,15 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
struct ahash_request *req = hdev->req;
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
int err;
+ int buflen = rctx->bufcnt;
+
+ rctx->bufcnt = 0;
if (!(rctx->flags & HASH_FLAGS_CPU))
err = stm32_hash_dma_send(hdev);
else
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
- rctx->bufcnt = 0;
return err;
}
@@ -828,15 +831,19 @@ static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
return 0;
}
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
+static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
+
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
struct ahash_request *req)
{
return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
-static int stm32_hash_prepare_req(struct crypto_engine *engine,
- struct ahash_request *req)
+static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_request_ctx *rctx;
@@ -854,9 +861,10 @@ static int stm32_hash_prepare_req(struct crypto_engine *engine,
return stm32_hash_hw_init(hdev, rctx);
}
-static int stm32_hash_one_request(struct crypto_engine *engine,
- struct ahash_request *req)
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_request_ctx *rctx;
@@ -1033,6 +1041,9 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
if (algs_hmac_name)
ctx->flags |= HASH_FLAGS_HMAC;
+ ctx->enginectx.op.do_one_request = stm32_hash_one_request;
+ ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@@ -1096,6 +1107,8 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
reg &= ~HASH_SR_OUTPUT_READY;
stm32_hash_write(hdev, HASH_SR, reg);
hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ /* Disable interrupts */
+ stm32_hash_write(hdev, HASH_IMR, 0);
return IRQ_WAKE_THREAD;
}
@@ -1404,18 +1417,19 @@ MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
struct device *dev)
{
- int err;
-
hdev->pdata = of_device_get_match_data(dev);
if (!hdev->pdata) {
dev_err(dev, "no compatible OF match\n");
return -EINVAL;
}
- err = of_property_read_u32(dev->of_node, "dma-maxburst",
- &hdev->dma_maxburst);
+ if (of_property_read_u32(dev->of_node, "dma-maxburst",
+ &hdev->dma_maxburst)) {
+ dev_info(dev, "dma-maxburst not specified, using 0\n");
+ hdev->dma_maxburst = 0;
+ }
- return err;
+ return 0;
}
static int stm32_hash_probe(struct platform_device *pdev)
@@ -1493,9 +1507,6 @@ static int stm32_hash_probe(struct platform_device *pdev)
goto err_engine;
}
- hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
- hdev->engine->hash_one_request = stm32_hash_one_request;
-
ret = crypto_engine_start(hdev->engine);
if (ret)
goto err_engine_start;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 1547cbe13dc2..a81d89b3b7d8 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = {
module_platform_driver(sun4i_ss_driver);
+MODULE_ALIAS("platform:sun4i-ss");
MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6882fa2f8bad..7cebf0a6ffbc 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -104,16 +104,34 @@ static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
+static void __map_single_talitos_ptr(struct device *dev,
+ struct talitos_ptr *ptr,
+ unsigned int len, void *data,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ to_talitos_ptr(ptr, dma_addr, len, is_sec1);
+}
+
static void map_single_talitos_ptr(struct device *dev,
struct talitos_ptr *ptr,
unsigned int len, void *data,
enum dma_data_direction dir)
{
- dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
+ __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
+}
- to_talitos_ptr(ptr, dma_addr, len, is_sec1);
+static void map_single_talitos_ptr_nosync(struct device *dev,
+ struct talitos_ptr *ptr,
+ unsigned int len, void *data,
+ enum dma_data_direction dir)
+{
+ __map_single_talitos_ptr(dev, ptr, len, data, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
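DMA_ATTR_SKIP_CPU_SYNC asks the DMA API to omit the CPU cache maintenance it would normally perform at map and unmap time; the _nosync variant above is used for the hash context buffers, which this patch synchronizes explicitly elsewhere (see the map/unmap pairs in ahash_init() and ahash_export() below). A sketch of the core calls the helper wraps:

    dma_addr_t dma;

    /* map without touching the CPU caches; the caller syncs separately */
    dma = dma_map_single_attrs(dev, data, len, DMA_TO_DEVICE,
                               DMA_ATTR_SKIP_CPU_SYNC);
    /* ... hand dma to the descriptor ... */
    dma_unmap_single_attrs(dev, dma, len, DMA_TO_DEVICE,
                           DMA_ATTR_SKIP_CPU_SYNC);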
/*
@@ -832,8 +850,6 @@ struct talitos_ctx {
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
- dma_addr_t dma_buf;
- dma_addr_t dma_hw_context;
};
#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -888,10 +904,12 @@ static int aead_setkey(struct crypto_aead *authenc,
ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
DMA_TO_DEVICE);
+ memzero_explicit(&keys, sizeof(keys));
return 0;
badkey:
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -1130,10 +1148,10 @@ next:
return count;
}
-static int talitos_sg_map(struct device *dev, struct scatterlist *src,
- unsigned int len, struct talitos_edesc *edesc,
- struct talitos_ptr *ptr,
- int sg_count, unsigned int offset, int tbl_off)
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off, int elen)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1142,6 +1160,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
to_talitos_ptr(ptr, 0, 0, is_sec1);
return 1;
}
+ to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
@@ -1150,7 +1169,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
return sg_count;
}
- sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
&edesc->link_tbl[tbl_off]);
if (sg_count == 1) {
/* Only one segment now, so no link tbl needed*/
@@ -1164,6 +1183,15 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
return sg_count;
}
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off)
+{
+ return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+ tbl_off, 0);
+}
+
/*
* fill in and submit ipsec_esp descriptor
*/
@@ -1181,7 +1209,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
- int sg_link_tbl_len;
+ int elen = 0;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1223,17 +1251,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- sg_link_tbl_len = cryptlen;
-
- if (is_ipsec_esp) {
- to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
+ if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+ elen = authsize;
- if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len += authsize;
- }
-
- ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
- &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen);
if (ret > 1) {
tbl_off += ret;
@@ -1404,7 +1426,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- dev_err(dev, "could not allocate edescriptor\n");
err = ERR_PTR(-ENOMEM);
goto error_sg;
}
@@ -1690,9 +1711,30 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ struct talitos_desc *desc = &edesc->desc;
+ struct talitos_desc *desc2 = desc + 1;
+
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+ if (desc->next_desc &&
+ desc->ptr[5].ptr != desc2->ptr[5].ptr)
+ unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+ /* When using hashctx-in, must unmap it. */
+ if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+ DMA_TO_DEVICE);
+ else if (desc->next_desc)
+ unmap_single_talitos_ptr(dev, &desc2->ptr[1],
+ DMA_TO_DEVICE);
+
+ if (is_sec1 && req_ctx->nbuf)
+ unmap_single_talitos_ptr(dev, &desc->ptr[3],
+ DMA_TO_DEVICE);
+
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
@@ -1766,8 +1808,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* hash context in */
if (!req_ctx->first || req_ctx->swinit) {
- to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_TO_DEVICE);
req_ctx->swinit = 0;
}
/* Indicate next op is not the first. */
@@ -1793,10 +1837,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
* data in
*/
if (is_sec1 && req_ctx->nbuf) {
- dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
- HASH_MAX_BLOCK_SIZE;
-
- to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
+ map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
+ req_ctx->buf[req_ctx->buf_idx],
+ DMA_TO_DEVICE);
} else {
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
&desc->ptr[3], sg_count, offset, 0);
@@ -1812,8 +1855,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
crypto_ahash_digestsize(tfm),
areq->result, DMA_FROM_DEVICE);
else
- to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_FROM_DEVICE);
/* last DWORD empty */
@@ -1832,9 +1877,14 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
- to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
-
+ if (desc->ptr[1].ptr)
+ copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
+ is_sec1);
+ else
+ map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_TO_DEVICE);
copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
&desc2->ptr[3], sg_count, offset, 0);
@@ -1842,8 +1892,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sync_needed = true;
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
if (req_ctx->last)
- to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_FROM_DEVICE);
next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
DMA_BIDIRECTIONAL);
@@ -1885,8 +1937,7 @@ static int ahash_init(struct ahash_request *areq)
struct device *dev = ctx->dev;
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
unsigned int size;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
+ dma_addr_t dma;
/* Initialize the context */
req_ctx->buf_idx = 0;
@@ -1898,18 +1949,10 @@ static int ahash_init(struct ahash_request *areq)
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
req_ctx->hw_context_size = size;
- if (ctx->dma_hw_context)
- dma_unmap_single(dev, ctx->dma_hw_context, size,
- DMA_BIDIRECTIONAL);
- ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
- DMA_BIDIRECTIONAL);
- if (ctx->dma_buf)
- dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
- if (is_sec1)
- ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
- sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
return 0;
}
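The map-then-unmap pair above looks like a no-op, but on architectures with non-coherent caches dma_map_single(..., DMA_TO_DEVICE) writes the CPU's dirty cache lines for hw_context back to memory, so the pair acts as a one-shot cache clean before later descriptors reference the buffer through the _nosync helpers. The idiom, isolated:

    dma_addr_t dma;

    /* flush hw_context from the CPU caches; no transfer happens here */
    dma = dma_map_single(dev, req_ctx->hw_context,
                         req_ctx->hw_context_size, DMA_TO_DEVICE);
    dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);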
@@ -1920,12 +1963,6 @@ static int ahash_init(struct ahash_request *areq)
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = ctx->dev;
-
- ahash_init(areq);
- req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
req_ctx->hw_context[0] = SHA224_H0;
req_ctx->hw_context[1] = SHA224_H1;
@@ -1940,8 +1977,8 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
req_ctx->hw_context[8] = 0;
req_ctx->hw_context[9] = 0;
- dma_sync_single_for_device(dev, ctx->dma_hw_context,
- req_ctx->hw_context_size, DMA_TO_DEVICE);
+ ahash_init(areq);
+ req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
return 0;
}
@@ -2046,13 +2083,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
/* request SEC to INIT hash. */
if (req_ctx->first && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
- if (is_sec1) {
- dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
- HASH_MAX_BLOCK_SIZE;
-
- dma_sync_single_for_device(dev, dma_buf,
- req_ctx->nbuf, DMA_TO_DEVICE);
- }
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
@@ -2106,12 +2136,15 @@ static int ahash_export(struct ahash_request *areq, void *out)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct talitos_export_state *export = out;
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = ctx->dev;
+ dma_addr_t dma;
+
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
- dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
- req_ctx->hw_context_size, DMA_FROM_DEVICE);
memcpy(export->hw_context, req_ctx->hw_context,
req_ctx->hw_context_size);
memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
@@ -2128,39 +2161,29 @@ static int ahash_import(struct ahash_request *areq, const void *in)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- const struct talitos_export_state *export = in;
- unsigned int size;
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = ctx->dev;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
+ const struct talitos_export_state *export = in;
+ unsigned int size;
+ dma_addr_t dma;
memset(req_ctx, 0, sizeof(*req_ctx));
size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
req_ctx->hw_context_size = size;
- if (ctx->dma_hw_context)
- dma_unmap_single(dev, ctx->dma_hw_context, size,
- DMA_BIDIRECTIONAL);
-
memcpy(req_ctx->hw_context, export->hw_context, size);
- ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
- DMA_BIDIRECTIONAL);
- if (ctx->dma_buf)
- dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
memcpy(req_ctx->buf[0], export->buf, export->nbuf);
- if (is_sec1)
- ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
- sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
req_ctx->swinit = export->swinit;
req_ctx->first = export->first;
req_ctx->last = export->last;
req_ctx->to_hash_later = export->to_hash_later;
req_ctx->nbuf = export->nbuf;
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
return 0;
}
@@ -3064,27 +3087,6 @@ static void talitos_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}
-static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
-{
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- unsigned int size;
-
- talitos_cra_exit(tfm);
-
- size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
- SHA256_DIGEST_SIZE)
- ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
- : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
-
- if (ctx->dma_hw_context)
- dma_unmap_single(dev, ctx->dma_hw_context, size,
- DMA_BIDIRECTIONAL);
- if (ctx->dma_buf)
- dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
- DMA_TO_DEVICE);
-}
-
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -3183,7 +3185,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
- alg->cra_exit = talitos_cra_exit_ahash;
+ alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
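Note on the talitos hunks above: the driver stops keeping long-lived ctx->dma_hw_context / ctx->dma_buf mappings and instead pairs dma_map_single() with an immediate dma_unmap_single() on the request-local buffer. On non-coherent hardware that pair acts as a one-shot cache writeback (DMA_TO_DEVICE) or invalidate (DMA_FROM_DEVICE). A minimal sketch of the idiom, with hypothetical names (dev, buf, len); the patch itself omits the error check:

#include <linux/dma-mapping.h>

/* One-shot sync of a CPU-side buffer without keeping the mapping:
 * dma_map_single() performs the writeback for DMA_TO_DEVICE, and the
 * immediate dma_unmap_single() releases the mapping again.
 */
static int flush_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}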
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 765f53e548ab..cb31b59c9d53 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1404,9 +1404,8 @@ static void cryp_algs_unregister_all(void)
static int ux500_cryp_probe(struct platform_device *pdev)
{
int ret;
- int cryp_error = 0;
- struct resource *res = NULL;
- struct resource *res_irq = NULL;
+ struct resource *res;
+ struct resource *res_irq;
struct cryp_device_data *device_data;
struct cryp_protection_config prot = {
.privilege_access = CRYP_STATE_ENABLE
@@ -1416,7 +1415,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
dev_dbg(dev, "[%s]", __func__);
device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
if (!device_data) {
- dev_err(dev, "[%s]: kzalloc() failed!", __func__);
ret = -ENOMEM;
goto out;
}
@@ -1479,15 +1477,13 @@ static int ux500_cryp_probe(struct platform_device *pdev)
goto out_clk_unprepare;
}
- cryp_error = cryp_check(device_data);
- if (cryp_error != 0) {
- dev_err(dev, "[%s]: cryp_init() failed!", __func__);
+ if (cryp_check(device_data)) {
+ dev_err(dev, "[%s]: cryp_check() failed!", __func__);
ret = -EINVAL;
goto out_power;
}
- cryp_error = cryp_configure_protection(device_data, &prot);
- if (cryp_error != 0) {
+ if (cryp_configure_protection(device_data, &prot)) {
dev_err(dev, "[%s]: cryp_configure_protection() failed!",
__func__);
ret = -EINVAL;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 9acccad26928..2d0a677bcc76 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1403,6 +1403,16 @@ out:
return ret1 ? ret1 : ret2;
}
+static int ahash_noimport(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int ahash_noexport(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
static int hmac_sha1_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1507,6 +1517,8 @@ static struct hash_algo_template hash_algs[] = {
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha1_digest,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
@@ -1529,6 +1541,8 @@ static struct hash_algo_template hash_algs[] = {
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha256_digest,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
@@ -1553,6 +1567,8 @@ static struct hash_algo_template hash_algs[] = {
.final = ahash_final,
.digest = hmac_sha1_digest,
.setkey = hmac_sha1_setkey,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
@@ -1577,6 +1593,8 @@ static struct hash_algo_template hash_algs[] = {
.final = ahash_final,
.digest = hmac_sha256_digest,
.setkey = hmac_sha256_setkey,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 5db07495ddc5..a4324b1383a4 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -2,7 +2,6 @@ config CRYPTO_DEV_VIRTIO
tristate "VirtIO crypto driver"
depends on VIRTIO
select CRYPTO_AEAD
- select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_ENGINE
default m
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index abe8c15450df..ba190cfa7aa1 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -29,6 +29,7 @@
struct virtio_crypto_ablkcipher_ctx {
+ struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_tfm *tfm;
@@ -491,7 +492,7 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
vc_sym_req->ablkcipher_req = req;
vc_sym_req->encrypt = true;
- return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
@@ -511,7 +512,7 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
vc_sym_req->ablkcipher_req = req;
vc_sym_req->encrypt = false;
- return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
@@ -521,6 +522,9 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
ctx->tfm = tfm;
+ ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@@ -538,9 +542,9 @@ static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
}
int virtio_crypto_ablkcipher_crypt_req(
- struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ struct crypto_engine *engine, void *vreq)
{
+ struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
struct virtio_crypto_sym_request *vc_sym_req =
ablkcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
@@ -561,8 +565,8 @@ static void virtio_crypto_ablkcipher_finalize_req(
struct ablkcipher_request *req,
int err)
{
- crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
- req, err);
+ crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index e976539a05d9..66501a5a2b7b 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
-#include <crypto/authenc.h>
#include <crypto/engine.h>
@@ -107,8 +106,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
- struct crypto_engine *engine,
- struct ablkcipher_request *req);
+ struct crypto_engine *engine, void *vreq);
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index ff1410a32c2b..83326986c113 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -111,9 +111,6 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
-
- vi->data_vq[i].engine->cipher_one_request =
- virtio_crypto_ablkcipher_crypt_req;
}
kfree(names);
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index c94dfe8adb63..168191fa0357 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-config CRYPTO_DEV_CCREE
+config CRYPTO_DEV_CCREE_OLD
tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
- depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
+ depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA && BROKEN
default n
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index bdc27970f95f..553db5c45354 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
+obj-$(CONFIG_CRYPTO_DEV_CCREE_OLD) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
diff --git a/include/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
deleted file mode 100644
index 4e655c2a4e15..000000000000
--- a/include/crypto/ablk_helper.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Shared async block cipher helpers
- */
-
-#ifndef _CRYPTO_ABLK_HELPER_H
-#define _CRYPTO_ABLK_HELPER_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/cryptd.h>
-
-struct async_helper_ctx {
- struct cryptd_ablkcipher *cryptd_tfm;
-};
-
-extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len);
-
-extern int __ablk_encrypt(struct ablkcipher_request *req);
-
-extern int ablk_encrypt(struct ablkcipher_request *req);
-
-extern int ablk_decrypt(struct ablkcipher_request *req);
-
-extern void ablk_exit(struct crypto_tfm *tfm);
-
-extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
-
-extern int ablk_init(struct crypto_tfm *tfm);
-
-#endif /* _CRYPTO_ABLK_HELPER_H */
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e3cebf640c00..1aba888241dd 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -30,7 +30,6 @@ struct crypto_type {
int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
void (*free)(struct crypto_instance *inst);
unsigned int type;
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index dd04c1699b51..1cbec29af3d6 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -17,7 +17,10 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/akcipher.h>
#include <crypto/hash.h>
+#include <crypto/skcipher.h>
#define ENGINE_NAME_LEN 30
/*
@@ -37,12 +40,6 @@
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
- * @prepare_cipher_request: do some prepare if need before handle the current request
- * @unprepare_cipher_request: undo any work done by prepare_cipher_request()
- * @cipher_one_request: do encryption for current request
- * @prepare_hash_request: do some prepare if need before handle the current request
- * @unprepare_hash_request: undo any work done by prepare_hash_request()
- * @hash_one_request: do hash for current request
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -65,19 +62,6 @@ struct crypto_engine {
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*prepare_cipher_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*unprepare_cipher_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*prepare_hash_request)(struct crypto_engine *engine,
- struct ahash_request *req);
- int (*unprepare_hash_request)(struct crypto_engine *engine,
- struct ahash_request *req);
- int (*cipher_one_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*hash_one_request)(struct crypto_engine *engine,
- struct ahash_request *req);
-
struct kthread_worker *kworker;
struct kthread_work pump_requests;
@@ -85,19 +69,45 @@ struct crypto_engine {
struct crypto_async_request *cur_req;
};
-int crypto_transfer_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req,
- bool need_pump);
-int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req);
-int crypto_transfer_hash_request(struct crypto_engine *engine,
- struct ahash_request *req, bool need_pump);
+/**
+ * struct crypto_engine_op - crypto hardware engine operations
+ * @prepare_request: do any preparation needed before handling the current request
+ * @unprepare_request: undo any work done by prepare_request()
+ * @do_one_request: process the current request
+ */
+struct crypto_engine_op {
+ int (*prepare_request)(struct crypto_engine *engine,
+ void *areq);
+ int (*unprepare_request)(struct crypto_engine *engine,
+ void *areq);
+ int (*do_one_request)(struct crypto_engine *engine,
+ void *areq);
+};
+
+struct crypto_engine_ctx {
+ struct crypto_engine_op op;
+};
+
+int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req);
+int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
+ struct aead_request *req);
+int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
+ struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
- struct ahash_request *req);
-void crypto_finalize_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err);
+ struct ahash_request *req);
+int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
+ struct skcipher_request *req);
+void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err);
+void crypto_finalize_aead_request(struct crypto_engine *engine,
+ struct aead_request *req, int err);
+void crypto_finalize_akcipher_request(struct crypto_engine *engine,
+ struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
+void crypto_finalize_skcipher_request(struct crypto_engine *engine,
+ struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
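With the per-algorithm ops moved out of struct crypto_engine into the per-transform struct crypto_engine_ctx, a driver embeds the ctx at the start of its tfm context, fills in the ops at init time, and gets the request back as a void * in do_one_request; the virtio_crypto conversion above follows exactly this pattern. A minimal sketch, using hypothetical mydrv_* names:

#include <crypto/engine.h>

static struct crypto_engine *mydrv_engine;	/* from crypto_engine_alloc_init() */

struct mydrv_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first: the engine
						 * reads the ops via crypto_tfm_ctx() */
	/* ... driver state ... */
};

static int mydrv_do_one(struct crypto_engine *engine, void *areq)
{
	struct ablkcipher_request *req =
		container_of(areq, struct ablkcipher_request, base);

	/* ... kick the hardware; on completion (often in an IRQ handler): */
	crypto_finalize_ablkcipher_request(engine, req, 0);
	return 0;
}

static int mydrv_cra_init(struct crypto_tfm *tfm)
{
	struct mydrv_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->enginectx.op.do_one_request = mydrv_do_one;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static int mydrv_encrypt(struct ablkcipher_request *req)
{
	return crypto_transfer_ablkcipher_request_to_engine(mydrv_engine, req);
}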
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2d1849dffb80..76e432cab75d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -74,7 +74,8 @@ struct ahash_request {
* @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
- * transformation. No data processing happens at this point.
 * transformation. No data processing happens at this point. Driver
 * implementations must not use req->result.
* @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
@@ -83,7 +84,8 @@ struct ahash_request {
* transformation. This function shall not modify the transformation
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
- * [SHASH] or asynchronously [AHASH] at this point.
+ * [SHASH] or asynchronously [AHASH] at this point. Driver must not use
+ * req->result.
* @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
@@ -120,11 +122,12 @@ struct ahash_request {
* you want to save partial result of the transformation after
* processing certain amount of data and reload this partial result
* multiple times later on for multiple re-use. No data processing
- * happens at this point.
+ * happens at this point. Driver must not use req->result.
* @import: Import partial state of the transformation. This function loads the
* entire state of the ongoing transformation from a provided block of
* data so the transformation can continue from this point onward. No
- * data processing happens at this point.
+ * data processing happens at this point. Driver must not use
+ * req->result.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
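The kernel-doc additions in the hash.h hunk above tighten the contract: outside of .final/.finup/.digest, req->result may be NULL or unmapped, so drivers must keep all intermediate state in the request context. A minimal sketch of a conforming .init, with hypothetical mydrv_* names:

#include <crypto/internal/hash.h>
#include <crypto/sha.h>

struct mydrv_req_ctx {
	unsigned int digestsize;
	u8 state[SHA256_DIGEST_SIZE];	/* intermediate digest lives here */
};

static int mydrv_ahash_init(struct ahash_request *req)
{
	struct mydrv_req_ctx *rctx = ahash_request_ctx(req);

	/* Only the request context may be touched here: req->result can be
	 * NULL (or unmapped) until .final/.finup/.digest runs.
	 */
	memset(rctx, 0, sizeof(*rctx));
	rctx->digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	return 0;
}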
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 27040a46d50a..a0b0ad9d585e 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -126,11 +126,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
-int ahash_mcryptd_update(struct ahash_request *desc);
-int ahash_mcryptd_final(struct ahash_request *desc);
-int ahash_mcryptd_finup(struct ahash_request *desc);
-int ahash_mcryptd_digest(struct ahash_request *desc);
-
int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index 32ceb6929885..f18344518e32 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -7,6 +7,7 @@
#define _CRYPTO_INTERNAL_SIMD_H
struct simd_skcipher_alg;
+struct skcipher_alg;
struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
const char *drvname,
@@ -15,4 +16,10 @@ struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
+int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs);
+
+void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs);
+
#endif /* _CRYPTO_INTERNAL_SIMD_H */
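The new bulk helpers wrap an array of skcipher algorithms with cryptd-backed SIMD proxies in a single call; the x86 glue conversions in this series use them roughly as sketched below (my_algs is a hypothetical array of skcipher_alg):

#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>

static struct skcipher_alg my_algs[2];		/* e.g. ecb + cbc variants */
static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];

static int __init my_mod_init(void)
{
	return simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
					      my_simd_algs);
}

static void __exit my_mod_exit(void)
{
	simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs), my_simd_algs);
}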
diff --git a/include/crypto/lrw.h b/include/crypto/lrw.h
deleted file mode 100644
index a9d44c06d081..000000000000
--- a/include/crypto/lrw.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CRYPTO_LRW_H
-#define _CRYPTO_LRW_H
-
-#include <crypto/b128ops.h>
-
-struct scatterlist;
-struct gf128mul_64k;
-struct blkcipher_desc;
-
-#define LRW_BLOCK_SIZE 16
-
-struct lrw_table_ctx {
- /* optimizes multiplying a random (non incrementing, as at the
- * start of a new sector) value with key2, we could also have
- * used 4k optimization tables or no optimization at all. In the
- * latter case we would have to store key2 here */
- struct gf128mul_64k *table;
- /* stores:
- * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
- * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
- * key2*{ 0,0,...1,1,1,1,1 }, etc
- * needed for optimized multiplication of incrementing values
- * with key2 */
- be128 mulinc[128];
-};
-
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
-void lrw_free_table(struct lrw_table_ctx *ctx);
-
-struct lrw_crypt_req {
- be128 *tbuf;
- unsigned int tbuflen;
-
- struct lrw_table_ctx *table_ctx;
- void *crypt_ctx;
- void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
-};
-
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- struct lrw_crypt_req *req);
-
-#endif /* _CRYPTO_LRW_H */
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
new file mode 100644
index 000000000000..b64e64d20b28
--- /dev/null
+++ b/include/crypto/sm4.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Common values for the SM4 algorithm
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ */
+
+#ifndef _CRYPTO_SM4_H
+#define _CRYPTO_SM4_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define SM4_KEY_SIZE 16
+#define SM4_BLOCK_SIZE 16
+#define SM4_RKEY_WORDS 32
+
+struct crypto_sm4_ctx {
+ u32 rkey_enc[SM4_RKEY_WORDS];
+ u32 rkey_dec[SM4_RKEY_WORDS];
+};
+
+int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
+
+#endif
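crypto/sm4_generic.c in this series registers a single-block cipher backed by these helpers; a minimal sketch of driving it through the generic cipher API, assuming the algorithm is registered under the name "sm4":

#include <crypto/sm4.h>
#include <linux/crypto.h>

static int sm4_one_block(const u8 *key, const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("sm4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_cipher_setkey(tfm, key, SM4_KEY_SIZE);	/* ends up in crypto_sm4_set_key() */
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);
	crypto_free_cipher(tfm);
	return err;
}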
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
new file mode 100644
index 000000000000..73cfc952d405
--- /dev/null
+++ b/include/crypto/speck.h
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common values for the Speck algorithm
+ */
+
+#ifndef _CRYPTO_SPECK_H
+#define _CRYPTO_SPECK_H
+
+#include <linux/types.h>
+
+/* Speck128 */
+
+#define SPECK128_BLOCK_SIZE 16
+
+#define SPECK128_128_KEY_SIZE 16
+#define SPECK128_128_NROUNDS 32
+
+#define SPECK128_192_KEY_SIZE 24
+#define SPECK128_192_NROUNDS 33
+
+#define SPECK128_256_KEY_SIZE 32
+#define SPECK128_256_NROUNDS 34
+
+struct speck128_tfm_ctx {
+ u64 round_keys[SPECK128_256_NROUNDS];
+ int nrounds;
+};
+
+void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
+ unsigned int keysize);
+
+/* Speck64 */
+
+#define SPECK64_BLOCK_SIZE 8
+
+#define SPECK64_96_KEY_SIZE 12
+#define SPECK64_96_NROUNDS 26
+
+#define SPECK64_128_KEY_SIZE 16
+#define SPECK64_128_NROUNDS 27
+
+struct speck64_tfm_ctx {
+ u32 round_keys[SPECK64_128_NROUNDS];
+ int nrounds;
+};
+
+void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
+ unsigned int keysize);
+
+#endif /* _CRYPTO_SPECK_H */
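These are library-style entry points exported by the generic crypto/speck.c and reused by the ARM/arm64 NEON glue added in this series; direct use is a plain setkey plus per-block calls, roughly:

#include <crypto/speck.h>
#include <linux/errno.h>
#include <linux/string.h>

static int speck128_roundtrip(const u8 *key, const u8 *in, u8 *out)
{
	struct speck128_tfm_ctx ctx;
	u8 tmp[SPECK128_BLOCK_SIZE];
	int err;

	err = crypto_speck128_setkey(&ctx, key, SPECK128_128_KEY_SIZE);
	if (err)
		return err;
	crypto_speck128_encrypt(&ctx, out, in);		/* one 16-byte block */
	crypto_speck128_decrypt(&ctx, tmp, out);	/* tmp matches in again */
	return memcmp(tmp, in, SPECK128_BLOCK_SIZE) ? -EINVAL : 0;
}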
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 322aab6e78a7..34d94c95445a 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -6,27 +6,10 @@
#include <crypto/internal/skcipher.h>
#include <linux/fips.h>
-struct scatterlist;
-struct blkcipher_desc;
-
#define XTS_BLOCK_SIZE 16
-struct xts_crypt_req {
- le128 *tbuf;
- unsigned int tbuflen;
-
- void *tweak_ctx;
- void (*tweak_fn)(void *ctx, u8* dst, const u8* src);
- void *crypt_ctx;
- void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
-};
-
#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
-int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- struct xts_crypt_req *req);
-
static inline int xts_check_key(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 451aaa0786ae..4b13e0a3e15b 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -156,6 +156,23 @@ static inline void le64_add_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) + val);
}
+/* XXX: this stuff can be optimized */
+static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __le32_to_cpus(buf);
+ buf++;
+ }
+}
+
+static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le32s(buf);
+ buf++;
+ }
+}
+
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
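The md4/md5 conversions elsewhere in this series use these helpers to byte-swap a whole input block in place on big-endian machines; a short sketch (src is a hypothetical pointer to raw little-endian input):

static void load_le_block(u32 *block, const void *src)
{
	memcpy(block, src, 16 * sizeof(u32));	/* one 64-byte input block */
	le32_to_cpu_array(block, 16);		/* LE -> CPU, in place */
	/* ... process the block in native byte order ... */
	cpu_to_le32_array(block, 16);		/* CPU -> LE, in place */
}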
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 7e6e84cf6383..6eb06101089f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -435,6 +435,14 @@ struct compress_alg {
* @cra_exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @cra_init, used to remove various changes set in
* @cra_init.
+ * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
+ * definition. See @struct @ablkcipher_alg.
+ * @cra_u.blkcipher: Union member which contains a synchronous block cipher
+ *	definition. See @struct @blkcipher_alg.
+ * @cra_u.cipher: Union member which contains a single-block symmetric cipher
+ * definition. See @struct @cipher_alg.
+ * @cra_u.compress: Union member which contains a (de)compression algorithm.
+ * See @struct @compress_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
* @cra_list: internally used
* @cra_users: internally used