author     Linus Torvalds <torvalds@linux-foundation.org>  2020-01-28 15:38:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-01-28 15:38:56 -0800
commit     a78208e2436963d0b2c7d186277d6e1a9755029a
tree       090caa51386d811a2750aef3dc70cd247f6aa622
parent     68353984d63d8d7ea728819dbdb7aecc5f32d360
parent     0bc81767c5bd9d005fae1099fb39eb3688370cb1
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Removed CRYPTO_TFM_RES flags
   - Extended spawn grabbing to all algorithm types
   - Moved hash descsize verification into API code

  Algorithms:
   - Fixed recursive pcrypt dead-lock
   - Added new 32 and 64-bit generic versions of poly1305
   - Added cryptogams implementation of x86/poly1305

  Drivers:
   - Added support for i.MX8M Mini in caam
   - Added support for i.MX8M Nano in caam
   - Added support for i.MX8M Plus in caam
   - Added support for A33 variant of SS in sun4i-ss
   - Added TEE support for Raven Ridge in ccp
   - Added in-kernel API to submit TEE commands in ccp
   - Added AMD-TEE driver
   - Added support for BCM2711 in iproc-rng200
   - Added support for AES256-GCM based ciphers for chtls
   - Added aead support on SEC2 in hisilicon"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (244 commits)
  crypto: arm/chacha - fix build failured when kernel mode NEON is disabled
  crypto: caam - add support for i.MX8M Plus
  crypto: x86/poly1305 - emit does base conversion itself
  crypto: hisilicon - fix spelling mistake "disgest" -> "digest"
  crypto: chacha20poly1305 - add back missing test vectors and test chunking
  crypto: x86/poly1305 - fix .gitignore typo
  tee: fix memory allocation failure checks on drv_data and amdtee
  crypto: ccree - erase unneeded inline funcs
  crypto: ccree - make cc_pm_put_suspend() void
  crypto: ccree - split overloaded usage of irq field
  crypto: ccree - fix PM race condition
  crypto: ccree - fix FDE descriptor sequence
  crypto: ccree - cc_do_send_request() is void func
  crypto: ccree - fix pm wrongful error reporting
  crypto: ccree - turn errors to debug msgs
  crypto: ccree - fix AEAD decrypt auth fail
  crypto: ccree - fix typo in comment
  crypto: ccree - fix typos in error msgs
  crypto: atmel-{aes,sha,tdes} - Retire crypto_platform_data
  crypto: x86/sha - Eliminate casts on asm implementations
  ...
 .mailmap | 1
 Documentation/core-api/index.rst | 1
 Documentation/core-api/padata.rst | 169
 Documentation/crypto/devel-algos.rst | 38
 Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt | 1
 Documentation/padata.txt | 163
 Documentation/tee.txt | 81
 MAINTAINERS | 5
 arch/arm/crypto/aes-ce-glue.c | 14
 arch/arm/crypto/chacha-glue.c | 4
 arch/arm/crypto/crc32-ce-glue.c | 4
 arch/arm/crypto/ghash-ce-glue.c | 11
 arch/arm/crypto/poly1305-glue.c | 18
 arch/arm64/crypto/aes-ce-ccm-core.S | 16
 arch/arm64/crypto/aes-ce-ccm-glue.c | 8
 arch/arm64/crypto/aes-ce-core.S | 16
 arch/arm64/crypto/aes-ce-glue.c | 8
 arch/arm64/crypto/aes-ce.S | 4
 arch/arm64/crypto/aes-cipher-core.S | 8
 arch/arm64/crypto/aes-glue.c | 31
 arch/arm64/crypto/aes-modes.S | 16
 arch/arm64/crypto/aes-neon.S | 4
 arch/arm64/crypto/aes-neonbs-core.S | 40
 arch/arm64/crypto/chacha-neon-core.S | 16
 arch/arm64/crypto/crct10dif-ce-core.S | 12
 arch/arm64/crypto/ghash-ce-core.S | 8
 arch/arm64/crypto/ghash-ce-glue.c | 10
 arch/arm64/crypto/nh-neon-core.S | 4
 arch/arm64/crypto/poly1305-glue.c | 18
 arch/arm64/crypto/sha1-ce-core.S | 4
 arch/arm64/crypto/sha1-ce-glue.c | 17
 arch/arm64/crypto/sha2-ce-core.S | 4
 arch/arm64/crypto/sha2-ce-glue.c | 34
 arch/arm64/crypto/sha256-glue.c | 32
 arch/arm64/crypto/sha3-ce-core.S | 4
 arch/arm64/crypto/sha512-ce-core.S | 4
 arch/arm64/crypto/sha512-ce-glue.c | 26
 arch/arm64/crypto/sha512-glue.c | 15
 arch/arm64/crypto/sm3-ce-core.S | 4
 arch/arm64/crypto/sm4-ce-core.S | 4
 arch/mips/crypto/crc32-mips.c | 4
 arch/mips/crypto/poly1305-glue.c | 18
 arch/powerpc/crypto/aes-spe-glue.c | 18
 arch/powerpc/crypto/crc32c-vpmsum_glue.c | 4
 arch/s390/crypto/aes_s390.c | 27
 arch/s390/crypto/crc32-vx.c | 8
 arch/s390/crypto/ghash_s390.c | 4
 arch/s390/crypto/paes_s390.c | 25
 arch/sparc/crypto/aes_glue.c | 2
 arch/sparc/crypto/camellia_glue.c | 5
 arch/sparc/crypto/crc32c_glue.c | 4
 arch/x86/crypto/.gitignore | 1
 arch/x86/crypto/Makefile | 11
 arch/x86/crypto/aegis128-aesni-glue.c | 4
 arch/x86/crypto/aesni-intel_asm.S | 8
 arch/x86/crypto/aesni-intel_glue.c | 55
 arch/x86/crypto/blake2s-glue.c | 4
 arch/x86/crypto/camellia_aesni_avx2_glue.c | 77
 arch/x86/crypto/camellia_aesni_avx_glue.c | 81
 arch/x86/crypto/camellia_glue.c | 54
 arch/x86/crypto/cast6_avx_glue.c | 74
 arch/x86/crypto/crc32-pclmul_glue.c | 4
 arch/x86/crypto/crc32c-intel_glue.c | 4
 arch/x86/crypto/ghash-clmulni-intel_glue.c | 11
 arch/x86/crypto/glue_helper.c | 23
 arch/x86/crypto/poly1305-avx2-x86_64.S | 390
 arch/x86/crypto/poly1305-sse2-x86_64.S | 590
 arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 4265
 arch/x86/crypto/poly1305_glue.c | 304
 arch/x86/crypto/serpent_avx2_glue.c | 65
 arch/x86/crypto/serpent_avx_glue.c | 63
 arch/x86/crypto/serpent_sse2_glue.c | 30
 arch/x86/crypto/sha1_avx2_x86_64_asm.S | 6
 arch/x86/crypto/sha1_ssse3_asm.S | 14
 arch/x86/crypto/sha1_ssse3_glue.c | 70
 arch/x86/crypto/sha256-avx-asm.S | 4
 arch/x86/crypto/sha256-avx2-asm.S | 4
 arch/x86/crypto/sha256-ssse3-asm.S | 6
 arch/x86/crypto/sha256_ssse3_glue.c | 34
 arch/x86/crypto/sha512-avx-asm.S | 11
 arch/x86/crypto/sha512-avx2-asm.S | 11
 arch/x86/crypto/sha512-ssse3-asm.S | 13
 arch/x86/crypto/sha512_ssse3_glue.c | 31
 arch/x86/crypto/twofish_avx_glue.c | 81
 arch/x86/crypto/twofish_glue_3way.c | 37
 arch/x86/include/asm/crypto/camellia.h | 65
 arch/x86/include/asm/crypto/glue_helper.h | 18
 arch/x86/include/asm/crypto/serpent-avx.h | 20
 arch/x86/include/asm/crypto/serpent-sse2.h | 28
 arch/x86/include/asm/crypto/twofish.h | 19
 crypto/Kconfig | 4
 crypto/acompress.c | 4
 crypto/adiantum.c | 102
 crypto/aead.c | 15
 crypto/aegis128-core.c | 4
 crypto/aes_generic.c | 18
 crypto/af_alg.c | 6
 crypto/ahash.c | 54
 crypto/akcipher.c | 9
 crypto/algapi.c | 248
 crypto/algboss.c | 12
 crypto/anubis.c | 2
 crypto/api.c | 24
 crypto/authenc.c | 70
 crypto/authencesn.c | 70
 crypto/blake2b_generic.c | 4
 crypto/blake2s_generic.c | 4
 crypto/camellia_generic.c | 5
 crypto/cast6_generic.c | 28
 crypto/cbc.c | 15
 crypto/ccm.c | 136
 crypto/cfb.c | 5
 crypto/chacha20poly1305.c | 96
 crypto/cipher.c | 93
 crypto/cmac.c | 40
 crypto/compress.c | 31
 crypto/crc32_generic.c | 4
 crypto/crc32c_generic.c | 4
 crypto/cryptd.c | 131
 crypto/crypto_user_base.c | 3
 crypto/ctr.c | 26
 crypto/cts.c | 15
 crypto/des_generic.c | 10
 crypto/ecb.c | 5
 crypto/echainiv.c | 20
 crypto/essiv.c | 44
 crypto/gcm.c | 96
 crypto/geniv.c | 19
 crypto/ghash-generic.c | 4
 crypto/hmac.c | 62
 crypto/internal.h | 4
 crypto/keywrap.c | 15
 crypto/lrw.c | 17
 crypto/michael_mic.c | 4
 crypto/nhpoly1305.c | 2
 crypto/ofb.c | 5
 crypto/pcbc.c | 5
 crypto/pcrypt.c | 44
 crypto/poly1305_generic.c | 25
 crypto/rsa-pkcs1pad.c | 8
 crypto/scompress.c | 4
 crypto/seqiv.c | 20
 crypto/serpent_generic.c | 6
 crypto/shash.c | 95
 crypto/simd.c | 12
 crypto/skcipher.c | 97
 crypto/sm4_generic.c | 16
 crypto/testmgr.c | 584
 crypto/testmgr.h | 14
 crypto/twofish_common.c | 8
 crypto/vmac.c | 44
 crypto/xcbc.c | 45
 crypto/xts.c | 17
 crypto/xxhash_generic.c | 4
 drivers/char/hw_random/Kconfig | 2
 drivers/char/hw_random/iproc-rng200.c | 1
 drivers/crypto/Kconfig | 89
 drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 1
 drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 24
 drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 5
 drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 9
 drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 6
 drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 6
 drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 8
 drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
 drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 4
 drivers/crypto/amcc/crypto4xx_alg.c | 31
 drivers/crypto/amcc/crypto4xx_core.c | 37
 drivers/crypto/amlogic/Kconfig | 1
 drivers/crypto/amlogic/amlogic-gxl-cipher.c | 1
 drivers/crypto/amlogic/amlogic-gxl-core.c | 4
 drivers/crypto/atmel-aes.c | 359
 drivers/crypto/atmel-authenc.h | 3
 drivers/crypto/atmel-sha.c | 473
 drivers/crypto/atmel-tdes.c | 375
 drivers/crypto/axis/artpec6_crypto.c | 10
 drivers/crypto/bcm/cipher.c | 17
 drivers/crypto/caam/Kconfig | 14
 drivers/crypto/caam/caamalg.c | 33
 drivers/crypto/caam/caamalg_qi.c | 44
 drivers/crypto/caam/caamalg_qi2.c | 206
 drivers/crypto/caam/caamhash.c | 167
 drivers/crypto/caam/ctrl.c | 15
 drivers/crypto/cavium/cpt/cptvf_algs.c | 2
 drivers/crypto/cavium/nitrox/nitrox_aead.c | 4
 drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 12
 drivers/crypto/ccp/Makefile | 4
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 1
 drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
 drivers/crypto/ccp/ccp-crypto-aes.c | 1
 drivers/crypto/ccp/ccp-crypto-sha.c | 4
 drivers/crypto/ccp/ccp-dev-v3.c | 1
 drivers/crypto/ccp/psp-dev.c | 1042
 drivers/crypto/ccp/psp-dev.h | 51
 drivers/crypto/ccp/sev-dev.c | 1077
 drivers/crypto/ccp/sev-dev.h | 63
 drivers/crypto/ccp/sp-dev.h | 17
 drivers/crypto/ccp/sp-pci.c | 43
 drivers/crypto/ccp/tee-dev.c | 375
 drivers/crypto/ccp/tee-dev.h | 110
 drivers/crypto/ccree/cc_aead.c | 43
 drivers/crypto/ccree/cc_cipher.c | 58
 drivers/crypto/ccree/cc_driver.c | 24
 drivers/crypto/ccree/cc_driver.h | 6
 drivers/crypto/ccree/cc_fips.c | 2
 drivers/crypto/ccree/cc_hash.c | 8
 drivers/crypto/ccree/cc_pm.c | 39
 drivers/crypto/ccree/cc_pm.h | 17
 drivers/crypto/ccree/cc_request_mgr.c | 103
 drivers/crypto/ccree/cc_request_mgr.h | 8
 drivers/crypto/chelsio/Kconfig | 30
 drivers/crypto/chelsio/chcr_algo.c | 53
 drivers/crypto/chelsio/chcr_core.c | 10
 drivers/crypto/chelsio/chtls/chtls.h | 7
 drivers/crypto/chelsio/chtls/chtls_cm.c | 57
 drivers/crypto/chelsio/chtls/chtls_cm.h | 21
 drivers/crypto/chelsio/chtls/chtls_hw.c | 65
 drivers/crypto/chelsio/chtls/chtls_main.c | 28
 drivers/crypto/geode-aes.c | 24
 drivers/crypto/hisilicon/Kconfig | 11
 drivers/crypto/hisilicon/hpre/hpre_crypto.c | 141
 drivers/crypto/hisilicon/hpre/hpre_main.c | 60
 drivers/crypto/hisilicon/sec2/sec.h | 49
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 963
 drivers/crypto/hisilicon/sec2/sec_crypto.h | 22
 drivers/crypto/hisilicon/sec2/sec_main.c | 23
 drivers/crypto/hisilicon/sgl.c | 17
 drivers/crypto/hisilicon/zip/zip.h | 4
 drivers/crypto/hisilicon/zip/zip_crypto.c | 92
 drivers/crypto/img-hash.c | 6
 drivers/crypto/inside-secure/safexcel.c | 12
 drivers/crypto/inside-secure/safexcel.h | 34
 drivers/crypto/inside-secure/safexcel_cipher.c | 600
 drivers/crypto/inside-secure/safexcel_hash.c | 36
 drivers/crypto/inside-secure/safexcel_ring.c | 130
 drivers/crypto/ixp4xx_crypto.c | 31
 drivers/crypto/marvell/cipher.c | 4
 drivers/crypto/mediatek/mtk-aes.c | 4
 drivers/crypto/mxs-dcp.c | 12
 drivers/crypto/n2_core.c | 1
 drivers/crypto/omap-aes-gcm.c | 223
 drivers/crypto/omap-aes.c | 142
 drivers/crypto/omap-aes.h | 12
 drivers/crypto/omap-crypto.c | 37
 drivers/crypto/omap-des.c | 13
 drivers/crypto/omap-sham.c | 191
 drivers/crypto/padlock-aes.c | 9
 drivers/crypto/padlock-sha.c | 26
 drivers/crypto/picoxcell_crypto.c | 30
 drivers/crypto/qat/qat_common/qat_algs.c | 6
 drivers/crypto/qce/Makefile | 7
 drivers/crypto/qce/common.c | 244
 drivers/crypto/qce/core.c | 4
 drivers/crypto/qce/dma.c | 6
 drivers/crypto/qce/dma.h | 3
 drivers/crypto/qce/sha.c | 2
 drivers/crypto/qce/skcipher.c | 41
 drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4
 drivers/crypto/sahara.c | 9
 drivers/crypto/stm32/Kconfig | 6
 drivers/crypto/stm32/stm32-crc32.c | 4
 drivers/crypto/stm32/stm32-hash.c | 6
 drivers/crypto/talitos.c | 15
 drivers/crypto/ux500/Kconfig | 16
 drivers/crypto/ux500/cryp/cryp_core.c | 2
 drivers/crypto/virtio/virtio_crypto_algs.c | 8
 drivers/crypto/vmx/aes_xts.c | 3
 drivers/tee/Kconfig | 4
 drivers/tee/Makefile | 1
 drivers/tee/amdtee/Kconfig | 8
 drivers/tee/amdtee/Makefile | 5
 drivers/tee/amdtee/amdtee_if.h | 183
 drivers/tee/amdtee/amdtee_private.h | 159
 drivers/tee/amdtee/call.c | 373
 drivers/tee/amdtee/core.c | 518
 drivers/tee/amdtee/shm_pool.c | 93
 fs/ecryptfs/crypto.c | 2
 fs/ecryptfs/keystore.c | 4
 include/crypto/aead.h | 10
 include/crypto/algapi.h | 84
 include/crypto/cast6.h | 7
 include/crypto/hash.h | 13
 include/crypto/internal/acompress.h | 4
 include/crypto/internal/aead.h | 21
 include/crypto/internal/akcipher.h | 12
 include/crypto/internal/chacha.h | 2
 include/crypto/internal/des.h | 23
 include/crypto/internal/geniv.h | 1
 include/crypto/internal/hash.h | 90
 include/crypto/internal/poly1305.h | 45
 include/crypto/internal/scompress.h | 4
 include/crypto/internal/skcipher.h | 27
 include/crypto/nhpoly1305.h | 4
 include/crypto/poly1305.h | 26
 include/crypto/serpent.h | 4
 include/crypto/skcipher.h | 26
 include/crypto/twofish.h | 2
 include/crypto/xts.h | 21
 include/linux/cpuhotplug.h | 1
 include/linux/crypto.h | 104
 include/linux/padata.h | 56
 include/linux/platform_data/crypto-atmel.h | 23
 include/linux/psp-tee.h | 91
 include/uapi/linux/tee.h | 1
 kernel/padata.c | 386
 lib/crypto/Kconfig | 2
 lib/crypto/Makefile | 14
 lib/crypto/chacha20poly1305-selftest.c | 1712
 lib/crypto/curve25519-generic.c | 24
 lib/crypto/curve25519-selftest.c | 1321
 lib/crypto/curve25519.c | 20
 lib/crypto/poly1305-donna32.c | 204
 lib/crypto/poly1305-donna64.c | 185
 lib/crypto/poly1305.c | 169
314 files changed, 16782 insertions, 8209 deletions
diff --git a/.mailmap b/.mailmap
index 0c36f3317457..bf581623ee3a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -139,6 +139,7 @@ Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
+Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index ab0eae1c153a..ab0b9ec85506 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -39,6 +39,7 @@ Core utilities
../RCU/index
gcc-plugins
symbol-namespaces
+ padata
Interfaces for kernel debugging
diff --git a/Documentation/core-api/padata.rst b/Documentation/core-api/padata.rst
new file mode 100644
index 000000000000..9a24c111781d
--- /dev/null
+++ b/Documentation/core-api/padata.rst
@@ -0,0 +1,169 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+The padata parallel execution mechanism
+=======================================
+
+:Date: December 2019
+
+Padata is a mechanism by which the kernel can farm jobs out to be done in
+parallel on multiple CPUs while retaining their ordering. It was developed for
+use with the IPsec code, which needs to be able to perform encryption and
+decryption on large numbers of packets without reordering those packets. The
+crypto developers made a point of writing padata in a sufficiently general
+fashion that it could be put to other uses as well.
+
+Usage
+=====
+
+Initializing
+------------
+
+The first step in using padata is to set up a padata_instance structure for
+overall control of how jobs are to be run::
+
+ #include <linux/padata.h>
+
+ struct padata_instance *padata_alloc_possible(const char *name);
+
+'name' simply identifies the instance.
+
+There are functions for enabling and disabling the instance::
+
+ int padata_start(struct padata_instance *pinst);
+ void padata_stop(struct padata_instance *pinst);
+
+These functions are setting or clearing the "PADATA_INIT" flag; if that flag is
+not set, other functions will refuse to work. padata_start() returns zero on
+success (flag set) or -EINVAL if the padata cpumask contains no active CPU
+(flag not set). padata_stop() clears the flag and blocks until the padata
+instance is unused.
+
+Finally, complete padata initialization by allocating a padata_shell::
+
+ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+
+A padata_shell is used to submit a job to padata and allows a series of such
+jobs to be serialized independently. A padata_instance may have one or more
+padata_shells associated with it, each allowing a separate series of jobs.
+
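[Editorial illustration, not part of the patch: a minimal sketch of the setup sequence described above, as a hypothetical module might use it. The function name and the "my_inst"/"my_shell" identifiers are assumptions.]::

    #include <linux/errno.h>
    #include <linux/padata.h>

    static struct padata_instance *my_inst;
    static struct padata_shell *my_shell;

    static int my_padata_setup(void)
    {
        int err;

        my_inst = padata_alloc_possible("my_inst");
        if (!my_inst)
            return -ENOMEM;

        err = padata_start(my_inst);            /* sets PADATA_INIT */
        if (err)
            goto free_inst;

        my_shell = padata_alloc_shell(my_inst);
        if (!my_shell) {
            err = -ENOMEM;
            goto stop_inst;
        }
        return 0;

    stop_inst:
        padata_stop(my_inst);
    free_inst:
        padata_free(my_inst);
        return err;
    }
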
+Modifying cpumasks
+------------------
+
+The CPUs used to run jobs can be changed in two ways, programmatically with
+padata_set_cpumask() or via sysfs. The former is defined::
+
+ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
+ cpumask_var_t cpumask);
+
+Here cpumask_type is one of PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, where a
+parallel cpumask describes which processors will be used to execute jobs
+submitted to this instance in parallel and a serial cpumask defines which
+processors are allowed to be used as the serialization callback processor.
+cpumask specifies the new cpumask to use.
+
+There may be sysfs files for an instance's cpumasks. For example, pcrypt's
+live in /sys/kernel/pcrypt/<instance-name>. Within an instance's directory
+there are two files, parallel_cpumask and serial_cpumask, and either cpumask
+may be changed by echoing a bitmask into the file, for example::
+
+ echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
+
+Reading one of these files shows the user-supplied cpumask, which may be
+different from the 'usable' cpumask.
+
+Padata maintains two pairs of cpumasks internally, the user-supplied cpumasks
+and the 'usable' cpumasks. (Each pair consists of a parallel and a serial
+cpumask.) The user-supplied cpumasks default to all possible CPUs on instance
+allocation and may be changed as above. The usable cpumasks are always a
+subset of the user-supplied cpumasks and contain only the online CPUs in the
+user-supplied masks; these are the cpumasks padata actually uses. So it is
+legal to supply a cpumask to padata that contains offline CPUs. Once an
+offline CPU in the user-supplied cpumask comes online, padata is going to use
+it.
+
+Changing the CPU masks is an expensive operation, so it should not be done
+with great frequency.
+
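[Editorial illustration, not part of the patch: restricting the parallel cpumask from kernel code might look like the sketch below; "my_inst" is the hypothetical instance from the earlier example.]::

    static int my_restrict_parallel_cpus(const struct cpumask *cpus)
    {
        cpumask_var_t mask;
        int err;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;
        cpumask_copy(mask, cpus);

        /* Only the parallel cpumask is replaced; the serial one is kept. */
        err = padata_set_cpumask(my_inst, PADATA_CPU_PARALLEL, mask);
        free_cpumask_var(mask);
        return err;
    }
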
+Running A Job
+-------------
+
+Actually submitting work to the padata instance requires the creation of a
+padata_priv structure, which represents one job::
+
+ struct padata_priv {
+ /* Other stuff here... */
+ void (*parallel)(struct padata_priv *padata);
+ void (*serial)(struct padata_priv *padata);
+ };
+
+This structure will almost certainly be embedded within some larger
+structure specific to the work to be done. Most of its fields are private to
+padata, but the structure should be zeroed at initialisation time, and the
+parallel() and serial() functions should be provided. Those functions will
+be called in the process of getting the work done as we will see
+momentarily.
+
+The submission of the job is done with::
+
+ int padata_do_parallel(struct padata_shell *ps,
+ struct padata_priv *padata, int *cb_cpu);
+
+The ps and padata structures must be set up as described above; cb_cpu
+points to the preferred CPU to be used for the final callback when the job is
+done; it must be in the current instance's CPU mask (if not the cb_cpu pointer
+is updated to point to the CPU actually chosen). The return value from
+padata_do_parallel() is zero on success, indicating that the job is in
+progress. -EBUSY means that somebody, somewhere else is messing with the
+instance's CPU mask, while -EINVAL is a complaint about cb_cpu not being in the
+serial cpumask, no online CPUs in the parallel or serial cpumasks, or a stopped
+instance.
+
+Each job submitted to padata_do_parallel() will, in turn, be passed to
+exactly one call to the above-mentioned parallel() function, on one CPU, so
+true parallelism is achieved by submitting multiple jobs. parallel() runs with
+software interrupts disabled and thus cannot sleep. The parallel()
+function gets the padata_priv structure pointer as its lone parameter;
+information about the actual work to be done is probably obtained by using
+container_of() to find the enclosing structure.
+
+Note that parallel() has no return value; the padata subsystem assumes that
+parallel() will take responsibility for the job from this point. The job
+need not be completed during this call, but, if parallel() leaves work
+outstanding, it should be prepared to be called again with a new job before
+the previous one completes.
+
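[Editorial illustration, not part of the patch: embedding padata_priv in a job and submitting it. The struct layout, do_my_work() and "my_shell" are assumptions; my_serial() is sketched after the next section.]::

    #include <linux/completion.h>
    #include <linux/padata.h>
    #include <linux/string.h>

    struct my_job {
        struct padata_priv padata;   /* embedded; zeroed before submit */
        struct completion done;      /* signalled from the serial() callback */
        /* job-specific data ... */
    };

    static void my_serial(struct padata_priv *padata);

    static void my_parallel(struct padata_priv *padata)
    {
        struct my_job *job = container_of(padata, struct my_job, padata);

        do_my_work(job);             /* softirqs are off: must not sleep */
        padata_do_serial(padata);    /* hand the finished job back to padata */
    }

    static int my_submit(struct my_job *job, int cb_cpu)
    {
        memset(&job->padata, 0, sizeof(job->padata));
        job->padata.parallel = my_parallel;
        job->padata.serial   = my_serial;
        init_completion(&job->done);

        return padata_do_parallel(my_shell, &job->padata, &cb_cpu);
    }
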
+Serializing Jobs
+----------------
+
+When a job does complete, parallel() (or whatever function actually finishes
+the work) should inform padata of the fact with a call to::
+
+ void padata_do_serial(struct padata_priv *padata);
+
+At some point in the future, padata_do_serial() will trigger a call to the
+serial() function in the padata_priv structure. That call will happen on
+the CPU requested in the initial call to padata_do_parallel(); it, too, is
+run with local software interrupts disabled.
+Note that this call may be deferred for a while since the padata code takes
+pains to ensure that jobs are completed in the order in which they were
+submitted.
+
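[Editorial illustration, not part of the patch: the matching serial() callback for the hypothetical job structure above, completing the job in submission order.]::

    static void my_serial(struct padata_priv *padata)
    {
        struct my_job *job = container_of(padata, struct my_job, padata);

        /* Runs on the cb_cpu chosen at submit time, softirqs disabled. */
        complete(&job->done);
    }
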
+Destroying
+----------
+
+Cleaning up a padata instance predictably involves calling the three free
+functions that correspond to the allocation in reverse::
+
+ void padata_free_shell(struct padata_shell *ps);
+ void padata_stop(struct padata_instance *pinst);
+ void padata_free(struct padata_instance *pinst);
+
+It is the user's responsibility to ensure all outstanding jobs are complete
+before any of the above are called.
+
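[Editorial illustration, not part of the patch: teardown for the hypothetical module sketched earlier, in the reverse order of allocation.]::

    static void my_padata_teardown(void)
    {
        /* Callers must ensure no jobs are outstanding at this point. */
        padata_free_shell(my_shell);
        padata_stop(my_inst);
        padata_free(my_inst);
    }
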
+Interface
+=========
+
+.. kernel-doc:: include/linux/padata.h
+.. kernel-doc:: kernel/padata.c
diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst
index f9d288015acc..f225a953ab4b 100644
--- a/Documentation/crypto/devel-algos.rst
+++ b/Documentation/crypto/devel-algos.rst
@@ -31,33 +31,23 @@ The counterparts to those functions are listed below.
::
- int crypto_unregister_alg(struct crypto_alg *alg);
- int crypto_unregister_algs(struct crypto_alg *algs, int count);
+ void crypto_unregister_alg(struct crypto_alg *alg);
+ void crypto_unregister_algs(struct crypto_alg *algs, int count);
-Notice that both registration and unregistration functions do return a
-value, so make sure to handle errors. A return code of zero implies
-success. Any return code < 0 implies an error.
+The registration functions return 0 on success, or a negative errno
+value on failure. crypto_register_algs() succeeds only if it
+successfully registered all the given algorithms; if it fails partway
+through, then any changes are rolled back.
-The bulk registration/unregistration functions register/unregister each
-transformation in the given array of length count. They handle errors as
-follows:
-
-- crypto_register_algs() succeeds if and only if it successfully
- registers all the given transformations. If an error occurs partway
- through, then it rolls back successful registrations before returning
- the error code. Note that if a driver needs to handle registration
- errors for individual transformations, then it will need to use the
- non-bulk function crypto_register_alg() instead.
-
-- crypto_unregister_algs() tries to unregister all the given
- transformations, continuing on error. It logs errors and always
- returns zero.
+The unregistration functions always succeed, so they don't have a
+return value. Don't try to unregister algorithms that aren't
+currently registered.
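
[Editorial illustration, not part of the patch: to make the all-or-nothing registration behaviour concrete, a driver registering several algorithms might do the following; the "my_algs" array is an assumption.]::

    #include <linux/crypto.h>
    #include <linux/module.h>

    static struct crypto_alg my_algs[2];    /* filled in elsewhere */

    static int __init my_module_init(void)
    {
        /* Either every algorithm registers, or none remain registered. */
        return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
    }

    static void __exit my_module_exit(void)
    {
        /* Always succeeds; returns void after this change. */
        crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
    }
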
Single-Block Symmetric Ciphers [CIPHER]
---------------------------------------
-Example of transformations: aes, arc4, ...
+Example of transformations: aes, serpent, ...
This section describes the simplest of all transformation
implementations, that being the CIPHER type used for symmetric ciphers.
@@ -108,7 +98,7 @@ is also valid:
Multi-Block Ciphers
-------------------
-Example of transformations: cbc(aes), ecb(arc4), ...
+Example of transformations: cbc(aes), chacha20, ...
This section describes the multi-block cipher transformation
implementations. The multi-block ciphers are used for transformations
@@ -169,10 +159,10 @@ are as follows:
::
- int crypto_unregister_ahash(struct ahash_alg *alg);
+ void crypto_unregister_ahash(struct ahash_alg *alg);
- int crypto_unregister_shash(struct shash_alg *alg);
- int crypto_unregister_shashes(struct shash_alg *algs, int count);
+ void crypto_unregister_shash(struct shash_alg *alg);
+ void crypto_unregister_shashes(struct shash_alg *algs, int count);
Cipher Definition With struct shash_alg and ahash_alg
diff --git a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
index c223e54452da..802523196ee5 100644
--- a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
@@ -2,6 +2,7 @@ HWRNG support for the iproc-rng200 driver
Required properties:
- compatible : Must be one of:
+ "brcm,bcm2711-rng200"
"brcm,bcm7211-rng200"
"brcm,bcm7278-rng200"
"brcm,iproc-rng200"
diff --git a/Documentation/padata.txt b/Documentation/padata.txt
deleted file mode 100644
index b37ba1eaace3..000000000000
--- a/Documentation/padata.txt
+++ /dev/null
@@ -1,163 +0,0 @@
-=======================================
-The padata parallel execution mechanism
-=======================================
-
-:Last updated: for 2.6.36
-
-Padata is a mechanism by which the kernel can farm work out to be done in
-parallel on multiple CPUs while retaining the ordering of tasks. It was
-developed for use with the IPsec code, which needs to be able to perform
-encryption and decryption on large numbers of packets without reordering
-those packets. The crypto developers made a point of writing padata in a
-sufficiently general fashion that it could be put to other uses as well.
-
-The first step in using padata is to set up a padata_instance structure for
-overall control of how tasks are to be run::
-
- #include <linux/padata.h>
-
- struct padata_instance *padata_alloc(const char *name,
- const struct cpumask *pcpumask,
- const struct cpumask *cbcpumask);
-
-'name' simply identifies the instance.
-
-The pcpumask describes which processors will be used to execute work
-submitted to this instance in parallel. The cbcpumask defines which
-processors are allowed to be used as the serialization callback processor.
-The workqueue wq is where the work will actually be done; it should be
-a multithreaded queue, naturally.
-
-To allocate a padata instance with the cpu_possible_mask for both
-cpumasks this helper function can be used::
-
- struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq);
-
-Note: Padata maintains two kinds of cpumasks internally. The user supplied
-cpumasks, submitted by padata_alloc/padata_alloc_possible and the 'usable'
-cpumasks. The usable cpumasks are always a subset of active CPUs in the
-user supplied cpumasks; these are the cpumasks padata actually uses. So
-it is legal to supply a cpumask to padata that contains offline CPUs.
-Once an offline CPU in the user supplied cpumask comes online, padata
-is going to use it.
-
-There are functions for enabling and disabling the instance::
-
- int padata_start(struct padata_instance *pinst);
- void padata_stop(struct padata_instance *pinst);
-
-These functions are setting or clearing the "PADATA_INIT" flag;
-if that flag is not set, other functions will refuse to work.
-padata_start returns zero on success (flag set) or -EINVAL if the
-padata cpumask contains no active CPU (flag not set).
-padata_stop clears the flag and blocks until the padata instance
-is unused.
-
-The list of CPUs to be used can be adjusted with these functions::
-
- int padata_set_cpumasks(struct padata_instance *pinst,
- cpumask_var_t pcpumask,
- cpumask_var_t cbcpumask);
- int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
- cpumask_var_t cpumask);
- int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
- int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
-
-Changing the CPU masks are expensive operations, though, so it should not be
-done with great frequency.
-
-It's possible to change both cpumasks of a padata instance with
-padata_set_cpumasks by specifying the cpumasks for parallel execution (pcpumask)
-and for the serial callback function (cbcpumask). padata_set_cpumask is used to
-change just one of the cpumasks. Here cpumask_type is one of PADATA_CPU_SERIAL,
-PADATA_CPU_PARALLEL and cpumask specifies the new cpumask to use.
-To simply add or remove one CPU from a certain cpumask the functions
-padata_add_cpu/padata_remove_cpu are used. cpu specifies the CPU to add or
-remove and mask is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL.
-
-If a user is interested in padata cpumask changes, he can register to
-the padata cpumask change notifier::
-
- int padata_register_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
-
-To unregister from that notifier::
-
- int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
-
-The padata cpumask change notifier notifies about changes of the usable
-cpumasks, i.e. the subset of active CPUs in the user supplied cpumask.
-
-Padata calls the notifier chain with::
-
- blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
- notification_mask,
- &pd_new->cpumask);
-
-Here cpumask_change_notifier is registered notifier, notification_mask
-is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL and cpumask is a pointer
-to a struct padata_cpumask that contains the new cpumask information.
-
-Actually submitting work to the padata instance requires the creation of a
-padata_priv structure::
-
- struct padata_priv {
- /* Other stuff here... */
- void (*parallel)(struct padata_priv *padata);
- void (*serial)(struct padata_priv *padata);
- };
-
-This structure will almost certainly be embedded within some larger
-structure specific to the work to be done. Most of its fields are private to
-padata, but the structure should be zeroed at initialisation time, and the
-parallel() and serial() functions should be provided. Those functions will
-be called in the process of getting the work done as we will see
-momentarily.
-
-The submission of work is done with::
-
- int padata_do_parallel(struct padata_instance *pinst,
- struct padata_priv *padata, int cb_cpu);
-
-The pinst and padata structures must be set up as described above; cb_cpu
-specifies which CPU will be used for the final callback when the work is
-done; it must be in the current instance's CPU mask. The return value from
-padata_do_parallel() is zero on success, indicating that the work is in
-progress. -EBUSY means that somebody, somewhere else is messing with the
-instance's CPU mask, while -EINVAL is a complaint about cb_cpu not being
-in that CPU mask or about a not running instance.
-
-Each task submitted to padata_do_parallel() will, in turn, be passed to
-exactly one call to the above-mentioned parallel() function, on one CPU, so
-true parallelism is achieved by submitting multiple tasks. parallel() runs with
-software interrupts disabled and thus cannot sleep. The parallel()
-function gets the padata_priv structure pointer as its lone parameter;
-information about the actual work to be done is probably obtained by using
-container_of() to find the enclosing structure.
-
-Note that parallel() has no return value; the padata subsystem assumes that
-parallel() will take responsibility for the task from this point. The work
-need not be completed during this call, but, if parallel() leaves work
-outstanding, it should be prepared to be called again with a new job before
-the previous one completes. When a task does complete, parallel() (or
-whatever function actually finishes the job) should inform padata of the
-fact with a call to::
-
- void padata_do_serial(struct padata_priv *padata);
-
-At some point in the future, padata_do_serial() will trigger a call to the
-serial() function in the padata_priv structure. That call will happen on
-the CPU requested in the initial call to padata_do_parallel(); it, too, is
-run with local software interrupts disabled.
-Note that this call may be deferred for a while since the padata code takes
-pains to ensure that tasks are completed in the order in which they were
-submitted.
-
-The one remaining function in the padata API should be called to clean up
-when a padata instance is no longer needed::
-
- void padata_free(struct padata_instance *pinst);
-
-This function will busy-wait while any remaining tasks are completed, so it
-might be best not to call it while there is work outstanding.
diff --git a/Documentation/tee.txt b/Documentation/tee.txt
index afacdf2fd1de..c8fad81c4563 100644
--- a/Documentation/tee.txt
+++ b/Documentation/tee.txt
@@ -112,6 +112,83 @@ kernel are handled by the kernel driver. Other RPC messages will be forwarded to
tee-supplicant without further involvement of the driver, except switching
shared memory buffer representation.
+AMD-TEE driver
+==============
+
+The AMD-TEE driver handles the communication with AMD's TEE environment. The
+TEE environment is provided by AMD Secure Processor.
+
+The AMD Secure Processor (formerly called Platform Security Processor or PSP)
+is a dedicated processor that features ARM TrustZone technology, along with a
+software-based Trusted Execution Environment (TEE) designed to enable
+third-party Trusted Applications. This feature is currently enabled only for
+APUs.
+
+The following picture shows a high level overview of AMD-TEE::
+
+ |
+ x86 |
+ |
+ User space (Kernel space) | AMD Secure Processor (PSP)
+ ~~~~~~~~~~ ~~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ |
+ +--------+ | +-------------+
+ | Client | | | Trusted |
+ +--------+ | | Application |
+ /\ | +-------------+
+ || | /\
+ || | ||
+ || | \/
+ || | +----------+
+ || | | TEE |
+ || | | Internal |
+ \/ | | API |
+ +---------+ +-----------+---------+ +----------+
+ | TEE | | TEE | AMD-TEE | | AMD-TEE |
+ | Client | | subsystem | driver | | Trusted |
+ | API | | | | | OS |
+ +---------+-----------+----+------+---------+---------+----------+
+ | Generic TEE API | | ASP | Mailbox |
+ | IOCTL (TEE_IOC_*) | | driver | Register Protocol |
+ +--------------------------+ +---------+--------------------+
+
+At the lowest level (in x86), the AMD Secure Processor (ASP) driver uses the
+CPU to PSP mailbox register to submit commands to the PSP. The format of the
+command buffer is opaque to the ASP driver. Its role is to submit commands to
+the secure processor and return results to the AMD-TEE driver. The interface
+between AMD-TEE driver and AMD Secure Processor driver can be found in [6].
+
+The AMD-TEE driver packages the command buffer payload for processing in TEE.
+The command buffer format for the different TEE commands can be found in [7].
+
+The TEE commands supported by AMD-TEE Trusted OS are:
+* TEE_CMD_ID_LOAD_TA - loads a Trusted Application (TA) binary into
+ TEE environment.
+* TEE_CMD_ID_UNLOAD_TA - unloads TA binary from TEE environment.
+* TEE_CMD_ID_OPEN_SESSION - opens a session with a loaded TA.
+* TEE_CMD_ID_CLOSE_SESSION - closes session with loaded TA
+* TEE_CMD_ID_INVOKE_CMD - invokes a command with loaded TA
+* TEE_CMD_ID_MAP_SHARED_MEM - maps shared memory
+* TEE_CMD_ID_UNMAP_SHARED_MEM - unmaps shared memory
+
+AMD-TEE Trusted OS is the firmware running on AMD Secure Processor.
+
+The AMD-TEE driver registers itself with TEE subsystem and implements the
+following driver function callbacks:
+
+* get_version - returns the driver implementation id and capability.
+* open - sets up the driver context data structure.
+* release - frees up driver resources.
+* open_session - loads the TA binary and opens session with loaded TA.
+* close_session - closes session with loaded TA and unloads it.
+* invoke_func - invokes a command with loaded TA.
+
+cancel_req driver callback is not supported by AMD-TEE.
+
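[Editorial illustration, not part of the patch: wiring these callbacks into the TEE subsystem's tee_driver_ops; the "my_*" handler names are assumptions.]::

    #include <linux/tee_drv.h>

    static const struct tee_driver_ops my_tee_ops = {
        .get_version   = my_get_version,
        .open          = my_open,
        .release       = my_release,
        .open_session  = my_open_session,
        .close_session = my_close_session,
        .invoke_func   = my_invoke_func,
        /* .cancel_req intentionally left unset: not supported by AMD-TEE */
    };
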
+The GlobalPlatform TEE Client API [5] can be used by the user space (client) to
+talk to AMD's TEE. AMD's TEE provides a secure environment for loading, opening
+a session, invoking commands and closing a session with a TA.
+
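[Editorial illustration, not part of the patch: a rough user-space sketch of the client flow described above, assuming a GlobalPlatform/libteec-style environment; the TA UUID and command ID are placeholders.]::

    #include <tee_client_api.h>

    int talk_to_ta(const TEEC_UUID *ta_uuid)
    {
        TEEC_Context ctx;
        TEEC_Session sess;
        TEEC_Operation op = { 0 };   /* no parameters for this example */
        uint32_t origin;
        TEEC_Result res;

        res = TEEC_InitializeContext(NULL, &ctx);
        if (res != TEEC_SUCCESS)
            return -1;

        res = TEEC_OpenSession(&ctx, &sess, ta_uuid, TEEC_LOGIN_PUBLIC,
                               NULL, NULL, &origin);
        if (res == TEEC_SUCCESS) {
            TEEC_InvokeCommand(&sess, 0 /* placeholder command ID */,
                               &op, &origin);
            TEEC_CloseSession(&sess);
        }
        TEEC_FinalizeContext(&ctx);
        return res == TEEC_SUCCESS ? 0 : -1;
    }
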
References
==========
@@ -125,3 +202,7 @@ References
[5] http://www.globalplatform.org/specificationsdevice.asp look for
"TEE Client API Specification v1.0" and click download.
+
+[6] include/linux/psp-tee.h
+
+[7] drivers/tee/amdtee/amdtee_if.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 38956b7c3ec6..37622b030228 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -791,7 +791,6 @@ F: include/uapi/rdma/efa-abi.h
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
M: Tom Lendacky <thomas.lendacky@amd.com>
-M: Gary Hook <gary.hook@amd.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/ccp/
@@ -12478,7 +12477,7 @@ L: linux-crypto@vger.kernel.org
S: Maintained
F: kernel/padata.c
F: include/linux/padata.h
-F: Documentation/padata.txt
+F: Documentation/core-api/padata.rst
PAGE POOL
M: Jesper Dangaard Brouer <hawk@kernel.org>
@@ -14568,7 +14567,7 @@ F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
-M: Kamil Konieczny <k.konieczny@partner.samsung.com>
+M: Kamil Konieczny <k.konieczny@samsung.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index cdb1a07e7ad0..b668c97663ec 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -138,14 +138,8 @@ static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
-
- ret = ce_aes_expandkey(ctx, in_key, key_len);
- if (!ret)
- return 0;
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ return ce_aes_expandkey(ctx, in_key, key_len);
}
struct crypto_aes_xts_ctx {
@@ -167,11 +161,7 @@ static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
if (!ret)
ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
key_len / 2);
- if (!ret)
- return 0;
-
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ return ret;
}
static int ecb_encrypt(struct skcipher_request *req)
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
index 6ebbb2b241d2..6fdb0ac62b3d 100644
--- a/arch/arm/crypto/chacha-glue.c
+++ b/arch/arm/crypto/chacha-glue.c
@@ -115,7 +115,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
if (nbytes < walk.total)
nbytes = round_down(nbytes, walk.stride);
- if (!neon) {
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
nbytes, state, ctx->nrounds);
state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
@@ -159,7 +159,7 @@ static int do_xchacha(struct skcipher_request *req, bool neon)
chacha_init_generic(state, ctx->key, req->iv);
- if (!neon) {
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
hchacha_block_arm(state, subctx.key, ctx->nrounds);
} else {
kernel_neon_begin();
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index 95592499b9bd..2208445808d7 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -54,10 +54,8 @@ static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
{
u32 *mctx = crypto_shash_ctx(hash);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index c691077679a6..a00fd329255f 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -163,10 +163,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
struct ghash_key *key = crypto_shash_ctx(tfm);
be128 h;
- if (keylen != GHASH_BLOCK_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
- }
/* needed for the fallback */
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
@@ -296,16 +294,11 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_ahash *child = &ctx->cryptd_tfm->base;
- int err;
crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
& CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(child, key, keylen);
- crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
- & CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_ahash_setkey(child, key, keylen);
}
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
index abe3f2d587dc..ceec04ec2f40 100644
--- a/arch/arm/crypto/poly1305-glue.c
+++ b/arch/arm/crypto/poly1305-glue.c
@@ -20,7 +20,7 @@
void poly1305_init_arm(void *state, const u8 *key);
void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
-void poly1305_emit_arm(void *state, __le32 *digest, const u32 *nonce);
+void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce);
void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
{
@@ -179,9 +179,6 @@ EXPORT_SYMBOL(poly1305_update_arch);
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
{
- __le32 digest[4];
- u64 f = 0;
-
if (unlikely(dctx->buflen)) {
dctx->buf[dctx->buflen++] = 1;
memset(dctx->buf + dctx->buflen, 0,
@@ -189,18 +186,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
}
- poly1305_emit_arm(&dctx->h, digest, dctx->s);
-
- /* mac = (h + s) % (2^128) */
- f = (f >> 32) + le32_to_cpu(digest[0]);
- put_unaligned_le32(f, dst);
- f = (f >> 32) + le32_to_cpu(digest[1]);
- put_unaligned_le32(f, dst + 4);
- f = (f >> 32) + le32_to_cpu(digest[2]);
- put_unaligned_le32(f, dst + 8);
- f = (f >> 32) + le32_to_cpu(digest[3]);
- put_unaligned_le32(f, dst + 12);
-
+ poly1305_emit_arm(&dctx->h, dst, dctx->s);
*dctx = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL(poly1305_final_arch);
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 9add9bbc48d8..99a028e298ed 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -15,7 +15,7 @@
* void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
* u32 *macp, u8 const rk[], u32 rounds);
*/
-ENTRY(ce_aes_ccm_auth_data)
+SYM_FUNC_START(ce_aes_ccm_auth_data)
ldr w8, [x3] /* leftover from prev round? */
ld1 {v0.16b}, [x0] /* load mac */
cbz w8, 1f
@@ -81,13 +81,13 @@ ENTRY(ce_aes_ccm_auth_data)
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret
-ENDPROC(ce_aes_ccm_auth_data)
+SYM_FUNC_END(ce_aes_ccm_auth_data)
/*
* void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
* u32 rounds);
*/
-ENTRY(ce_aes_ccm_final)
+SYM_FUNC_START(ce_aes_ccm_final)
ld1 {v3.4s}, [x2], #16 /* load first round key */
ld1 {v0.16b}, [x0] /* load mac */
cmp w3, #12 /* which key size? */
@@ -121,7 +121,7 @@ ENTRY(ce_aes_ccm_final)
eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
st1 {v0.16b}, [x0] /* store result */
ret
-ENDPROC(ce_aes_ccm_final)
+SYM_FUNC_END(ce_aes_ccm_final)
.macro aes_ccm_do_crypt,enc
ldr x8, [x6, #8] /* load lower ctr */
@@ -212,10 +212,10 @@ CPU_LE( rev x8, x8 )
* u8 const rk[], u32 rounds, u8 mac[],
* u8 ctr[]);
*/
-ENTRY(ce_aes_ccm_encrypt)
+SYM_FUNC_START(ce_aes_ccm_encrypt)
aes_ccm_do_crypt 1
-ENDPROC(ce_aes_ccm_encrypt)
+SYM_FUNC_END(ce_aes_ccm_encrypt)
-ENTRY(ce_aes_ccm_decrypt)
+SYM_FUNC_START(ce_aes_ccm_decrypt)
aes_ccm_do_crypt 0
-ENDPROC(ce_aes_ccm_decrypt)
+SYM_FUNC_END(ce_aes_ccm_decrypt)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 541cf9165748..f6d19b0dc893 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -47,14 +47,8 @@ static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
- int ret;
- ret = ce_aes_expandkey(ctx, in_key, key_len);
- if (!ret)
- return 0;
-
- tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ return ce_aes_expandkey(ctx, in_key, key_len);
}
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
diff --git a/arch/arm64/crypto/aes-ce-core.S b/arch/arm64/crypto/aes-ce-core.S
index 76a30fe4ba8b..e52e13eb8fdb 100644
--- a/arch/arm64/crypto/aes-ce-core.S
+++ b/arch/arm64/crypto/aes-ce-core.S
@@ -8,7 +8,7 @@
.arch armv8-a+crypto
-ENTRY(__aes_ce_encrypt)
+SYM_FUNC_START(__aes_ce_encrypt)
sub w3, w3, #2
ld1 {v0.16b}, [x2]
ld1 {v1.4s}, [x0], #16
@@ -34,9 +34,9 @@ ENTRY(__aes_ce_encrypt)
eor v0.16b, v0.16b, v3.16b
st1 {v0.16b}, [x1]
ret
-ENDPROC(__aes_ce_encrypt)
+SYM_FUNC_END(__aes_ce_encrypt)
-ENTRY(__aes_ce_decrypt)
+SYM_FUNC_START(__aes_ce_decrypt)
sub w3, w3, #2
ld1 {v0.16b}, [x2]
ld1 {v1.4s}, [x0], #16
@@ -62,23 +62,23 @@ ENTRY(__aes_ce_decrypt)
eor v0.16b, v0.16b, v3.16b
st1 {v0.16b}, [x1]
ret
-ENDPROC(__aes_ce_decrypt)
+SYM_FUNC_END(__aes_ce_decrypt)
/*
* __aes_ce_sub() - use the aese instruction to perform the AES sbox
* substitution on each byte in 'input'
*/
-ENTRY(__aes_ce_sub)
+SYM_FUNC_START(__aes_ce_sub)
dup v1.4s, w0
movi v0.16b, #0
aese v0.16b, v1.16b
umov w0, v0.s[0]
ret
-ENDPROC(__aes_ce_sub)
+SYM_FUNC_END(__aes_ce_sub)
-ENTRY(__aes_ce_invert)
+SYM_FUNC_START(__aes_ce_invert)
ld1 {v0.4s}, [x1]
aesimc v1.16b, v0.16b
st1 {v1.4s}, [x0]
ret
-ENDPROC(__aes_ce_invert)
+SYM_FUNC_END(__aes_ce_invert)
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index 6d085dc56c51..56a5f6f0b0c1 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -143,14 +143,8 @@ int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret;
- ret = ce_aes_expandkey(ctx, in_key, key_len);
- if (!ret)
- return 0;
-
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ return ce_aes_expandkey(ctx, in_key, key_len);
}
EXPORT_SYMBOL(ce_aes_setkey);
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index c132c49c89a8..45062553467f 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -9,8 +9,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) ENTRY(ce_ ## func)
-#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
+#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func)
+#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func)
.arch armv8-a+crypto
diff --git a/arch/arm64/crypto/aes-cipher-core.S b/arch/arm64/crypto/aes-cipher-core.S
index 423d0aebc570..c9d6955f8404 100644
--- a/arch/arm64/crypto/aes-cipher-core.S
+++ b/arch/arm64/crypto/aes-cipher-core.S
@@ -122,11 +122,11 @@ CPU_BE( rev w7, w7 )
ret
.endm
-ENTRY(__aes_arm64_encrypt)
+SYM_FUNC_START(__aes_arm64_encrypt)
do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
-ENDPROC(__aes_arm64_encrypt)
+SYM_FUNC_END(__aes_arm64_encrypt)
.align 5
-ENTRY(__aes_arm64_decrypt)
+SYM_FUNC_START(__aes_arm64_decrypt)
do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0
-ENDPROC(__aes_arm64_decrypt)
+SYM_FUNC_END(__aes_arm64_decrypt)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index aa57dc639f77..ed5409c6abf4 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -132,13 +132,8 @@ static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
-
- ret = aes_expandkey(ctx, in_key, key_len);
- if (ret)
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return ret;
+ return aes_expandkey(ctx, in_key, key_len);
}
static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
@@ -155,11 +150,7 @@ static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
if (!ret)
ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
key_len / 2);
- if (!ret)
- return 0;
-
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ return ret;
}
static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
@@ -173,19 +164,12 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
ret = aes_expandkey(&ctx->key1, in_key, key_len);
if (ret)
- goto out;
+ return ret;
desc->tfm = ctx->hash;
crypto_shash_digest(desc, in_key, key_len, digest);
- ret = aes_expandkey(&ctx->key2, digest, sizeof(digest));
- if (ret)
- goto out;
-
- return 0;
-out:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ return aes_expandkey(&ctx->key2, digest, sizeof(digest));
}
static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
@@ -791,13 +775,8 @@ static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
unsigned int key_len)
{
struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
- int err;
- err = aes_expandkey(&ctx->key, in_key, key_len);
- if (err)
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
- return err;
+ return aes_expandkey(&ctx->key, in_key, key_len);
}
static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 131618389f1f..8a2faa42b57e 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -22,26 +22,26 @@
#define ST5(x...) x
#endif
-aes_encrypt_block4x:
+SYM_FUNC_START_LOCAL(aes_encrypt_block4x)
encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
-ENDPROC(aes_encrypt_block4x)
+SYM_FUNC_END(aes_encrypt_block4x)
-aes_decrypt_block4x:
+SYM_FUNC_START_LOCAL(aes_decrypt_block4x)
decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
-ENDPROC(aes_decrypt_block4x)
+SYM_FUNC_END(aes_decrypt_block4x)
#if MAX_STRIDE == 5
-aes_encrypt_block5x:
+SYM_FUNC_START_LOCAL(aes_encrypt_block5x)
encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
ret
-ENDPROC(aes_encrypt_block5x)
+SYM_FUNC_END(aes_encrypt_block5x)
-aes_decrypt_block5x:
+SYM_FUNC_START_LOCAL(aes_decrypt_block5x)
decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
ret
-ENDPROC(aes_decrypt_block5x)
+SYM_FUNC_END(aes_decrypt_block5x)
#endif
/*
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 22d9b110cf78..247d34ddaab0 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -8,8 +8,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) ENTRY(neon_ ## func)
-#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
+#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func)
+#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func)
xtsmask .req v7
cbciv .req v7
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index 65982039fa36..b357164379f6 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -380,7 +380,7 @@ ISRM0: .octa 0x0306090c00070a0d01040b0e0205080f
/*
* void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
*/
-ENTRY(aesbs_convert_key)
+SYM_FUNC_START(aesbs_convert_key)
ld1 {v7.4s}, [x1], #16 // load round 0 key
ld1 {v17.4s}, [x1], #16 // load round 1 key
@@ -425,10 +425,10 @@ ENTRY(aesbs_convert_key)
eor v17.16b, v17.16b, v7.16b
str q17, [x0]
ret
-ENDPROC(aesbs_convert_key)
+SYM_FUNC_END(aesbs_convert_key)
.align 4
-aesbs_encrypt8:
+SYM_FUNC_START_LOCAL(aesbs_encrypt8)
ldr q9, [bskey], #16 // round 0 key
ldr q8, M0SR
ldr q24, SR
@@ -488,10 +488,10 @@ aesbs_encrypt8:
eor v2.16b, v2.16b, v12.16b
eor v5.16b, v5.16b, v12.16b
ret
-ENDPROC(aesbs_encrypt8)
+SYM_FUNC_END(aesbs_encrypt8)
.align 4
-aesbs_decrypt8:
+SYM_FUNC_START_LOCAL(aesbs_decrypt8)
lsl x9, rounds, #7
add bskey, bskey, x9
@@ -553,7 +553,7 @@ aesbs_decrypt8:
eor v3.16b, v3.16b, v12.16b
eor v5.16b, v5.16b, v12.16b
ret
-ENDPROC(aesbs_decrypt8)
+SYM_FUNC_END(aesbs_decrypt8)
/*
* aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
@@ -621,21 +621,21 @@ ENDPROC(aesbs_decrypt8)
.endm
.align 4
-ENTRY(aesbs_ecb_encrypt)
+SYM_FUNC_START(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_ecb_encrypt)
+SYM_FUNC_END(aesbs_ecb_encrypt)
.align 4
-ENTRY(aesbs_ecb_decrypt)
+SYM_FUNC_START(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_ecb_decrypt)
+SYM_FUNC_END(aesbs_ecb_decrypt)
/*
* aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
.align 4
-ENTRY(aesbs_cbc_decrypt)
+SYM_FUNC_START(aesbs_cbc_decrypt)
frame_push 6
mov x19, x0
@@ -720,7 +720,7 @@ ENTRY(aesbs_cbc_decrypt)
2: frame_pop
ret
-ENDPROC(aesbs_cbc_decrypt)
+SYM_FUNC_END(aesbs_cbc_decrypt)
.macro next_tweak, out, in, const, tmp
sshr \tmp\().2d, \in\().2d, #63
@@ -736,7 +736,7 @@ ENDPROC(aesbs_cbc_decrypt)
* aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
-__xts_crypt8:
+SYM_FUNC_START_LOCAL(__xts_crypt8)
mov x6, #1
lsl x6, x6, x23
subs w23, w23, #8
@@ -789,7 +789,7 @@ __xts_crypt8:
0: mov bskey, x21
mov rounds, x22
br x7
-ENDPROC(__xts_crypt8)
+SYM_FUNC_END(__xts_crypt8)
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
frame_push 6, 64
@@ -854,13 +854,13 @@ ENDPROC(__xts_crypt8)
ret
.endm
-ENTRY(aesbs_xts_encrypt)
+SYM_FUNC_START(aesbs_xts_encrypt)
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_xts_encrypt)
+SYM_FUNC_END(aesbs_xts_encrypt)
-ENTRY(aesbs_xts_decrypt)
+SYM_FUNC_START(aesbs_xts_decrypt)
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_xts_decrypt)
+SYM_FUNC_END(aesbs_xts_decrypt)
.macro next_ctr, v
mov \v\().d[1], x8
@@ -874,7 +874,7 @@ ENDPROC(aesbs_xts_decrypt)
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
* int rounds, int blocks, u8 iv[], u8 final[])
*/
-ENTRY(aesbs_ctr_encrypt)
+SYM_FUNC_START(aesbs_ctr_encrypt)
frame_push 8
mov x19, x0
@@ -1002,4 +1002,4 @@ CPU_LE( rev x8, x8 )
7: cbz x25, 8b
st1 {v5.16b}, [x25]
b 8b
-ENDPROC(aesbs_ctr_encrypt)
+SYM_FUNC_END(aesbs_ctr_encrypt)
diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S
index 706c4e10e9e2..e90386a7db8e 100644
--- a/arch/arm64/crypto/chacha-neon-core.S
+++ b/arch/arm64/crypto/chacha-neon-core.S
@@ -36,7 +36,7 @@
*
* Clobbers: w3, x10, v4, v12
*/
-chacha_permute:
+SYM_FUNC_START_LOCAL(chacha_permute)
adr_l x10, ROT8
ld1 {v12.4s}, [x10]
@@ -104,9 +104,9 @@ chacha_permute:
b.ne .Ldoubleround
ret
-ENDPROC(chacha_permute)
+SYM_FUNC_END(chacha_permute)
-ENTRY(chacha_block_xor_neon)
+SYM_FUNC_START(chacha_block_xor_neon)
// x0: Input state matrix, s
// x1: 1 data block output, o
// x2: 1 data block input, i
@@ -143,9 +143,9 @@ ENTRY(chacha_block_xor_neon)
ldp x29, x30, [sp], #16
ret
-ENDPROC(chacha_block_xor_neon)
+SYM_FUNC_END(chacha_block_xor_neon)
-ENTRY(hchacha_block_neon)
+SYM_FUNC_START(hchacha_block_neon)
// x0: Input state matrix, s
// x1: output (8 32-bit words)
// w2: nrounds
@@ -163,7 +163,7 @@ ENTRY(hchacha_block_neon)
ldp x29, x30, [sp], #16
ret
-ENDPROC(hchacha_block_neon)
+SYM_FUNC_END(hchacha_block_neon)
a0 .req w12
a1 .req w13
@@ -183,7 +183,7 @@ ENDPROC(hchacha_block_neon)
a15 .req w28
.align 6
-ENTRY(chacha_4block_xor_neon)
+SYM_FUNC_START(chacha_4block_xor_neon)
frame_push 10
// x0: Input state matrix, s
@@ -845,7 +845,7 @@ CPU_BE( rev a15, a15 )
eor v31.16b, v31.16b, v3.16b
st1 {v28.16b-v31.16b}, [x1]
b .Lout
-ENDPROC(chacha_4block_xor_neon)
+SYM_FUNC_END(chacha_4block_xor_neon)
.section ".rodata", "a", %progbits
.align L1_CACHE_SHIFT
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index e545b42e6a46..5a95c2628fbf 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -131,7 +131,7 @@
tbl bd4.16b, {\bd\().16b}, perm4.16b
.endm
-__pmull_p8_core:
+SYM_FUNC_START_LOCAL(__pmull_p8_core)
.L__pmull_p8_core:
ext t4.8b, ad.8b, ad.8b, #1 // A1
ext t5.8b, ad.8b, ad.8b, #2 // A2
@@ -194,7 +194,7 @@ __pmull_p8_core:
eor t4.16b, t4.16b, t5.16b
eor t6.16b, t6.16b, t3.16b
ret
-ENDPROC(__pmull_p8_core)
+SYM_FUNC_END(__pmull_p8_core)
.macro __pmull_p8, rq, ad, bd, i
.ifnc \bd, fold_consts
@@ -488,9 +488,9 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
//
// Assumes len >= 16.
//
-ENTRY(crc_t10dif_pmull_p8)
+SYM_FUNC_START(crc_t10dif_pmull_p8)
crc_t10dif_pmull p8
-ENDPROC(crc_t10dif_pmull_p8)
+SYM_FUNC_END(crc_t10dif_pmull_p8)
.align 5
//
@@ -498,9 +498,9 @@ ENDPROC(crc_t10dif_pmull_p8)
//
// Assumes len >= 16.
//
-ENTRY(crc_t10dif_pmull_p64)
+SYM_FUNC_START(crc_t10dif_pmull_p64)
crc_t10dif_pmull p64
-ENDPROC(crc_t10dif_pmull_p64)
+SYM_FUNC_END(crc_t10dif_pmull_p64)
.section ".rodata", "a"
.align 4
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index a791c4adf8e6..084c6a30b03a 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -350,13 +350,13 @@ CPU_LE( rev64 T1.16b, T1.16b )
* void pmull_ghash_update(int blocks, u64 dg[], const char *src,
* struct ghash_key const *k, const char *head)
*/
-ENTRY(pmull_ghash_update_p64)
+SYM_FUNC_START(pmull_ghash_update_p64)
__pmull_ghash p64
-ENDPROC(pmull_ghash_update_p64)
+SYM_FUNC_END(pmull_ghash_update_p64)
-ENTRY(pmull_ghash_update_p8)
+SYM_FUNC_START(pmull_ghash_update_p8)
__pmull_ghash p8
-ENDPROC(pmull_ghash_update_p8)
+SYM_FUNC_END(pmull_ghash_update_p8)
KS0 .req v8
KS1 .req v9
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 522cf004ce65..22831d3b7f62 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -248,10 +248,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
{
struct ghash_key *key = crypto_shash_ctx(tfm);
- if (keylen != GHASH_BLOCK_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
- }
return __ghash_setkey(key, inkey, keylen);
}
@@ -259,7 +257,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
static struct shash_alg ghash_alg[] = {{
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-neon",
- .base.cra_priority = 100,
+ .base.cra_priority = 150,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key),
.base.cra_module = THIS_MODULE,
@@ -306,10 +304,8 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
int ret;
ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
- if (ret) {
- tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (ret)
return -EINVAL;
- }
aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
diff --git a/arch/arm64/crypto/nh-neon-core.S b/arch/arm64/crypto/nh-neon-core.S
index e05570c38de7..51c0a534ef87 100644
--- a/arch/arm64/crypto/nh-neon-core.S
+++ b/arch/arm64/crypto/nh-neon-core.S
@@ -62,7 +62,7 @@
*
* It's guaranteed that message_len % 16 == 0.
*/
-ENTRY(nh_neon)
+SYM_FUNC_START(nh_neon)
ld1 {K0.4s,K1.4s}, [KEY], #32
movi PASS0_SUMS.2d, #0
@@ -100,4 +100,4 @@ ENTRY(nh_neon)
addp T1.2d, PASS2_SUMS.2d, PASS3_SUMS.2d
st1 {T0.16b,T1.16b}, [HASH]
ret
-ENDPROC(nh_neon)
+SYM_FUNC_END(nh_neon)
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
index 83a2338a8826..e97b092f56b8 100644
--- a/arch/arm64/crypto/poly1305-glue.c
+++ b/arch/arm64/crypto/poly1305-glue.c
@@ -21,7 +21,7 @@
asmlinkage void poly1305_init_arm64(void *state, const u8 *key);
asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
-asmlinkage void poly1305_emit(void *state, __le32 *digest, const u32 *nonce);
+asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
@@ -162,9 +162,6 @@ EXPORT_SYMBOL(poly1305_update_arch);
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
{
- __le32 digest[4];
- u64 f = 0;
-
if (unlikely(dctx->buflen)) {
dctx->buf[dctx->buflen++] = 1;
memset(dctx->buf + dctx->buflen, 0,
@@ -172,18 +169,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
}
- poly1305_emit(&dctx->h, digest, dctx->s);
-
- /* mac = (h + s) % (2^128) */
- f = (f >> 32) + le32_to_cpu(digest[0]);
- put_unaligned_le32(f, dst);
- f = (f >> 32) + le32_to_cpu(digest[1]);
- put_unaligned_le32(f, dst + 4);
- f = (f >> 32) + le32_to_cpu(digest[2]);
- put_unaligned_le32(f, dst + 8);
- f = (f >> 32) + le32_to_cpu(digest[3]);
- put_unaligned_le32(f, dst + 12);
-
+ poly1305_emit(&dctx->h, dst, dctx->s);
*dctx = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL(poly1305_final_arch);
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index c2ce1f820706..92d0d2753e81 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -65,7 +65,7 @@
* void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
* int blocks)
*/
-ENTRY(sha1_ce_transform)
+SYM_FUNC_START(sha1_ce_transform)
frame_push 3
mov x19, x0
@@ -160,4 +160,4 @@ CPU_LE( rev32 v11.16b, v11.16b )
str dgb, [x19, #16]
frame_pop
ret
-ENDPROC(sha1_ce_transform)
+SYM_FUNC_END(sha1_ce_transform)
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..63c875d3314b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -28,6 +28,13 @@ struct sha1_ce_state {
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);
+static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks)
+{
+ sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst), src,
+ blocks);
+}
+
const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
kernel_neon_end();
return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
if (!finalize)
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 6f728a419009..3f9d0f326987 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -75,7 +75,7 @@
* int blocks)
*/
.text
-ENTRY(sha2_ce_transform)
+SYM_FUNC_START(sha2_ce_transform)
frame_push 3
mov x19, x0
@@ -166,4 +166,4 @@ CPU_LE( rev32 v19.16b, v19.16b )
4: st1 {dgav.4s, dgbv.4s}, [x19]
frame_pop
ret
-ENDPROC(sha2_ce_transform)
+SYM_FUNC_END(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..a8e67bafba3d 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -28,6 +28,13 @@ struct sha256_ce_state {
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);
+static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst), src,
+ blocks);
+}
+
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
sst.count);
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_data_order(sst->state, src, blocks);
+}
+
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
kernel_neon_end();
return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}
@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;
kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
if (!finalize)
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
if (!crypto_simd_usable()) {
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}
sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 999da59f03a9..ddf4a0d85c1c 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_data_order(sst->state, src, blocks);
+}
+
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);
+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_neon(sst->state, src, blocks);
+}
+
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
}
static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
while (len > 0) {
unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE;
kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc, __sha256_block_neon);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
index a7d587fa54f6..1cfb768df350 100644
--- a/arch/arm64/crypto/sha3-ce-core.S
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -40,7 +40,7 @@
* sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
*/
.text
-ENTRY(sha3_ce_transform)
+SYM_FUNC_START(sha3_ce_transform)
frame_push 4
mov x19, x0
@@ -218,7 +218,7 @@ ENTRY(sha3_ce_transform)
st1 {v24.1d}, [x19]
frame_pop
ret
-ENDPROC(sha3_ce_transform)
+SYM_FUNC_END(sha3_ce_transform)
.section ".rodata", "a"
.align 8
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
index ce65e3abe4f2..cde606c0323e 100644
--- a/arch/arm64/crypto/sha512-ce-core.S
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -106,7 +106,7 @@
* int blocks)
*/
.text
-ENTRY(sha512_ce_transform)
+SYM_FUNC_START(sha512_ce_transform)
frame_push 3
mov x19, x0
@@ -216,4 +216,4 @@ CPU_LE( rev64 v19.16b, v19.16b )
3: st1 {v8.2d-v11.2d}, [x19]
frame_pop
ret
-ENDPROC(sha512_ce_transform)
+SYM_FUNC_END(sha512_ce_transform)
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..dc890a719f54 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+ int blocks)
+{
+ sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
if (!crypto_simd_usable())
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
kernel_neon_end();
return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}
kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
if (!crypto_simd_usable()) {
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}
kernel_neon_begin();
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..78d3083de6b7 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha512_block_data_order);
+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+ int blocks)
+{
+ sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
}
static int sha512_finup(struct shash_desc *desc, const u8 *data,
@@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sm3-ce-core.S b/arch/arm64/crypto/sm3-ce-core.S
index d50d187906cb..ef97d3187cb7 100644
--- a/arch/arm64/crypto/sm3-ce-core.S
+++ b/arch/arm64/crypto/sm3-ce-core.S
@@ -73,7 +73,7 @@
* int blocks)
*/
.text
-ENTRY(sm3_ce_transform)
+SYM_FUNC_START(sm3_ce_transform)
/* load state */
ld1 {v8.4s-v9.4s}, [x0]
rev64 v8.4s, v8.4s
@@ -131,7 +131,7 @@ CPU_LE( rev32 v3.16b, v3.16b )
ext v9.16b, v9.16b, v9.16b, #8
st1 {v8.4s-v9.4s}, [x0]
ret
-ENDPROC(sm3_ce_transform)
+SYM_FUNC_END(sm3_ce_transform)
.section ".rodata", "a"
.align 3
diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S
index af3bfbc3f4d4..4ac6cfbc5797 100644
--- a/arch/arm64/crypto/sm4-ce-core.S
+++ b/arch/arm64/crypto/sm4-ce-core.S
@@ -15,7 +15,7 @@
* void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in);
*/
.text
-ENTRY(sm4_ce_do_crypt)
+SYM_FUNC_START(sm4_ce_do_crypt)
ld1 {v8.4s}, [x2]
ld1 {v0.4s-v3.4s}, [x0], #64
CPU_LE( rev32 v8.16b, v8.16b )
@@ -33,4 +33,4 @@ CPU_LE( rev32 v8.16b, v8.16b )
CPU_LE( rev32 v8.16b, v8.16b )
st1 {v8.4s}, [x1]
ret
-ENDPROC(sm4_ce_do_crypt)
+SYM_FUNC_END(sm4_ce_do_crypt)
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
index 7d1d2425746f..faa88a6a74c0 100644
--- a/arch/mips/crypto/crc32-mips.c
+++ b/arch/mips/crypto/crc32-mips.c
@@ -177,10 +177,8 @@ static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(mctx->key)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(mctx->key))
return -EINVAL;
- }
mctx->key = get_unaligned_le32(key);
return 0;
}
diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
index b37d29cf5d0a..fc881b46d911 100644
--- a/arch/mips/crypto/poly1305-glue.c
+++ b/arch/mips/crypto/poly1305-glue.c
@@ -15,7 +15,7 @@
asmlinkage void poly1305_init_mips(void *state, const u8 *key);
asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
-asmlinkage void poly1305_emit_mips(void *state, __le32 *digest, const u32 *nonce);
+asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
{
@@ -134,9 +134,6 @@ EXPORT_SYMBOL(poly1305_update_arch);
void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
{
- __le32 digest[4];
- u64 f = 0;
-
if (unlikely(dctx->buflen)) {
dctx->buf[dctx->buflen++] = 1;
memset(dctx->buf + dctx->buflen, 0,
@@ -144,18 +141,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
}
- poly1305_emit_mips(&dctx->h, digest, dctx->s);
-
- /* mac = (h + s) % (2^128) */
- f = (f >> 32) + le32_to_cpu(digest[0]);
- put_unaligned_le32(f, dst);
- f = (f >> 32) + le32_to_cpu(digest[1]);
- put_unaligned_le32(f, dst + 4);
- f = (f >> 32) + le32_to_cpu(digest[2]);
- put_unaligned_le32(f, dst + 8);
- f = (f >> 32) + le32_to_cpu(digest[3]);
- put_unaligned_le32(f, dst + 12);
-
+ poly1305_emit_mips(&dctx->h, dst, dctx->s);
*dctx = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL(poly1305_final_arch);
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index 1fad5d4c658d..c2b23b69d7b1 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -94,13 +94,6 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
{
struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (key_len != AES_KEYSIZE_128 &&
- key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
switch (key_len) {
case AES_KEYSIZE_128:
ctx->rounds = 4;
@@ -114,6 +107,8 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
ctx->rounds = 6;
ppc_expand_key_256(ctx->key_enc, in_key);
break;
+ default:
+ return -EINVAL;
}
ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
@@ -139,13 +134,6 @@ static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
key_len >>= 1;
- if (key_len != AES_KEYSIZE_128 &&
- key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
switch (key_len) {
case AES_KEYSIZE_128:
ctx->rounds = 4;
@@ -162,6 +150,8 @@ static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
ppc_expand_key_256(ctx->key_enc, in_key);
ppc_expand_key_256(ctx->key_twk, in_key + AES_KEYSIZE_256);
break;
+ default:
+ return -EINVAL;
}
ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len);
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 2c232898b933..63760b7dbb76 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -73,10 +73,8 @@ static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key,
{
u32 *mctx = crypto_shash_ctx(hash);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index ead0b2c9881d..1c23d84a9097 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -72,19 +72,12 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- int ret;
sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
- return ret;
+ return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
@@ -182,18 +175,13 @@ static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
- int ret;
crypto_skcipher_clear_flags(sctx->fallback.skcipher,
CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(sctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(sctx->fallback.skcipher) &
- CRYPTO_TFM_RES_MASK);
- return ret;
+ return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
@@ -389,17 +377,12 @@ static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
- int ret;
crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(xts_ctx->fallback,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(xts_ctx->fallback) &
- CRYPTO_TFM_RES_MASK);
- return ret;
+ return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}
static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -414,10 +397,8 @@ static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
return err;
/* In fips mode only 128 bit or 256 bit keys are valid */
- if (fips_enabled && key_len != 32 && key_len != 64) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (fips_enabled && key_len != 32 && key_len != 64)
return -EINVAL;
- }
/* Pick the correct function code based on the key length */
fc = (key_len == 32) ? CPACF_KM_XTS_128 :
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 423ee05887e6..fafecad20752 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -111,10 +111,8 @@ static int crc32_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
{
struct crc_ctx *mctx = crypto_shash_ctx(tfm);
- if (newkeylen != sizeof(mctx->key)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (newkeylen != sizeof(mctx->key))
return -EINVAL;
- }
mctx->key = le32_to_cpu(*(__le32 *)newkey);
return 0;
}
@@ -124,10 +122,8 @@ static int crc32be_vx_setkey(struct crypto_shash *tfm, const u8 *newkey,
{
struct crc_ctx *mctx = crypto_shash_ctx(tfm);
- if (newkeylen != sizeof(mctx->key)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (newkeylen != sizeof(mctx->key))
return -EINVAL;
- }
mctx->key = be32_to_cpu(*(__be32 *)newkey);
return 0;
}
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index a3e7400e031c..6b07a2f1ce8a 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -43,10 +43,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
- if (keylen != GHASH_BLOCK_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
- }
memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index c7119c617b6e..e2a85783f804 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -151,11 +151,7 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
if (rc)
return rc;
- if (__paes_set_key(ctx)) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- return 0;
+ return __paes_set_key(ctx);
}
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -254,11 +250,7 @@ static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
if (rc)
return rc;
- if (__cbc_paes_set_key(ctx)) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- return 0;
+ return __cbc_paes_set_key(ctx);
}
static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -386,10 +378,9 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
if (rc)
return rc;
- if (__xts_paes_set_key(ctx)) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ rc = __xts_paes_set_key(ctx);
+ if (rc)
+ return rc;
/*
* xts_check_key verifies the key length is not odd and makes
@@ -526,11 +517,7 @@ static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
if (rc)
return rc;
- if (__ctr_paes_set_key(ctx)) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- return 0;
+ return __ctr_paes_set_key(ctx);
}
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 0f5a501c95a9..e3d2138ff9e2 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -169,7 +169,6 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
switch (key_len) {
case AES_KEYSIZE_128:
@@ -188,7 +187,6 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
break;
default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 1700f863748c..aaa9714378e6 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -39,12 +39,9 @@ static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key,
{
struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
const u32 *in_key = (const u32 *) _in_key;
- u32 *flags = &tfm->crt_flags;
- if (key_len != 16 && key_len != 24 && key_len != 32) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
- }
ctx->key_len = key_len;
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index 1299073285a3..4e9323229e71 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -33,10 +33,8 @@ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key,
{
u32 *mctx = crypto_shash_ctx(hash);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
*(__le32 *)mctx = le32_to_cpup((__le32 *)key);
return 0;
}
diff --git a/arch/x86/crypto/.gitignore b/arch/x86/crypto/.gitignore
new file mode 100644
index 000000000000..30be0400a439
--- /dev/null
+++ b/arch/x86/crypto/.gitignore
@@ -0,0 +1 @@
+poly1305-x86_64-cryptogams.S
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 958440eae27e..b69e00bf20b8 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -73,6 +73,10 @@ aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
+poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
+ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),)
+targets += poly1305-x86_64-cryptogams.S
+endif
ifeq ($(avx_supported),yes)
camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
@@ -101,10 +105,8 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
-poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o
ifeq ($(avx2_supported),yes)
sha1-ssse3-y += sha1_avx2_x86_64_asm.o
-poly1305-x86_64-y += poly1305-avx2-x86_64.o
endif
ifeq ($(sha1_ni_supported),yes)
sha1-ssse3-y += sha1_ni_asm.o
@@ -118,3 +120,8 @@ sha256-ssse3-y += sha256_ni_asm.o
endif
sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
+
+quiet_cmd_perlasm = PERLASM $@
+ cmd_perlasm = $(PERL) $< > $@
+$(obj)/%.S: $(src)/%.pl FORCE
+ $(call if_changed,perlasm)
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 46d227122643..4623189000d8 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -144,10 +144,8 @@ static int crypto_aegis128_aesni_setkey(struct crypto_aead *aead, const u8 *key,
{
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(aead);
- if (keylen != AEGIS128_KEY_SIZE) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AEGIS128_KEY_SIZE)
return -EINVAL;
- }
memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index d28503f99f58..cad6e1bfa7d5 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1942,7 +1942,7 @@ SYM_FUNC_START(aesni_set_key)
SYM_FUNC_END(aesni_set_key)
/*
- * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_enc(const void *ctx, u8 *dst, const u8 *src)
*/
SYM_FUNC_START(aesni_enc)
FRAME_BEGIN
@@ -2131,7 +2131,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
SYM_FUNC_END(_aesni_enc4)
/*
- * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_dec (const void *ctx, u8 *dst, const u8 *src)
*/
SYM_FUNC_START(aesni_dec)
FRAME_BEGIN
@@ -2716,8 +2716,8 @@ SYM_FUNC_END(aesni_ctr_enc)
pxor CTR, IV;
/*
- * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
- * bool enc, u8 *iv)
+ * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst,
+ * const u8 *src, bool enc, le128 *iv)
*/
SYM_FUNC_START(aesni_xts_crypt8)
FRAME_BEGIN
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 3e707e81afdb..bbbebbd35b5d 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -83,10 +83,8 @@ struct gcm_context_data {
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in);
-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in);
+asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
@@ -106,8 +104,8 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
-asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, bool enc, u8 *iv);
+asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, bool enc, le128 *iv);
/* asmlinkage void aesni_gcm_enc()
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
@@ -318,14 +316,11 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
const u8 *in_key, unsigned int key_len)
{
struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
- u32 *flags = &tfm->crt_flags;
int err;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ key_len != AES_KEYSIZE_256)
return -EINVAL;
- }
if (!crypto_simd_usable())
err = aes_expandkey(ctx, in_key, key_len);
@@ -550,29 +545,24 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
-static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
-{
- aesni_enc(ctx, out, in);
-}
-
-static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
}
-static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
}
-static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
+ aesni_xts_crypt8(ctx, dst, src, true, iv);
}
-static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
+ aesni_xts_crypt8(ctx, dst, src, false, iv);
}
static const struct common_glue_ctx aesni_enc_xts = {
@@ -581,10 +571,10 @@ static const struct common_glue_ctx aesni_enc_xts = {
.funcs = { {
.num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
+ .fn_u = { .xts = aesni_xts_enc8 }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
+ .fn_u = { .xts = aesni_xts_enc }
} }
};
@@ -594,10 +584,10 @@ static const struct common_glue_ctx aesni_dec_xts = {
.funcs = { {
.num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
+ .fn_u = { .xts = aesni_xts_dec8 }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
+ .fn_u = { .xts = aesni_xts_dec }
} }
};
@@ -606,8 +596,7 @@ static int xts_encrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&aesni_enc_xts, req,
- XTS_TWEAK_CAST(aesni_xts_tweak),
+ return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
aes_ctx(ctx->raw_tweak_ctx),
aes_ctx(ctx->raw_crypt_ctx),
false);
@@ -618,8 +607,7 @@ static int xts_decrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&aesni_dec_xts, req,
- XTS_TWEAK_CAST(aesni_xts_tweak),
+ return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
aes_ctx(ctx->raw_tweak_ctx),
aes_ctx(ctx->raw_crypt_ctx),
true);
@@ -650,10 +638,9 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
{
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
- if (key_len < 4) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (key_len < 4)
return -EINVAL;
- }
+
/*Account for 4 byte nonce at the end.*/
key_len -= 4;
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
index 1d9ff8a45e1f..06ef2d4a4701 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -64,10 +64,8 @@ static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
return -EINVAL;
- }
memcpy(tctx->key, key, keylen);
tctx->keylen = keylen;
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index a4f00128ea55..ccda647422d6 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -19,20 +19,17 @@
#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
/* 32-way AVX2/AES-NI parallel cipher functions */
-asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
-asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
static const struct common_glue_ctx camellia_enc = {
.num_funcs = 4,
@@ -40,16 +37,16 @@ static const struct common_glue_ctx camellia_enc = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_32way) }
+ .fn_u = { .ecb = camellia_ecb_enc_32way }
}, {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+ .fn_u = { .ecb = camellia_ecb_enc_16way }
}, {
.num_blocks = 2,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+ .fn_u = { .ecb = camellia_enc_blk_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+ .fn_u = { .ecb = camellia_enc_blk }
} }
};
@@ -59,16 +56,16 @@ static const struct common_glue_ctx camellia_ctr = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_32way) }
+ .fn_u = { .ctr = camellia_ctr_32way }
}, {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+ .fn_u = { .ctr = camellia_ctr_16way }
}, {
.num_blocks = 2,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+ .fn_u = { .ctr = camellia_crypt_ctr_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+ .fn_u = { .ctr = camellia_crypt_ctr }
} }
};
@@ -78,13 +75,13 @@ static const struct common_glue_ctx camellia_enc_xts = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_32way) }
+ .fn_u = { .xts = camellia_xts_enc_32way }
}, {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+ .fn_u = { .xts = camellia_xts_enc_16way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+ .fn_u = { .xts = camellia_xts_enc }
} }
};
@@ -94,16 +91,16 @@ static const struct common_glue_ctx camellia_dec = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_32way) }
+ .fn_u = { .ecb = camellia_ecb_dec_32way }
}, {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+ .fn_u = { .ecb = camellia_ecb_dec_16way }
}, {
.num_blocks = 2,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+ .fn_u = { .ecb = camellia_dec_blk_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+ .fn_u = { .ecb = camellia_dec_blk }
} }
};
@@ -113,16 +110,16 @@ static const struct common_glue_ctx camellia_dec_cbc = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_32way) }
+ .fn_u = { .cbc = camellia_cbc_dec_32way }
}, {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+ .fn_u = { .cbc = camellia_cbc_dec_16way }
}, {
.num_blocks = 2,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+ .fn_u = { .cbc = camellia_decrypt_cbc_2way }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+ .fn_u = { .cbc = camellia_dec_blk }
} }
};
@@ -132,21 +129,20 @@ static const struct common_glue_ctx camellia_dec_xts = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_32way) }
+ .fn_u = { .xts = camellia_xts_dec_32way }
}, {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+ .fn_u = { .xts = camellia_xts_dec_16way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+ .fn_u = { .xts = camellia_xts_dec }
} }
};
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
- &tfm->base.crt_flags);
+ return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
@@ -161,8 +157,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
- req);
+ return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -180,8 +175,7 @@ static int xts_encrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&camellia_enc_xts, req,
- XTS_TWEAK_CAST(camellia_enc_blk),
+ return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
}
@@ -190,8 +184,7 @@ static int xts_decrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&camellia_dec_xts, req,
- XTS_TWEAK_CAST(camellia_enc_blk),
+ return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
}
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index f28d282779b8..4e5de6ef206e 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -18,41 +18,36 @@
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
/* 16-way parallel cipher functions (avx/aes-ni) */
-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way);
-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way);
-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way);
-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
EXPORT_SYMBOL_GPL(camellia_ctr_16way);
-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
EXPORT_SYMBOL_GPL(camellia_xts_enc_16way);
-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
EXPORT_SYMBOL_GPL(camellia_xts_dec_16way);
-void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(camellia_enc_blk));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk);
}
EXPORT_SYMBOL_GPL(camellia_xts_enc);
-void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(camellia_dec_blk));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk);
}
EXPORT_SYMBOL_GPL(camellia_xts_dec);
@@ -62,13 +57,13 @@ static const struct common_glue_ctx camellia_enc = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+ .fn_u = { .ecb = camellia_ecb_enc_16way }
}, {
.num_blocks = 2,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+ .fn_u = { .ecb = camellia_enc_blk_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+ .fn_u = { .ecb = camellia_enc_blk }
} }
};
@@ -78,13 +73,13 @@ static const struct common_glue_ctx camellia_ctr = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+ .fn_u = { .ctr = camellia_ctr_16way }
}, {
.num_blocks = 2,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+ .fn_u = { .ctr = camellia_crypt_ctr_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+ .fn_u = { .ctr = camellia_crypt_ctr }
} }
};
@@ -94,10 +89,10 @@ static const struct common_glue_ctx camellia_enc_xts = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+ .fn_u = { .xts = camellia_xts_enc_16way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+ .fn_u = { .xts = camellia_xts_enc }
} }
};
@@ -107,13 +102,13 @@ static const struct common_glue_ctx camellia_dec = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+ .fn_u = { .ecb = camellia_ecb_dec_16way }
}, {
.num_blocks = 2,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+ .fn_u = { .ecb = camellia_dec_blk_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+ .fn_u = { .ecb = camellia_dec_blk }
} }
};
@@ -123,13 +118,13 @@ static const struct common_glue_ctx camellia_dec_cbc = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+ .fn_u = { .cbc = camellia_cbc_dec_16way }
}, {
.num_blocks = 2,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+ .fn_u = { .cbc = camellia_decrypt_cbc_2way }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+ .fn_u = { .cbc = camellia_dec_blk }
} }
};
@@ -139,18 +134,17 @@ static const struct common_glue_ctx camellia_dec_xts = {
.funcs = { {
.num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+ .fn_u = { .xts = camellia_xts_dec_16way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+ .fn_u = { .xts = camellia_xts_dec }
} }
};
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
- &tfm->base.crt_flags);
+ return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
static int ecb_encrypt(struct skcipher_request *req)
@@ -165,8 +159,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
- req);
+ return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -183,7 +176,6 @@ int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int err;
err = xts_verify_key(tfm, key, keylen);
@@ -191,13 +183,12 @@ int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
return err;
/* first half of xts-key is for crypt */
- err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+ err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2);
if (err)
return err;
/* second half of xts-key is for tweak */
- return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
- flags);
+ return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
EXPORT_SYMBOL_GPL(xts_camellia_setkey);
@@ -206,8 +197,7 @@ static int xts_encrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&camellia_enc_xts, req,
- XTS_TWEAK_CAST(camellia_enc_blk),
+ return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
}
@@ -216,8 +206,7 @@ static int xts_decrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&camellia_dec_xts, req,
- XTS_TWEAK_CAST(camellia_enc_blk),
+ return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
}
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 7c62db56ffe1..242c056e5fa8 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -18,19 +18,17 @@
#include <asm/crypto/glue_helper.h>
/* regular block cipher functions */
-asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
+ bool xor);
EXPORT_SYMBOL_GPL(__camellia_enc_blk);
-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_dec_blk);
/* 2-way parallel cipher functions */
-asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
+ bool xor);
EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way);
-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_dec_blk_2way);
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -1229,12 +1227,10 @@ static void camellia_setup192(const unsigned char *key, u64 *subkey)
}
int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
- unsigned int key_len, u32 *flags)
+ unsigned int key_len)
{
- if (key_len != 16 && key_len != 24 && key_len != 32) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
- }
cctx->key_length = key_len;
@@ -1257,8 +1253,7 @@ EXPORT_SYMBOL_GPL(__camellia_setkey);
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
- return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len,
- &tfm->crt_flags);
+ return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len);
}
static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
@@ -1267,8 +1262,10 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return camellia_setkey(&tfm->base, key, key_len);
}
-void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
+void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s)
{
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
u128 iv = *src;
camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src);
@@ -1277,9 +1274,11 @@ void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
}
EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way);
-void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
{
be128 ctrblk;
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
if (dst != src)
*dst = *src;
@@ -1291,9 +1290,11 @@ void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
}
EXPORT_SYMBOL_GPL(camellia_crypt_ctr);
-void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
{
be128 ctrblks[2];
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
if (dst != src) {
dst[0] = src[0];
@@ -1315,10 +1316,10 @@ static const struct common_glue_ctx camellia_enc = {
.funcs = { {
.num_blocks = 2,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+ .fn_u = { .ecb = camellia_enc_blk_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+ .fn_u = { .ecb = camellia_enc_blk }
} }
};
@@ -1328,10 +1329,10 @@ static const struct common_glue_ctx camellia_ctr = {
.funcs = { {
.num_blocks = 2,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+ .fn_u = { .ctr = camellia_crypt_ctr_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+ .fn_u = { .ctr = camellia_crypt_ctr }
} }
};
@@ -1341,10 +1342,10 @@ static const struct common_glue_ctx camellia_dec = {
.funcs = { {
.num_blocks = 2,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+ .fn_u = { .ecb = camellia_dec_blk_2way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+ .fn_u = { .ecb = camellia_dec_blk }
} }
};
@@ -1354,10 +1355,10 @@ static const struct common_glue_ctx camellia_dec_cbc = {
.funcs = { {
.num_blocks = 2,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+ .fn_u = { .cbc = camellia_decrypt_cbc_2way }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+ .fn_u = { .cbc = camellia_dec_blk }
} }
};
@@ -1373,8 +1374,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
- req);
+ return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
}
static int cbc_decrypt(struct skcipher_request *req)
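
The setkey changes above all follow one pattern: with the CRYPTO_TFM_RES flags removed, a bad key length is reported by returning -EINVAL alone, with no tfm flag to set. A minimal sketch of the resulting convention; example_ctx and example_setkey() are hypothetical names for illustration, not kernel symbols:

    #include <errno.h>

    /* Hypothetical context and setkey helper showing the post-CRYPTO_TFM_RES
     * convention: reject bad key lengths with a plain -EINVAL. */
    struct example_ctx {
            unsigned int key_length;
    };

    static int example_setkey(struct example_ctx *ctx, const unsigned char *key,
                              unsigned int key_len)
    {
            /* no tfm flags to set any more: the error code alone is the report */
            if (key_len != 16 && key_len != 24 && key_len != 32)
                    return -EINVAL;

            ctx->key_length = key_len;
            (void)key;      /* a real implementation would expand the key schedule here */
            return 0;
    }
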
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index a8a38fffb4a9..48e0f37796fa 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -20,20 +20,17 @@
#define CAST6_PARALLEL_BLOCKS 8
-asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src);
-
-asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src,
+asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+
+asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -41,21 +38,21 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
return cast6_setkey(&tfm->base, key, keylen);
}
-static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(__cast6_encrypt));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt);
}
-static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(__cast6_decrypt));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt);
}
-static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
{
be128 ctrblk;
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
le128_to_be128(&ctrblk, iv);
le128_inc(iv);
@@ -70,10 +67,10 @@ static const struct common_glue_ctx cast6_enc = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) }
+ .fn_u = { .ecb = cast6_ecb_enc_8way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) }
+ .fn_u = { .ecb = __cast6_encrypt }
} }
};
@@ -83,10 +80,10 @@ static const struct common_glue_ctx cast6_ctr = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) }
+ .fn_u = { .ctr = cast6_ctr_8way }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) }
+ .fn_u = { .ctr = cast6_crypt_ctr }
} }
};
@@ -96,10 +93,10 @@ static const struct common_glue_ctx cast6_enc_xts = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc_8way) }
+ .fn_u = { .xts = cast6_xts_enc_8way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc) }
+ .fn_u = { .xts = cast6_xts_enc }
} }
};
@@ -109,10 +106,10 @@ static const struct common_glue_ctx cast6_dec = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) }
+ .fn_u = { .ecb = cast6_ecb_dec_8way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) }
+ .fn_u = { .ecb = __cast6_decrypt }
} }
};
@@ -122,10 +119,10 @@ static const struct common_glue_ctx cast6_dec_cbc = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) }
+ .fn_u = { .cbc = cast6_cbc_dec_8way }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) }
+ .fn_u = { .cbc = __cast6_decrypt }
} }
};
@@ -135,10 +132,10 @@ static const struct common_glue_ctx cast6_dec_xts = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec_8way) }
+ .fn_u = { .xts = cast6_xts_dec_8way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec) }
+ .fn_u = { .xts = cast6_xts_dec }
} }
};
@@ -154,8 +151,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
- req);
+ return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -177,7 +173,6 @@ static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int err;
err = xts_verify_key(tfm, key, keylen);
@@ -185,13 +180,12 @@ static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
return err;
/* first half of xts-key is for crypt */
- err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+ err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2);
if (err)
return err;
/* second half of xts-key is for tweak */
- return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
- flags);
+ return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
static int xts_encrypt(struct skcipher_request *req)
@@ -199,8 +193,7 @@ static int xts_encrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&cast6_enc_xts, req,
- XTS_TWEAK_CAST(__cast6_encrypt),
+ return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt,
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
}
@@ -209,8 +202,7 @@ static int xts_decrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&cast6_dec_xts, req,
- XTS_TWEAK_CAST(__cast6_encrypt),
+ return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt,
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
}
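
As the comments in both xts_*_setkey() routines note, an XTS key is really two equal-length keys: the first half programs the data cipher and the second half the tweak cipher. A rough illustration of that split, reusing the hypothetical example_setkey() from the sketch earlier in this section:

    /* Hypothetical XTS setkey sketch: split the key in half and program two
     * independent cipher contexts, one for data and one for the tweak. */
    struct example_xts_ctx {
            struct example_ctx crypt_ctx;   /* first half of the XTS key  */
            struct example_ctx tweak_ctx;   /* second half of the XTS key */
    };

    static int example_xts_setkey(struct example_xts_ctx *ctx,
                                  const unsigned char *key, unsigned int keylen)
    {
            int err;

            /* first half of xts-key is for crypt */
            err = example_setkey(&ctx->crypt_ctx, key, keylen / 2);
            if (err)
                    return err;

            /* second half of xts-key is for tweak */
            return example_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
    }
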
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index cb4ab6645106..418bd88acac8 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -94,10 +94,8 @@ static int crc32_pclmul_setkey(struct crypto_shash *hash, const u8 *key,
{
u32 *mctx = crypto_shash_ctx(hash);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index eefa0862f309..c20d1b8a82c3 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -91,10 +91,8 @@ static int crc32c_intel_setkey(struct crypto_shash *hash, const u8 *key,
{
u32 *mctx = crypto_shash_ctx(hash);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 04d72a5a8ce9..a4b728518e28 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -57,10 +57,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
be128 *x = (be128 *)key;
u64 a, b;
- if (keylen != GHASH_BLOCK_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
- }
/* perform multiplication by 'x' in GF(2^128) */
a = be64_to_cpu(x->a);
@@ -257,16 +255,11 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_ahash *child = &ctx->cryptd_tfm->base;
- int err;
crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
& CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(child, key, keylen);
- crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
- & CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_ahash_setkey(child, key, keylen);
}
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index d15b99397480..d3d91a0abf88 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -134,7 +134,8 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
src -= num_blocks - 1;
dst -= num_blocks - 1;
- gctx->funcs[i].fn_u.cbc(ctx, dst, src);
+ gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
+ (const u8 *)src);
nbytes -= func_bytes;
if (nbytes < bsize)
@@ -188,7 +189,9 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
/* Process multi-block batch */
do {
- gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
+ gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
+ (const u8 *)src,
+ &ctrblk);
src += num_blocks;
dst += num_blocks;
nbytes -= func_bytes;
@@ -210,7 +213,8 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
be128_to_le128(&ctrblk, (be128 *)walk.iv);
memcpy(&tmp, walk.src.virt.addr, nbytes);
- gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
+ gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
+ (const u8 *)&tmp,
&ctrblk);
memcpy(walk.dst.virt.addr, &tmp, nbytes);
le128_to_be128((be128 *)walk.iv, &ctrblk);
@@ -240,7 +244,8 @@ static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
if (nbytes >= func_bytes) {
do {
- gctx->funcs[i].fn_u.xts(ctx, dst, src,
+ gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
+ (const u8 *)src,
walk->iv);
src += num_blocks;
@@ -354,8 +359,8 @@ out:
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
-void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
- common_glue_func_t fn)
+void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv, common_glue_func_t fn)
{
le128 ivblk = *iv;
@@ -363,13 +368,13 @@ void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
gf128mul_x_ble(iv, &ivblk);
/* CC <- T xor C */
- u128_xor(dst, src, (u128 *)&ivblk);
+ u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);
/* PP <- D(Key2,CC) */
- fn(ctx, (u8 *)dst, (u8 *)dst);
+ fn(ctx, dst, dst);
/* P <- T xor PP */
- u128_xor(dst, dst, (u128 *)&ivblk);
+ u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
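
glue_xts_crypt_128bit_one() implements the per-block XTS recipe spelled out in its comments: xor the tweak into the ciphertext, run the block cipher, then xor the tweak back out, with the tweak advanced by a GF(2^128) doubling between blocks. A compact standalone sketch of that xor-crypt-xor step; example_block_crypt() is only a stand-in for the algorithm's real single-block routine:

    #include <stdint.h>
    #include <string.h>

    /* Stand-in for the real block cipher (identity here, illustration only). */
    static void example_block_crypt(const void *ctx, uint8_t dst[16],
                                    const uint8_t src[16])
    {
            (void)ctx;
            memcpy(dst, src, 16);
    }

    /* One XTS block, following the CC = T^C, PP = D(Key2,CC), P = T^PP recipe. */
    static void example_xts_one_block(const void *ctx, uint8_t dst[16],
                                      const uint8_t src[16], const uint8_t tweak[16])
    {
            uint8_t buf[16];
            int i;

            for (i = 0; i < 16; i++)            /* CC <- T xor C */
                    buf[i] = src[i] ^ tweak[i];
            example_block_crypt(ctx, buf, buf); /* PP <- D(Key2, CC) */
            for (i = 0; i < 16; i++)            /* P  <- T xor PP  */
                    dst[i] = buf[i] ^ tweak[i];
    }
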
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
deleted file mode 100644
index d6063feda9da..000000000000
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ /dev/null
@@ -1,390 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
- *
- * Copyright (C) 2015 Martin Willi
- */
-
-#include <linux/linkage.h>
-
-.section .rodata.cst32.ANMASK, "aM", @progbits, 32
-.align 32
-ANMASK: .octa 0x0000000003ffffff0000000003ffffff
- .octa 0x0000000003ffffff0000000003ffffff
-
-.section .rodata.cst32.ORMASK, "aM", @progbits, 32
-.align 32
-ORMASK: .octa 0x00000000010000000000000001000000
- .octa 0x00000000010000000000000001000000
-
-.text
-
-#define h0 0x00(%rdi)
-#define h1 0x04(%rdi)
-#define h2 0x08(%rdi)
-#define h3 0x0c(%rdi)
-#define h4 0x10(%rdi)
-#define r0 0x00(%rdx)
-#define r1 0x04(%rdx)
-#define r2 0x08(%rdx)
-#define r3 0x0c(%rdx)
-#define r4 0x10(%rdx)
-#define u0 0x00(%r8)
-#define u1 0x04(%r8)
-#define u2 0x08(%r8)
-#define u3 0x0c(%r8)
-#define u4 0x10(%r8)
-#define w0 0x14(%r8)
-#define w1 0x18(%r8)
-#define w2 0x1c(%r8)
-#define w3 0x20(%r8)
-#define w4 0x24(%r8)
-#define y0 0x28(%r8)
-#define y1 0x2c(%r8)
-#define y2 0x30(%r8)
-#define y3 0x34(%r8)
-#define y4 0x38(%r8)
-#define m %rsi
-#define hc0 %ymm0
-#define hc1 %ymm1
-#define hc2 %ymm2
-#define hc3 %ymm3
-#define hc4 %ymm4
-#define hc0x %xmm0
-#define hc1x %xmm1
-#define hc2x %xmm2
-#define hc3x %xmm3
-#define hc4x %xmm4
-#define t1 %ymm5
-#define t2 %ymm6
-#define t1x %xmm5
-#define t2x %xmm6
-#define ruwy0 %ymm7
-#define ruwy1 %ymm8
-#define ruwy2 %ymm9
-#define ruwy3 %ymm10
-#define ruwy4 %ymm11
-#define ruwy0x %xmm7
-#define ruwy1x %xmm8
-#define ruwy2x %xmm9
-#define ruwy3x %xmm10
-#define ruwy4x %xmm11
-#define svxz1 %ymm12
-#define svxz2 %ymm13
-#define svxz3 %ymm14
-#define svxz4 %ymm15
-#define d0 %r9
-#define d1 %r10
-#define d2 %r11
-#define d3 %r12
-#define d4 %r13
-
-SYM_FUNC_START(poly1305_4block_avx2)
- # %rdi: Accumulator h[5]
- # %rsi: 64 byte input block m
- # %rdx: Poly1305 key r[5]
- # %rcx: Quadblock count
- # %r8: Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5],
-
- # This four-block variant uses loop unrolled block processing. It
- # requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
- # h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
-
- vzeroupper
- push %rbx
- push %r12
- push %r13
-
- # combine r0,u0,w0,y0
- vmovd y0,ruwy0x
- vmovd w0,t1x
- vpunpcklqdq t1,ruwy0,ruwy0
- vmovd u0,t1x
- vmovd r0,t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,ruwy0,ruwy0
-
- # combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
- vmovd y1,ruwy1x
- vmovd w1,t1x
- vpunpcklqdq t1,ruwy1,ruwy1
- vmovd u1,t1x
- vmovd r1,t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,ruwy1,ruwy1
- vpslld $2,ruwy1,svxz1
- vpaddd ruwy1,svxz1,svxz1
-
- # combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
- vmovd y2,ruwy2x
- vmovd w2,t1x
- vpunpcklqdq t1,ruwy2,ruwy2
- vmovd u2,t1x
- vmovd r2,t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,ruwy2,ruwy2
- vpslld $2,ruwy2,svxz2
- vpaddd ruwy2,svxz2,svxz2
-
- # combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
- vmovd y3,ruwy3x
- vmovd w3,t1x
- vpunpcklqdq t1,ruwy3,ruwy3
- vmovd u3,t1x
- vmovd r3,t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,ruwy3,ruwy3
- vpslld $2,ruwy3,svxz3
- vpaddd ruwy3,svxz3,svxz3
-
- # combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
- vmovd y4,ruwy4x
- vmovd w4,t1x
- vpunpcklqdq t1,ruwy4,ruwy4
- vmovd u4,t1x
- vmovd r4,t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,ruwy4,ruwy4
- vpslld $2,ruwy4,svxz4
- vpaddd ruwy4,svxz4,svxz4
-
-.Ldoblock4:
- # hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
- # m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
- vmovd 0x00(m),hc0x
- vmovd 0x10(m),t1x
- vpunpcklqdq t1,hc0,hc0
- vmovd 0x20(m),t1x
- vmovd 0x30(m),t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,hc0,hc0
- vpand ANMASK(%rip),hc0,hc0
- vmovd h0,t1x
- vpaddd t1,hc0,hc0
- # hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
- # (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
- vmovd 0x03(m),hc1x
- vmovd 0x13(m),t1x
- vpunpcklqdq t1,hc1,hc1
- vmovd 0x23(m),t1x
- vmovd 0x33(m),t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,hc1,hc1
- vpsrld $2,hc1,hc1
- vpand ANMASK(%rip),hc1,hc1
- vmovd h1,t1x
- vpaddd t1,hc1,hc1
- # hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
- # (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
- vmovd 0x06(m),hc2x
- vmovd 0x16(m),t1x
- vpunpcklqdq t1,hc2,hc2
- vmovd 0x26(m),t1x
- vmovd 0x36(m),t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,hc2,hc2
- vpsrld $4,hc2,hc2
- vpand ANMASK(%rip),hc2,hc2
- vmovd h2,t1x
- vpaddd t1,hc2,hc2
- # hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
- # (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
- vmovd 0x09(m),hc3x
- vmovd 0x19(m),t1x
- vpunpcklqdq t1,hc3,hc3
- vmovd 0x29(m),t1x
- vmovd 0x39(m),t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,hc3,hc3
- vpsrld $6,hc3,hc3
- vpand ANMASK(%rip),hc3,hc3
- vmovd h3,t1x
- vpaddd t1,hc3,hc3
- # hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
- # (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
- vmovd 0x0c(m),hc4x
- vmovd 0x1c(m),t1x
- vpunpcklqdq t1,hc4,hc4
- vmovd 0x2c(m),t1x
- vmovd 0x3c(m),t2x
- vpunpcklqdq t2,t1,t1
- vperm2i128 $0x20,t1,hc4,hc4
- vpsrld $8,hc4,hc4
- vpor ORMASK(%rip),hc4,hc4
- vmovd h4,t1x
- vpaddd t1,hc4,hc4
-
- # t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
- vpmuludq hc0,ruwy0,t1
- # t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
- vpmuludq hc1,svxz4,t2
- vpaddq t2,t1,t1
- # t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
- vpmuludq hc2,svxz3,t2
- vpaddq t2,t1,t1
- # t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
- vpmuludq hc3,svxz2,t2
- vpaddq t2,t1,t1
- # t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
- vpmuludq hc4,svxz1,t2
- vpaddq t2,t1,t1
- # d0 = t1[0] + t1[1] + t1[2] + t1[3]
- vpermq $0xee,t1,t2
- vpaddq t2,t1,t1
- vpsrldq $8,t1,t2
- vpaddq t2,t1,t1
- vmovq t1x,d0
-
- # t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ]
- vpmuludq hc0,ruwy1,t1
- # t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
- vpmuludq hc1,ruwy0,t2
- vpaddq t2,t1,t1
- # t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
- vpmuludq hc2,svxz4,t2
- vpaddq t2,t1,t1
- # t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
- vpmuludq hc3,svxz3,t2
- vpaddq t2,t1,t1
- # t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
- vpmuludq hc4,svxz2,t2
- vpaddq t2,t1,t1
- # d1 = t1[0] + t1[1] + t1[2] + t1[3]
- vpermq $0xee,t1,t2
- vpaddq t2,t1,t1
- vpsrldq $8,t1,t2
- vpaddq t2,t1,t1
- vmovq t1x,d1
-
- # t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
- vpmuludq hc0,ruwy2,t1
- # t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
- vpmuludq hc1,ruwy1,t2
- vpaddq t2,t1,t1
- # t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
- vpmuludq hc2,ruwy0,t2
- vpaddq t2,t1,t1
- # t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
- vpmuludq hc3,svxz4,t2
- vpaddq t2,t1,t1
- # t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
- vpmuludq hc4,svxz3,t2
- vpaddq t2,t1,t1
- # d2 = t1[0] + t1[1] + t1[2] + t1[3]
- vpermq $0xee,t1,t2
- vpaddq t2,t1,t1
- vpsrldq $8,t1,t2
- vpaddq t2,t1,t1
- vmovq t1x,d2
-
- # t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
- vpmuludq hc0,ruwy3,t1
- # t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
- vpmuludq hc1,ruwy2,t2
- vpaddq t2,t1,t1
- # t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
- vpmuludq hc2,ruwy1,t2
- vpaddq t2,t1,t1
- # t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
- vpmuludq hc3,ruwy0,t2
- vpaddq t2,t1,t1
- # t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
- vpmuludq hc4,svxz4,t2
- vpaddq t2,t1,t1
- # d3 = t1[0] + t1[1] + t1[2] + t1[3]
- vpermq $0xee,t1,t2
- vpaddq t2,t1,t1
- vpsrldq $8,t1,t2
- vpaddq t2,t1,t1
- vmovq t1x,d3
-
- # t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
- vpmuludq hc0,ruwy4,t1
- # t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
- vpmuludq hc1,ruwy3,t2
- vpaddq t2,t1,t1
- # t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
- vpmuludq hc2,ruwy2,t2
- vpaddq t2,t1,t1
- # t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
- vpmuludq hc3,ruwy1,t2
- vpaddq t2,t1,t1
- # t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
- vpmuludq hc4,ruwy0,t2
- vpaddq t2,t1,t1
- # d4 = t1[0] + t1[1] + t1[2] + t1[3]
- vpermq $0xee,t1,t2
- vpaddq t2,t1,t1
- vpsrldq $8,t1,t2
- vpaddq t2,t1,t1
- vmovq t1x,d4
-
- # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
- # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
- # amount. Careful: we must not assume the carry bits 'd0 >> 26',
- # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
- # integers. It's true in a single-block implementation, but not here.
-
- # d1 += d0 >> 26
- mov d0,%rax
- shr $26,%rax
- add %rax,d1
- # h0 = d0 & 0x3ffffff
- mov d0,%rbx
- and $0x3ffffff,%ebx
-
- # d2 += d1 >> 26
- mov d1,%rax
- shr $26,%rax
- add %rax,d2
- # h1 = d1 & 0x3ffffff
- mov d1,%rax
- and $0x3ffffff,%eax
- mov %eax,h1
-
- # d3 += d2 >> 26
- mov d2,%rax
- shr $26,%rax
- add %rax,d3
- # h2 = d2 & 0x3ffffff
- mov d2,%rax
- and $0x3ffffff,%eax
- mov %eax,h2
-
- # d4 += d3 >> 26
- mov d3,%rax
- shr $26,%rax
- add %rax,d4
- # h3 = d3 & 0x3ffffff
- mov d3,%rax
- and $0x3ffffff,%eax
- mov %eax,h3
-
- # h0 += (d4 >> 26) * 5
- mov d4,%rax
- shr $26,%rax
- lea (%rax,%rax,4),%rax
- add %rax,%rbx
- # h4 = d4 & 0x3ffffff
- mov d4,%rax
- and $0x3ffffff,%eax
- mov %eax,h4
-
- # h1 += h0 >> 26
- mov %rbx,%rax
- shr $26,%rax
- add %eax,h1
- # h0 = h0 & 0x3ffffff
- andl $0x3ffffff,%ebx
- mov %ebx,h0
-
- add $0x40,m
- dec %rcx
- jnz .Ldoblock4
-
- vzeroupper
- pop %r13
- pop %r12
- pop %rbx
- ret
-SYM_FUNC_END(poly1305_4block_avx2)
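
The carry chain at the end of the block above (and of the SSE2 file that follows) is a partial reduction of the 5-limb base 2^26 Poly1305 state modulo 2^130 - 5: each limb's overflow is carried into the next, and the top overflow is folded back into limb 0 multiplied by 5, since 2^130 is congruent to 5. A scalar C sketch of the same sequence, with 64-bit limb sums d[0..4] as in the assembly; this is a simplification for illustration, not the kernel's actual implementation:

    #include <stdint.h>

    /* Partial reduction of 64-bit limb sums d[0..4] into 26-bit limbs h[0..4],
     * mod 2^130 - 5: carry d0 -> d1 -> d2 -> d3 -> d4, fold (d4 >> 26) * 5 back
     * into h0, then one extra carry h0 -> h1, mirroring the assembly comments. */
    static void example_poly1305_partial_reduce(uint32_t h[5], const uint64_t d[5])
    {
            uint64_t d1 = d[1] + (d[0] >> 26);  h[0] = d[0] & 0x3ffffff;
            uint64_t d2 = d[2] + (d1   >> 26);  h[1] = (uint32_t)d1 & 0x3ffffff;
            uint64_t d3 = d[3] + (d2   >> 26);  h[2] = (uint32_t)d2 & 0x3ffffff;
            uint64_t d4 = d[4] + (d3   >> 26);  h[3] = (uint32_t)d3 & 0x3ffffff;

            uint64_t c = (d4 >> 26) * 5;        /* 2^130 == 5 (mod 2^130 - 5) */
            h[4] = (uint32_t)d4 & 0x3ffffff;

            c += h[0];
            h[1] += (uint32_t)(c >> 26);        /* h1 may slightly exceed 26 bits */
            h[0] = (uint32_t)c & 0x3ffffff;
    }
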
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
deleted file mode 100644
index d8ea29b96640..000000000000
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ /dev/null
@@ -1,590 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions
- *
- * Copyright (C) 2015 Martin Willi
- */
-
-#include <linux/linkage.h>
-
-.section .rodata.cst16.ANMASK, "aM", @progbits, 16
-.align 16
-ANMASK: .octa 0x0000000003ffffff0000000003ffffff
-
-.section .rodata.cst16.ORMASK, "aM", @progbits, 16
-.align 16
-ORMASK: .octa 0x00000000010000000000000001000000
-
-.text
-
-#define h0 0x00(%rdi)
-#define h1 0x04(%rdi)
-#define h2 0x08(%rdi)
-#define h3 0x0c(%rdi)
-#define h4 0x10(%rdi)
-#define r0 0x00(%rdx)
-#define r1 0x04(%rdx)
-#define r2 0x08(%rdx)
-#define r3 0x0c(%rdx)
-#define r4 0x10(%rdx)
-#define s1 0x00(%rsp)
-#define s2 0x04(%rsp)
-#define s3 0x08(%rsp)
-#define s4 0x0c(%rsp)
-#define m %rsi
-#define h01 %xmm0
-#define h23 %xmm1
-#define h44 %xmm2
-#define t1 %xmm3
-#define t2 %xmm4
-#define t3 %xmm5
-#define t4 %xmm6
-#define mask %xmm7
-#define d0 %r8
-#define d1 %r9
-#define d2 %r10
-#define d3 %r11
-#define d4 %r12
-
-SYM_FUNC_START(poly1305_block_sse2)
- # %rdi: Accumulator h[5]
- # %rsi: 16 byte input block m
- # %rdx: Poly1305 key r[5]
- # %rcx: Block count
-
- # This single block variant tries to improve performance by doing two
- # multiplications in parallel using SSE instructions. There is quite
- # some quadword packing involved, hence the speedup is marginal.
-
- push %rbx
- push %r12
- sub $0x10,%rsp
-
- # s1..s4 = r1..r4 * 5
- mov r1,%eax
- lea (%eax,%eax,4),%eax
- mov %eax,s1
- mov r2,%eax
- lea (%eax,%eax,4),%eax
- mov %eax,s2
- mov r3,%eax
- lea (%eax,%eax,4),%eax
- mov %eax,s3
- mov r4,%eax
- lea (%eax,%eax,4),%eax
- mov %eax,s4
-
- movdqa ANMASK(%rip),mask
-
-.Ldoblock:
- # h01 = [0, h1, 0, h0]
- # h23 = [0, h3, 0, h2]
- # h44 = [0, h4, 0, h4]
- movd h0,h01
- movd h1,t1
- movd h2,h23
- movd h3,t2
- movd h4,h44
- punpcklqdq t1,h01
- punpcklqdq t2,h23
- punpcklqdq h44,h44
-
- # h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ]
- movd 0x00(m),t1
- movd 0x03(m),t2
- psrld $2,t2
- punpcklqdq t2,t1
- pand mask,t1
- paddd t1,h01
- # h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ]
- movd 0x06(m),t1
- movd 0x09(m),t2
- psrld $4,t1
- psrld $6,t2
- punpcklqdq t2,t1
- pand mask,t1
- paddd t1,h23
- # h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ]
- mov 0x0c(m),%eax
- shr $8,%eax
- or $0x01000000,%eax
- movd %eax,t1
- pshufd $0xc4,t1,t1
- paddd t1,h44
-
- # t1[0] = h0 * r0 + h2 * s3
- # t1[1] = h1 * s4 + h3 * s2
- movd r0,t1
- movd s4,t2
- punpcklqdq t2,t1
- pmuludq h01,t1
- movd s3,t2
- movd s2,t3
- punpcklqdq t3,t2
- pmuludq h23,t2
- paddq t2,t1
- # t2[0] = h0 * r1 + h2 * s4
- # t2[1] = h1 * r0 + h3 * s3
- movd r1,t2
- movd r0,t3
- punpcklqdq t3,t2
- pmuludq h01,t2
- movd s4,t3
- movd s3,t4
- punpcklqdq t4,t3
- pmuludq h23,t3
- paddq t3,t2
- # t3[0] = h4 * s1
- # t3[1] = h4 * s2
- movd s1,t3
- movd s2,t4
- punpcklqdq t4,t3
- pmuludq h44,t3
- # d0 = t1[0] + t1[1] + t3[0]
- # d1 = t2[0] + t2[1] + t3[1]
- movdqa t1,t4
- punpcklqdq t2,t4
- punpckhqdq t2,t1
- paddq t4,t1
- paddq t3,t1
- movq t1,d0
- psrldq $8,t1
- movq t1,d1
-
- # t1[0] = h0 * r2 + h2 * r0
- # t1[1] = h1 * r1 + h3 * s4
- movd r2,t1
- movd r1,t2
- punpcklqdq t2,t1
- pmuludq h01,t1
- movd r0,t2
- movd s4,t3
- punpcklqdq t3,t2
- pmuludq h23,t2
- paddq t2,t1
- # t2[0] = h0 * r3 + h2 * r1
- # t2[1] = h1 * r2 + h3 * r0
- movd r3,t2
- movd r2,t3
- punpcklqdq t3,t2
- pmuludq h01,t2
- movd r1,t3
- movd r0,t4
- punpcklqdq t4,t3
- pmuludq h23,t3
- paddq t3,t2
- # t3[0] = h4 * s3
- # t3[1] = h4 * s4
- movd s3,t3
- movd s4,t4
- punpcklqdq t4,t3
- pmuludq h44,t3
- # d2 = t1[0] + t1[1] + t3[0]
- # d3 = t2[0] + t2[1] + t3[1]
- movdqa t1,t4
- punpcklqdq t2,t4
- punpckhqdq t2,t1
- paddq t4,t1
- paddq t3,t1
- movq t1,d2
- psrldq $8,t1
- movq t1,d3
-
- # t1[0] = h0 * r4 + h2 * r2
- # t1[1] = h1 * r3 + h3 * r1
- movd r4,t1
- movd r3,t2
- punpcklqdq t2,t1
- pmuludq h01,t1
- movd r2,t2
- movd r1,t3
- punpcklqdq t3,t2
- pmuludq h23,t2
- paddq t2,t1
- # t3[0] = h4 * r0
- movd r0,t3
- pmuludq h44,t3
- # d4 = t1[0] + t1[1] + t3[0]
- movdqa t1,t4
- psrldq $8,t4
- paddq t4,t1
- paddq t3,t1
- movq t1,d4
-
- # d1 += d0 >> 26
- mov d0,%rax
- shr $26,%rax
- add %rax,d1
- # h0 = d0 & 0x3ffffff
- mov d0,%rbx
- and $0x3ffffff,%ebx
-
- # d2 += d1 >> 26
- mov d1,%rax
- shr $26,%rax
- add %rax,d2
- # h1 = d1 & 0x3ffffff
- mov d1,%rax
- and $0x3ffffff,%eax
- mov %eax,h1
-
- # d3 += d2 >> 26
- mov d2,%rax
- shr $26,%rax
- add %rax,d3
- # h2 = d2 & 0x3ffffff
- mov d2,%rax
- and $0x3ffffff,%eax
- mov %eax,h2
-
- # d4 += d3 >> 26
- mov d3,%rax
- shr $26,%rax
- add %rax,d4
- # h3 = d3 & 0x3ffffff
- mov d3,%rax
- and $0x3ffffff,%eax
- mov %eax,h3
-
- # h0 += (d4 >> 26) * 5
- mov d4,%rax
- shr $26,%rax
- lea (%rax,%rax,4),%rax
- add %rax,%rbx
- # h4 = d4 & 0x3ffffff
- mov d4,%rax
- and $0x3ffffff,%eax
- mov %eax,h4
-
- # h1 += h0 >> 26
- mov %rbx,%rax
- shr $26,%rax
- add %eax,h1
- # h0 = h0 & 0x3ffffff
- andl $0x3ffffff,%ebx
- mov %ebx,h0
-
- add $0x10,m
- dec %rcx
- jnz .Ldoblock
-
- # Zeroing of key material
- mov %rcx,0x00(%rsp)
- mov %rcx,0x08(%rsp)
-
- add $0x10,%rsp
- pop %r12
- pop %rbx
- ret
-SYM_FUNC_END(poly1305_block_sse2)
-
-
-#define u0 0x00(%r8)
-#define u1 0x04(%r8)
-#define u2 0x08(%r8)
-#define u3 0x0c(%r8)
-#define u4 0x10(%r8)
-#define hc0 %xmm0
-#define hc1 %xmm1
-#define hc2 %xmm2
-#define hc3 %xmm5
-#define hc4 %xmm6
-#define ru0 %xmm7
-#define ru1 %xmm8
-#define ru2 %xmm9
-#define ru3 %xmm10
-#define ru4 %xmm11
-#define sv1 %xmm12
-#define sv2 %xmm13
-#define sv3 %xmm14
-#define sv4 %xmm15
-#undef d0
-#define d0 %r13
-
-SYM_FUNC_START(poly1305_2block_sse2)
- # %rdi: Accumulator h[5]
- # %rsi: 16 byte input block m
- # %rdx: Poly1305 key r[5]
- # %rcx: Doubleblock count
- # %r8: Poly1305 derived key r^2 u[5]
-
- # This two-block variant further improves performance by using loop
- # unrolled block processing. This is more straightforward and does
- # less byte shuffling, but requires a second Poly1305 key r^2:
- # h = (h + m) * r => h = (h + m1) * r^2 + m2 * r
-
- push %rbx
- push %r12
- push %r13
-
- # combine r0,u0
- movd u0,ru0
- movd r0,t1
- punpcklqdq t1,ru0
-
- # combine r1,u1 and s1=r1*5,v1=u1*5
- movd u1,ru1
- movd r1,t1
- punpcklqdq t1,ru1
- movdqa ru1,sv1
- pslld $2,sv1
- paddd ru1,sv1
-
- # combine r2,u2 and s2=r2*5,v2=u2*5
- movd u2,ru2
- movd r2,t1
- punpcklqdq t1,ru2
- movdqa ru2,sv2
- pslld $2,sv2
- paddd ru2,sv2
-
- # combine r3,u3 and s3=r3*5,v3=u3*5
- movd u3,ru3
- movd r3,t1
- punpcklqdq t1,ru3
- movdqa ru3,sv3
- pslld $2,sv3
- paddd ru3,sv3
-
- # combine r4,u4 and s4=r4*5,v4=u4*5
- movd u4,ru4
- movd r4,t1
- punpcklqdq t1,ru4
- movdqa ru4,sv4
- pslld $2,sv4
- paddd ru4,sv4
-
-.Ldoblock2:
- # hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ]
- movd 0x00(m),hc0
- movd 0x10(m),t1
- punpcklqdq t1,hc0
- pand ANMASK(%rip),hc0
- movd h0,t1
- paddd t1,hc0
- # hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ]
- movd 0x03(m),hc1
- movd 0x13(m),t1
- punpcklqdq t1,hc1
- psrld $2,hc1
- pand ANMASK(%rip),hc1
- movd h1,t1
- paddd t1,hc1
- # hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ]
- movd 0x06(m),hc2
- movd 0x16(m),t1
- punpcklqdq t1,hc2
- psrld $4,hc2
- pand ANMASK(%rip),hc2
- movd h2,t1
- paddd t1,hc2
- # hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ]
- movd 0x09(m),hc3
- movd 0x19(m),t1
- punpcklqdq t1,hc3
- psrld $6,hc3
- pand ANMASK(%rip),hc3
- movd h3,t1
- paddd t1,hc3
- # hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ]
- movd 0x0c(m),hc4
- movd 0x1c(m),t1
- punpcklqdq t1,hc4
- psrld $8,hc4
- por ORMASK(%rip),hc4
- movd h4,t1
- paddd t1,hc4
-
- # t1 = [ hc0[1] * r0, hc0[0] * u0 ]
- movdqa ru0,t1
- pmuludq hc0,t1
- # t1 += [ hc1[1] * s4, hc1[0] * v4 ]
- movdqa sv4,t2
- pmuludq hc1,t2
- paddq t2,t1
- # t1 += [ hc2[1] * s3, hc2[0] * v3 ]
- movdqa sv3,t2
- pmuludq hc2,t2
- paddq t2,t1
- # t1 += [ hc3[1] * s2, hc3[0] * v2 ]
- movdqa sv2,t2
- pmuludq hc3,t2
- paddq t2,t1
- # t1 += [ hc4[1] * s1, hc4[0] * v1 ]
- movdqa sv1,t2
- pmuludq hc4,t2
- paddq t2,t1
- # d0 = t1[0] + t1[1]
- movdqa t1,t2
- psrldq $8,t2
- paddq t2,t1
- movq t1,d0
-
- # t1 = [ hc0[1] * r1, hc0[0] * u1 ]
- movdqa ru1,t1
- pmuludq hc0,t1
- # t1 += [ hc1[1] * r0, hc1[0] * u0 ]
- movdqa ru0,t2
- pmuludq hc1,t2
- paddq t2,t1
- # t1 += [ hc2[1] * s4, hc2[0] * v4 ]
- movdqa sv4,t2
- pmuludq hc2,t2
- paddq t2,t1
- # t1 += [ hc3[1] * s3, hc3[0] * v3 ]
- movdqa sv3,t2
- pmuludq hc3,t2
- paddq t2,t1
- # t1 += [ hc4[1] * s2, hc4[0] * v2 ]
- movdqa sv2,t2
- pmuludq hc4,t2
- paddq t2,t1
- # d1 = t1[0] + t1[1]
- movdqa t1,t2
- psrldq $8,t2
- paddq t2,t1
- movq t1,d1
-
- # t1 = [ hc0[1] * r2, hc0[0] * u2 ]
- movdqa ru2,t1
- pmuludq hc0,t1
- # t1 += [ hc1[1] * r1, hc1[0] * u1 ]
- movdqa ru1,t2
- pmuludq hc1,t2
- paddq t2,t1
- # t1 += [ hc2[1] * r0, hc2[0] * u0 ]
- movdqa ru0,t2
- pmuludq hc2,t2
- paddq t2,t1
- # t1 += [ hc3[1] * s4, hc3[0] * v4 ]
- movdqa sv4,t2
- pmuludq hc3,t2
- paddq t2,t1
- # t1 += [ hc4[1] * s3, hc4[0] * v3 ]
- movdqa sv3,t2
- pmuludq hc4,t2
- paddq t2,t1
- # d2 = t1[0] + t1[1]
- movdqa t1,t2
- psrldq $8,t2
- paddq t2,t1
- movq t1,d2
-
- # t1 = [ hc0[1] * r3, hc0[0] * u3 ]
- movdqa ru3,t1
- pmuludq hc0,t1
- # t1 += [ hc1[1] * r2, hc1[0] * u2 ]
- movdqa ru2,t2
- pmuludq hc1,t2
- paddq t2,t1
- # t1 += [ hc2[1] * r1, hc2[0] * u1 ]
- movdqa ru1,t2
- pmuludq hc2,t2
- paddq t2,t1
- # t1 += [ hc3[1] * r0, hc3[0] * u0 ]
- movdqa ru0,t2
- pmuludq hc3,t2
- paddq t2,t1
- # t1 += [ hc4[1] * s4, hc4[0] * v4 ]
- movdqa sv4,t2
- pmuludq hc4,t2
- paddq t2,t1
- # d3 = t1[0] + t1[1]
- movdqa t1,t2
- psrldq $8,t2
- paddq t2,t1
- movq t1,d3
-
- # t1 = [ hc0[1] * r4, hc0[0] * u4 ]
- movdqa ru4,t1
- pmuludq hc0,t1
- # t1 += [ hc1[1] * r3, hc1[0] * u3 ]
- movdqa ru3,t2
- pmuludq hc1,t2
- paddq t2,t1
- # t1 += [ hc2[1] * r2, hc2[0] * u2 ]
- movdqa ru2,t2
- pmuludq hc2,t2
- paddq t2,t1
- # t1 += [ hc3[1] * r1, hc3[0] * u1 ]
- movdqa ru1,t2
- pmuludq hc3,t2
- paddq t2,t1
- # t1 += [ hc4[1] * r0, hc4[0] * u0 ]
- movdqa ru0,t2
- pmuludq hc4,t2
- paddq t2,t1
- # d4 = t1[0] + t1[1]
- movdqa t1,t2
- psrldq $8,t2
- paddq t2,t1
- movq t1,d4
-
- # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
- # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
- # amount. Careful: we must not assume the carry bits 'd0 >> 26',
- # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
- # integers. It's true in a single-block implementation, but not here.
-
- # d1 += d0 >> 26
- mov d0,%rax
- shr $26,%rax
- add %rax,d1
- # h0 = d0 & 0x3ffffff
- mov d0,%rbx
- and $0x3ffffff,%ebx
-
- # d2 += d1 >> 26
- mov d1,%rax
- shr $26,%rax
- add %rax,d2
- # h1 = d1 & 0x3ffffff
- mov d1,%rax
- and $0x3ffffff,%eax
- mov %eax,h1
-
- # d3 += d2 >> 26
- mov d2,%rax
- shr $26,%rax
- add %rax,d3
- # h2 = d2 & 0x3ffffff
- mov d2,%rax
- and $0x3ffffff,%eax
- mov %eax,h2
-
- # d4 += d3 >> 26
- mov d3,%rax
- shr $26,%rax
- add %rax,d4
- # h3 = d3 & 0x3ffffff
- mov d3,%rax
- and $0x3ffffff,%eax
- mov %eax,h3
-
- # h0 += (d4 >> 26) * 5
- mov d4,%rax
- shr $26,%rax
- lea (%rax,%rax,4),%rax
- add %rax,%rbx
- # h4 = d4 & 0x3ffffff
- mov d4,%rax
- and $0x3ffffff,%eax
- mov %eax,h4
-
- # h1 += h0 >> 26
- mov %rbx,%rax
- shr $26,%rax
- add %eax,h1
- # h0 = h0 & 0x3ffffff
- andl $0x3ffffff,%ebx
- mov %ebx,h0
-
- add $0x20,m
- dec %rcx
- jnz .Ldoblock2
-
- pop %r13
- pop %r12
- pop %rbx
- ret
-SYM_FUNC_END(poly1305_2block_sse2)
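
Both deleted files, like the incoming CRYPTOGAMS code, rely on the same algebraic trick: two applications of h = (h + m) * r merge into h = (h + m1) * r^2 + m2 * r, so independent products can be computed in parallel lanes using precomputed key powers. A toy C check of that identity over a small prime; the constants are arbitrary and have nothing to do with real Poly1305 parameters:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define P 1000003u   /* small prime standing in for 2^130 - 5, illustration only */

    int main(void)
    {
            uint64_t h = 12345, r = 6789, m1 = 1111, m2 = 2222;

            /* sequential: h = (h + m) * r, applied to m1 and then m2 */
            uint64_t seq = ((h + m1) % P) * r % P;
            seq = ((seq + m2) % P) * r % P;

            /* merged: h = (h + m1) * r^2 + m2 * r, as in the unrolled code paths */
            uint64_t r2 = r * r % P;
            uint64_t merged = (((h + m1) % P) * r2 + (m2 % P) * r) % P;

            assert(seq == merged);
            printf("identity holds: %llu\n", (unsigned long long)seq);
            return 0;
    }
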
diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
new file mode 100644
index 000000000000..7a6b5380a46f
--- /dev/null
+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
@@ -0,0 +1,4265 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#
+# Copyright (C) 2017-2018 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
+# Copyright (C) 2017-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+# Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved.
+#
+# This code is taken from the OpenSSL project but the author, Andy Polyakov,
+# has relicensed it under the licenses specified in the SPDX header above.
+# The original headers, including the original license headers, are
+# included below for completeness.
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements Poly1305 hash for x86_64.
+#
+# March 2015
+#
+# Initial release.
+#
+# December 2016
+#
+# Add AVX512F+VL+BW code path.
+#
+# November 2017
+#
+# Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be
+# executed even on Knights Landing. Trigger for modification was
+# observation that AVX512 code paths can negatively affect overall
+# Skylake-X system performance. Since we are likely to suppress
+# AVX512F capability flag [at least on Skylake-X], conversion serves
+# as kind of "investment protection". Note that next *lake processor,
+# Cannonlake, has AVX512IFMA code path to execute...
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone,
+# measured with rdtsc at fixed clock frequency.
+#
+# IALU/gcc-4.8(*) AVX(**) AVX2 AVX-512
+# P4 4.46/+120% -
+# Core 2 2.41/+90% -
+# Westmere 1.88/+120% -
+# Sandy Bridge 1.39/+140% 1.10
+# Haswell 1.14/+175% 1.11 0.65
+# Skylake[-X] 1.13/+120% 0.96 0.51 [0.35]
+# Silvermont 2.83/+95% -
+# Knights L 3.60/? 1.65 1.10 0.41(***)
+# Goldmont 1.70/+180% -
+# VIA Nano 1.82/+150% -
+# Sledgehammer 1.38/+160% -
+# Bulldozer 2.30/+130% 0.97
+# Ryzen 1.15/+200% 1.08 1.18
+#
+# (*) improvement coefficients relative to clang are more modest and
+# are ~50% on most processors, in both cases we are comparing to
+# __int128 code;
+# (**) an SSE2 implementation was attempted, but among non-AVX processors
+# it was faster than integer-only code only on older Intel P4 and
+# Core processors, by 30-50% (the gain shrinks on newer parts), while
+# being slower on contemporary ones, for example almost 2x slower on
+# Atom; and as the former are naturally disappearing, SSE2 is deemed
+# unnecessary;
+# (***) strangely enough performance seems to vary from core to core,
+# listed result is best case;
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+$kernel=0; $kernel=1 if (!$flavour && !$output);
+
+if (!$kernel) {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+ die "can't locate x86_64-xlate.pl";
+
+ open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+ *STDOUT=*OUT;
+
+ if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
+ }
+
+ if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
+ $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
+ $avx += 1 if ($1==2.11 && $2>=8);
+ }
+
+ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+ }
+
+ if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+ }
+} else {
+ $avx = 4; # The kernel uses ifdefs for this.
+}
+
+sub declare_function() {
+ my ($name, $align, $nargs) = @_;
+ if($kernel) {
+ $code .= ".align $align\n";
+ $code .= "SYM_FUNC_START($name)\n";
+ $code .= ".L$name:\n";
+ } else {
+ $code .= ".globl $name\n";
+ $code .= ".type $name,\@function,$nargs\n";
+ $code .= ".align $align\n";
+ $code .= "$name:\n";
+ }
+}
+
+sub end_function() {
+ my ($name) = @_;
+ if($kernel) {
+ $code .= "SYM_FUNC_END($name)\n";
+ } else {
+ $code .= ".size $name,.-$name\n";
+ }
+}
+
+$code.=<<___ if $kernel;
+#include <linux/linkage.h>
+___
+
+if ($avx) {
+$code.=<<___ if $kernel;
+.section .rodata
+___
+$code.=<<___;
+.align 64
+.Lconst:
+.Lmask24:
+.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
+.L129:
+.long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
+.Lmask26:
+.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
+.Lpermd_avx2:
+.long 2,2,2,3,2,0,2,1
+.Lpermd_avx512:
+.long 0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7
+
+.L2_44_inp_permd:
+.long 0,1,1,2,2,3,7,7
+.L2_44_inp_shift:
+.quad 0,12,24,64
+.L2_44_mask:
+.quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
+.L2_44_shift_rgt:
+.quad 44,44,42,64
+.L2_44_shift_lft:
+.quad 8,8,10,64
+
+.align 64
+.Lx_mask44:
+.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
+.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
+.Lx_mask42:
+.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
+.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
+___
+}
+$code.=<<___ if (!$kernel);
+.asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
+my ($mac,$nonce)=($inp,$len); # *_emit arguments
+my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13");
+my ($h0,$h1,$h2)=("%r14","%rbx","%r10");
+
+sub poly1305_iteration {
+# input: copy of $r1 in %rax, $h0-$h2, $r0-$r1
+# output: $h0-$h2 *= $r0-$r1
+$code.=<<___;
+ mulq $h0 # h0*r1
+ mov %rax,$d2
+ mov $r0,%rax
+ mov %rdx,$d3
+
+ mulq $h0 # h0*r0
+ mov %rax,$h0 # future $h0
+ mov $r0,%rax
+ mov %rdx,$d1
+
+ mulq $h1 # h1*r0
+ add %rax,$d2
+ mov $s1,%rax
+ adc %rdx,$d3
+
+ mulq $h1 # h1*s1
+ mov $h2,$h1 # borrow $h1
+ add %rax,$h0
+ adc %rdx,$d1
+
+ imulq $s1,$h1 # h2*s1
+ add $h1,$d2
+ mov $d1,$h1
+ adc \$0,$d3
+
+ imulq $r0,$h2 # h2*r0
+ add $d2,$h1
+ mov \$-4,%rax # mask value
+ adc $h2,$d3
+
+ and $d3,%rax # last reduction step
+ mov $d3,$h2
+ shr \$2,$d3
+ and \$3,$h2
+ add $d3,%rax
+ add %rax,$h0
+ adc \$0,$h1
+ adc \$0,$h2
+___
+}
+
+########################################################################
+# Layout of the opaque area is as follows.
+#
+# unsigned __int64 h[3]; # current hash value base 2^64
+# unsigned __int64 r[2]; # key value base 2^64
+
+$code.=<<___;
+.text
+___
+$code.=<<___ if (!$kernel);
+.extern OPENSSL_ia32cap_P
+
+.globl poly1305_init_x86_64
+.hidden poly1305_init_x86_64
+.globl poly1305_blocks_x86_64
+.hidden poly1305_blocks_x86_64
+.globl poly1305_emit_x86_64
+.hidden poly1305_emit_x86_64
+___
+&declare_function("poly1305_init_x86_64", 32, 3);
+$code.=<<___;
+ xor %rax,%rax
+ mov %rax,0($ctx) # initialize hash value
+ mov %rax,8($ctx)
+ mov %rax,16($ctx)
+
+ cmp \$0,$inp
+ je .Lno_key
+___
+$code.=<<___ if (!$kernel);
+ lea poly1305_blocks_x86_64(%rip),%r10
+ lea poly1305_emit_x86_64(%rip),%r11
+___
+$code.=<<___ if (!$kernel && $avx);
+ mov OPENSSL_ia32cap_P+4(%rip),%r9
+ lea poly1305_blocks_avx(%rip),%rax
+ lea poly1305_emit_avx(%rip),%rcx
+ bt \$`60-32`,%r9 # AVX?
+ cmovc %rax,%r10
+ cmovc %rcx,%r11
+___
+$code.=<<___ if (!$kernel && $avx>1);
+ lea poly1305_blocks_avx2(%rip),%rax
+ bt \$`5+32`,%r9 # AVX2?
+ cmovc %rax,%r10
+___
+$code.=<<___ if (!$kernel && $avx>3);
+ mov \$`(1<<31|1<<21|1<<16)`,%rax
+ shr \$32,%r9
+ and %rax,%r9
+ cmp %rax,%r9
+ je .Linit_base2_44
+___
+$code.=<<___;
+ mov \$0x0ffffffc0fffffff,%rax
+ mov \$0x0ffffffc0ffffffc,%rcx
+ and 0($inp),%rax
+ and 8($inp),%rcx
+ mov %rax,24($ctx)
+ mov %rcx,32($ctx)
+___
+$code.=<<___ if (!$kernel && $flavour !~ /elf32/);
+ mov %r10,0(%rdx)
+ mov %r11,8(%rdx)
+___
+$code.=<<___ if (!$kernel && $flavour =~ /elf32/);
+ mov %r10d,0(%rdx)
+ mov %r11d,4(%rdx)
+___
+$code.=<<___;
+ mov \$1,%eax
+.Lno_key:
+ ret
+___
+&end_function("poly1305_init_x86_64");
+
+&declare_function("poly1305_blocks_x86_64", 32, 4);
+$code.=<<___;
+.cfi_startproc
+.Lblocks:
+ shr \$4,$len
+ jz .Lno_data # too short
+
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+ push $ctx
+.cfi_push $ctx
+.Lblocks_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ mov 0($ctx),$h0 # load hash value
+ mov 8($ctx),$h1
+ mov 16($ctx),$h2
+
+ mov $s1,$r1
+ shr \$2,$s1
+ mov $r1,%rax
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+ jmp .Loop
+
+.align 32
+.Loop:
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+___
+
+ &poly1305_iteration();
+
+$code.=<<___;
+ mov $r1,%rax
+ dec %r15 # len-=16
+ jnz .Loop
+
+ mov 0(%rsp),$ctx
+.cfi_restore $ctx
+
+ mov $h0,0($ctx) # store hash value
+ mov $h1,8($ctx)
+ mov $h2,16($ctx)
+
+ mov 8(%rsp),%r15
+.cfi_restore %r15
+ mov 16(%rsp),%r14
+.cfi_restore %r14
+ mov 24(%rsp),%r13
+.cfi_restore %r13
+ mov 32(%rsp),%r12
+.cfi_restore %r12
+ mov 40(%rsp),%rbx
+.cfi_restore %rbx
+ lea 48(%rsp),%rsp
+.cfi_adjust_cfa_offset -48
+.Lno_data:
+.Lblocks_epilogue:
+ ret
+.cfi_endproc
+___
+&end_function("poly1305_blocks_x86_64");
+
+&declare_function("poly1305_emit_x86_64", 32, 3);
+$code.=<<___;
+.Lemit:
+ mov 0($ctx),%r8 # load hash value
+ mov 8($ctx),%r9
+ mov 16($ctx),%r10
+
+ mov %r8,%rax
+ add \$5,%r8 # compare to modulus
+ mov %r9,%rcx
+ adc \$0,%r9
+ adc \$0,%r10
+ shr \$2,%r10 # did 130-bit value overflow?
+ cmovnz %r8,%rax
+ cmovnz %r9,%rcx
+
+ add 0($nonce),%rax # accumulate nonce
+ adc 8($nonce),%rcx
+ mov %rax,0($mac) # write result
+ mov %rcx,8($mac)
+
+ ret
+___
+&end_function("poly1305_emit_x86_64");
+if ($avx) {
+
+if($kernel) {
+ $code .= "#ifdef CONFIG_AS_AVX\n";
+}
+
+########################################################################
+# Layout of the opaque area is as follows.
+#
+# unsigned __int32 h[5]; # current hash value base 2^26
+# unsigned __int32 is_base2_26;
+# unsigned __int64 r[2]; # key value base 2^64
+# unsigned __int64 pad;
+# struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
+#
+# where r^n are the base 2^26 digits of the powers of the multiplier key.
+# There are 5 digits, but the last four are interleaved with their
+# multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
+
+my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
+ map("%xmm$_",(0..15));
+
+$code.=<<___;
+.type __poly1305_block,\@abi-omnipotent
+.align 32
+__poly1305_block:
+ push $ctx
+___
+ &poly1305_iteration();
+$code.=<<___;
+ pop $ctx
+ ret
+.size __poly1305_block,.-__poly1305_block
+
+.type __poly1305_init_avx,\@abi-omnipotent
+.align 32
+__poly1305_init_avx:
+ push %rbp
+ mov %rsp,%rbp
+ mov $r0,$h0
+ mov $r1,$h1
+ xor $h2,$h2
+
+ lea 48+64($ctx),$ctx # size optimization
+
+ mov $r1,%rax
+ call __poly1305_block # r^2
+
+ mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26
+ mov \$0x3ffffff,%edx
+ mov $h0,$d1
+ and $h0#d,%eax
+ mov $r0,$d2
+ and $r0#d,%edx
+ mov %eax,`16*0+0-64`($ctx)
+ shr \$26,$d1
+ mov %edx,`16*0+4-64`($ctx)
+ shr \$26,$d2
+
+ mov \$0x3ffffff,%eax
+ mov \$0x3ffffff,%edx
+ and $d1#d,%eax
+ and $d2#d,%edx
+ mov %eax,`16*1+0-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov %edx,`16*1+4-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ mov %eax,`16*2+0-64`($ctx)
+ shr \$26,$d1
+ mov %edx,`16*2+4-64`($ctx)
+ shr \$26,$d2
+
+ mov $h1,%rax
+ mov $r1,%rdx
+ shl \$12,%rax
+ shl \$12,%rdx
+ or $d1,%rax
+ or $d2,%rdx
+ and \$0x3ffffff,%eax
+ and \$0x3ffffff,%edx
+ mov %eax,`16*3+0-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov %edx,`16*3+4-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ mov %eax,`16*4+0-64`($ctx)
+ mov $h1,$d1
+ mov %edx,`16*4+4-64`($ctx)
+ mov $r1,$d2
+
+ mov \$0x3ffffff,%eax
+ mov \$0x3ffffff,%edx
+ shr \$14,$d1
+ shr \$14,$d2
+ and $d1#d,%eax
+ and $d2#d,%edx
+ mov %eax,`16*5+0-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov %edx,`16*5+4-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ mov %eax,`16*6+0-64`($ctx)
+ shr \$26,$d1
+ mov %edx,`16*6+4-64`($ctx)
+ shr \$26,$d2
+
+ mov $h2,%rax
+ shl \$24,%rax
+ or %rax,$d1
+ mov $d1#d,`16*7+0-64`($ctx)
+ lea ($d1,$d1,4),$d1 # *5
+ mov $d2#d,`16*7+4-64`($ctx)
+ lea ($d2,$d2,4),$d2 # *5
+ mov $d1#d,`16*8+0-64`($ctx)
+ mov $d2#d,`16*8+4-64`($ctx)
+
+ mov $r1,%rax
+ call __poly1305_block # r^3
+
+ mov \$0x3ffffff,%eax # save r^3 base 2^26
+ mov $h0,$d1
+ and $h0#d,%eax
+ shr \$26,$d1
+ mov %eax,`16*0+12-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ and $d1#d,%edx
+ mov %edx,`16*1+12-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*2+12-64`($ctx)
+
+ mov $h1,%rax
+ shl \$12,%rax
+ or $d1,%rax
+ and \$0x3ffffff,%eax
+ mov %eax,`16*3+12-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov $h1,$d1
+ mov %eax,`16*4+12-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ shr \$14,$d1
+ and $d1#d,%edx
+ mov %edx,`16*5+12-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*6+12-64`($ctx)
+
+ mov $h2,%rax
+ shl \$24,%rax
+ or %rax,$d1
+ mov $d1#d,`16*7+12-64`($ctx)
+ lea ($d1,$d1,4),$d1 # *5
+ mov $d1#d,`16*8+12-64`($ctx)
+
+ mov $r1,%rax
+ call __poly1305_block # r^4
+
+ mov \$0x3ffffff,%eax # save r^4 base 2^26
+ mov $h0,$d1
+ and $h0#d,%eax
+ shr \$26,$d1
+ mov %eax,`16*0+8-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ and $d1#d,%edx
+ mov %edx,`16*1+8-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*2+8-64`($ctx)
+
+ mov $h1,%rax
+ shl \$12,%rax
+ or $d1,%rax
+ and \$0x3ffffff,%eax
+ mov %eax,`16*3+8-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov $h1,$d1
+ mov %eax,`16*4+8-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ shr \$14,$d1
+ and $d1#d,%edx
+ mov %edx,`16*5+8-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*6+8-64`($ctx)
+
+ mov $h2,%rax
+ shl \$24,%rax
+ or %rax,$d1
+ mov $d1#d,`16*7+8-64`($ctx)
+ lea ($d1,$d1,4),$d1 # *5
+ mov $d1#d,`16*8+8-64`($ctx)
+
+ lea -48-64($ctx),$ctx # size [de-]optimization
+ pop %rbp
+ ret
+.size __poly1305_init_avx,.-__poly1305_init_avx
+___
+
+&declare_function("poly1305_blocks_avx", 32, 4);
+$code.=<<___;
+.cfi_startproc
+ mov 20($ctx),%r8d # is_base2_26
+ cmp \$128,$len
+ jae .Lblocks_avx
+ test %r8d,%r8d
+ jz .Lblocks
+
+.Lblocks_avx:
+ and \$-16,$len
+ jz .Lno_data_avx
+
+ vzeroupper
+
+ test %r8d,%r8d
+ jz .Lbase2_64_avx
+
+ test \$31,$len
+ jz .Leven_avx
+
+ push %rbp
+.cfi_push %rbp
+ mov %rsp,%rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lblocks_avx_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 0($ctx),$d1 # load hash value
+ mov 8($ctx),$d2
+ mov 16($ctx),$h2#d
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ ################################# base 2^26 -> base 2^64
+ mov $d1#d,$h0#d
+ and \$`-1*(1<<31)`,$d1
+ mov $d2,$r1 # borrow $r1
+ mov $d2#d,$h1#d
+ and \$`-1*(1<<31)`,$d2
+
+ shr \$6,$d1
+ shl \$52,$r1
+ add $d1,$h0
+ shr \$12,$h1
+ shr \$18,$d2
+ add $r1,$h0
+ adc $d2,$h1
+
+ mov $h2,$d1
+ shl \$40,$d1
+ shr \$24,$h2
+ add $d1,$h1
+ adc \$0,$h2 # can be partially reduced...
+
+ mov \$-4,$d2 # ... so reduce
+ mov $h2,$d1
+ and $h2,$d2
+ shr \$2,$d1
+ and \$3,$h2
+ add $d2,$d1 # =*5
+ add $d1,$h0
+ adc \$0,$h1
+ adc \$0,$h2
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+
+ call __poly1305_block
+
+ test $padbit,$padbit # if $padbit is zero,
+ jz .Lstore_base2_64_avx # store hash in base 2^64 format
+
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$r0
+ mov $h1,$r1
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$r0
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $r0,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$r1
+ and \$0x3ffffff,$h1 # h[3]
+ or $r1,$h2 # h[4]
+
+ sub \$16,%r15
+ jz .Lstore_base2_26_avx
+
+ vmovd %rax#d,$H0
+ vmovd %rdx#d,$H1
+ vmovd $h0#d,$H2
+ vmovd $h1#d,$H3
+ vmovd $h2#d,$H4
+ jmp .Lproceed_avx
+
+.align 32
+.Lstore_base2_64_avx:
+ mov $h0,0($ctx)
+ mov $h1,8($ctx)
+ mov $h2,16($ctx) # note that is_base2_26 is zeroed
+ jmp .Ldone_avx
+
+.align 16
+.Lstore_base2_26_avx:
+ mov %rax#d,0($ctx) # store hash value base 2^26
+ mov %rdx#d,4($ctx)
+ mov $h0#d,8($ctx)
+ mov $h1#d,12($ctx)
+ mov $h2#d,16($ctx)
+.align 16
+.Ldone_avx:
+ pop %r15
+.cfi_restore %r15
+ pop %r14
+.cfi_restore %r14
+ pop %r13
+.cfi_restore %r13
+ pop %r12
+.cfi_restore %r12
+ pop %rbx
+.cfi_restore %rbx
+ pop %rbp
+.cfi_restore %rbp
+.Lno_data_avx:
+.Lblocks_avx_epilogue:
+ ret
+.cfi_endproc
+
+.align 32
+.Lbase2_64_avx:
+.cfi_startproc
+ push %rbp
+.cfi_push %rbp
+ mov %rsp,%rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lbase2_64_avx_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ mov 0($ctx),$h0 # load hash value
+ mov 8($ctx),$h1
+ mov 16($ctx),$h2#d
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+
+ test \$31,$len
+ jz .Linit_avx
+
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+ sub \$16,%r15
+
+ call __poly1305_block
+
+.Linit_avx:
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$d1
+ mov $h1,$d2
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$d1
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $d1,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$d2
+ and \$0x3ffffff,$h1 # h[3]
+ or $d2,$h2 # h[4]
+
+ vmovd %rax#d,$H0
+ vmovd %rdx#d,$H1
+ vmovd $h0#d,$H2
+ vmovd $h1#d,$H3
+ vmovd $h2#d,$H4
+ movl \$1,20($ctx) # set is_base2_26
+
+ call __poly1305_init_avx
+
+.Lproceed_avx:
+ mov %r15,$len
+ pop %r15
+.cfi_restore %r15
+ pop %r14
+.cfi_restore %r14
+ pop %r13
+.cfi_restore %r13
+ pop %r12
+.cfi_restore %r12
+ pop %rbx
+.cfi_restore %rbx
+ pop %rbp
+.cfi_restore %rbp
+.Lbase2_64_avx_epilogue:
+ jmp .Ldo_avx
+.cfi_endproc
+
+.align 32
+.Leven_avx:
+.cfi_startproc
+ vmovd 4*0($ctx),$H0 # load hash value
+ vmovd 4*1($ctx),$H1
+ vmovd 4*2($ctx),$H2
+ vmovd 4*3($ctx),$H3
+ vmovd 4*4($ctx),$H4
+
+.Ldo_avx:
+___
+$code.=<<___ if (!$win64);
+ lea 8(%rsp),%r10
+.cfi_def_cfa_register %r10
+ and \$-32,%rsp
+ sub \$-8,%rsp
+ lea -0x58(%rsp),%r11
+ sub \$0x178,%rsp
+___
+$code.=<<___ if ($win64);
+ lea -0xf8(%rsp),%r11
+ sub \$0x218,%rsp
+ vmovdqa %xmm6,0x50(%r11)
+ vmovdqa %xmm7,0x60(%r11)
+ vmovdqa %xmm8,0x70(%r11)
+ vmovdqa %xmm9,0x80(%r11)
+ vmovdqa %xmm10,0x90(%r11)
+ vmovdqa %xmm11,0xa0(%r11)
+ vmovdqa %xmm12,0xb0(%r11)
+ vmovdqa %xmm13,0xc0(%r11)
+ vmovdqa %xmm14,0xd0(%r11)
+ vmovdqa %xmm15,0xe0(%r11)
+.Ldo_avx_body:
+___
+$code.=<<___;
+ sub \$64,$len
+ lea -32($inp),%rax
+ cmovc %rax,$inp
+
+ vmovdqu `16*3`($ctx),$D4 # preload r0^2
+ lea `16*3+64`($ctx),$ctx # size optimization
+ lea .Lconst(%rip),%rcx
+
+ ################################################################
+ # load input
+ vmovdqu 16*2($inp),$T0
+ vmovdqu 16*3($inp),$T1
+ vmovdqa 64(%rcx),$MASK # .Lmask26
+
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpsrldq \$6,$T1,$T3
+ vpunpckhqdq $T1,$T0,$T4 # 4
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+ vpunpcklqdq $T3,$T2,$T3 # 2:3
+
+ vpsrlq \$40,$T4,$T4 # 4
+ vpsrlq \$26,$T0,$T1
+ vpand $MASK,$T0,$T0 # 0
+ vpsrlq \$4,$T3,$T2
+ vpand $MASK,$T1,$T1 # 1
+ vpsrlq \$30,$T3,$T3
+ vpand $MASK,$T2,$T2 # 2
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+
+ jbe .Lskip_loop_avx
+
+ # expand and copy pre-calculated table to stack
+ vmovdqu `16*1-64`($ctx),$D1
+ vmovdqu `16*2-64`($ctx),$D2
+ vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434
+ vpshufd \$0x44,$D4,$D0 # xx12 -> 1212
+ vmovdqa $D3,-0x90(%r11)
+ vmovdqa $D0,0x00(%rsp)
+ vpshufd \$0xEE,$D1,$D4
+ vmovdqu `16*3-64`($ctx),$D0
+ vpshufd \$0x44,$D1,$D1
+ vmovdqa $D4,-0x80(%r11)
+ vmovdqa $D1,0x10(%rsp)
+ vpshufd \$0xEE,$D2,$D3
+ vmovdqu `16*4-64`($ctx),$D1
+ vpshufd \$0x44,$D2,$D2
+ vmovdqa $D3,-0x70(%r11)
+ vmovdqa $D2,0x20(%rsp)
+ vpshufd \$0xEE,$D0,$D4
+ vmovdqu `16*5-64`($ctx),$D2
+ vpshufd \$0x44,$D0,$D0
+ vmovdqa $D4,-0x60(%r11)
+ vmovdqa $D0,0x30(%rsp)
+ vpshufd \$0xEE,$D1,$D3
+ vmovdqu `16*6-64`($ctx),$D0
+ vpshufd \$0x44,$D1,$D1
+ vmovdqa $D3,-0x50(%r11)
+ vmovdqa $D1,0x40(%rsp)
+ vpshufd \$0xEE,$D2,$D4
+ vmovdqu `16*7-64`($ctx),$D1
+ vpshufd \$0x44,$D2,$D2
+ vmovdqa $D4,-0x40(%r11)
+ vmovdqa $D2,0x50(%rsp)
+ vpshufd \$0xEE,$D0,$D3
+ vmovdqu `16*8-64`($ctx),$D2
+ vpshufd \$0x44,$D0,$D0
+ vmovdqa $D3,-0x30(%r11)
+ vmovdqa $D0,0x60(%rsp)
+ vpshufd \$0xEE,$D1,$D4
+ vpshufd \$0x44,$D1,$D1
+ vmovdqa $D4,-0x20(%r11)
+ vmovdqa $D1,0x70(%rsp)
+ vpshufd \$0xEE,$D2,$D3
+ vmovdqa 0x00(%rsp),$D4 # preload r0^2
+ vpshufd \$0x44,$D2,$D2
+ vmovdqa $D3,-0x10(%r11)
+ vmovdqa $D2,0x80(%rsp)
+
+ jmp .Loop_avx
+
+.align 32
+.Loop_avx:
+ ################################################################
+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ # \___________________/
+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ # \___________________/ \____________________/
+ #
+	# Note that we start with inp[2:3]*r^2. This is because it
+	# doesn't depend on the reduction in the previous iteration.
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ #
+ # though note that $Tx and $Hx are "reversed" in this section,
+ # and $D4 is preloaded with r0^2...
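+	# A rough scalar model of the formulas above (illustrative only, not
+	# part of the generated code; h[], r[], d[] are generic names rather
+	# than the register mapping used here). Products that spill past
+	# 2^130 are folded back via 2^130 = 5 (mod 2^130-5), which is where
+	# the pre-computed s[j] = 5*r[j] terms come from:
+	#
+	#	for (i = 0; i < 5; i++)
+	#		for (j = 0; j < 5; j++)
+	#			d[(i+j) % 5] += (u64)h[i] * (i+j < 5 ? r[j] : 5*r[j]);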
+
+ vpmuludq $T0,$D4,$D0 # d0 = h0*r0
+ vpmuludq $T1,$D4,$D1 # d1 = h1*r0
+ vmovdqa $H2,0x20(%r11) # offload hash
+	vpmuludq	$T2,$D4,$D2		# d2 = h2*r0
+ vmovdqa 0x10(%rsp),$H2 # r1^2
+ vpmuludq $T3,$D4,$D3 # d3 = h3*r0
+ vpmuludq $T4,$D4,$D4 # d4 = h4*r0
+
+ vmovdqa $H0,0x00(%r11) #
+ vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
+ vmovdqa $H1,0x10(%r11) #
+ vpmuludq $T3,$H2,$H1 # h3*r1
+ vpaddq $H0,$D0,$D0 # d0 += h4*s1
+ vpaddq $H1,$D4,$D4 # d4 += h3*r1
+ vmovdqa $H3,0x30(%r11) #
+ vpmuludq $T2,$H2,$H0 # h2*r1
+ vpmuludq $T1,$H2,$H1 # h1*r1
+ vpaddq $H0,$D3,$D3 # d3 += h2*r1
+ vmovdqa 0x30(%rsp),$H3 # r2^2
+ vpaddq $H1,$D2,$D2 # d2 += h1*r1
+ vmovdqa $H4,0x40(%r11) #
+ vpmuludq $T0,$H2,$H2 # h0*r1
+ vpmuludq $T2,$H3,$H0 # h2*r2
+ vpaddq $H2,$D1,$D1 # d1 += h0*r1
+
+ vmovdqa 0x40(%rsp),$H4 # s2^2
+ vpaddq $H0,$D4,$D4 # d4 += h2*r2
+ vpmuludq $T1,$H3,$H1 # h1*r2
+ vpmuludq $T0,$H3,$H3 # h0*r2
+ vpaddq $H1,$D3,$D3 # d3 += h1*r2
+ vmovdqa 0x50(%rsp),$H2 # r3^2
+ vpaddq $H3,$D2,$D2 # d2 += h0*r2
+ vpmuludq $T4,$H4,$H0 # h4*s2
+ vpmuludq $T3,$H4,$H4 # h3*s2
+ vpaddq $H0,$D1,$D1 # d1 += h4*s2
+ vmovdqa 0x60(%rsp),$H3 # s3^2
+ vpaddq $H4,$D0,$D0 # d0 += h3*s2
+
+ vmovdqa 0x80(%rsp),$H4 # s4^2
+ vpmuludq $T1,$H2,$H1 # h1*r3
+ vpmuludq $T0,$H2,$H2 # h0*r3
+ vpaddq $H1,$D4,$D4 # d4 += h1*r3
+ vpaddq $H2,$D3,$D3 # d3 += h0*r3
+ vpmuludq $T4,$H3,$H0 # h4*s3
+ vpmuludq $T3,$H3,$H1 # h3*s3
+ vpaddq $H0,$D2,$D2 # d2 += h4*s3
+ vmovdqu 16*0($inp),$H0 # load input
+ vpaddq $H1,$D1,$D1 # d1 += h3*s3
+ vpmuludq $T2,$H3,$H3 # h2*s3
+ vpmuludq $T2,$H4,$T2 # h2*s4
+ vpaddq $H3,$D0,$D0 # d0 += h2*s3
+
+ vmovdqu 16*1($inp),$H1 #
+ vpaddq $T2,$D1,$D1 # d1 += h2*s4
+ vpmuludq $T3,$H4,$T3 # h3*s4
+ vpmuludq $T4,$H4,$T4 # h4*s4
+ vpsrldq \$6,$H0,$H2 # splat input
+ vpaddq $T3,$D2,$D2 # d2 += h3*s4
+ vpaddq $T4,$D3,$D3 # d3 += h4*s4
+ vpsrldq \$6,$H1,$H3 #
+ vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
+ vpmuludq $T1,$H4,$T0 # h1*s4
+ vpunpckhqdq $H1,$H0,$H4 # 4
+ vpaddq $T4,$D4,$D4 # d4 += h0*r4
+ vmovdqa -0x90(%r11),$T4 # r0^4
+ vpaddq $T0,$D0,$D0 # d0 += h1*s4
+
+ vpunpcklqdq $H1,$H0,$H0 # 0:1
+ vpunpcklqdq $H3,$H2,$H3 # 2:3
+
+ #vpsrlq \$40,$H4,$H4 # 4
+ vpsrldq \$`40/8`,$H4,$H4 # 4
+ vpsrlq \$26,$H0,$H1
+ vpand $MASK,$H0,$H0 # 0
+ vpsrlq \$4,$H3,$H2
+ vpand $MASK,$H1,$H1 # 1
+ vpand 0(%rcx),$H4,$H4 # .Lmask24
+ vpsrlq \$30,$H3,$H3
+ vpand $MASK,$H2,$H2 # 2
+ vpand $MASK,$H3,$H3 # 3
+ vpor 32(%rcx),$H4,$H4 # padbit, yes, always
+
+ vpaddq 0x00(%r11),$H0,$H0 # add hash value
+ vpaddq 0x10(%r11),$H1,$H1
+ vpaddq 0x20(%r11),$H2,$H2
+ vpaddq 0x30(%r11),$H3,$H3
+ vpaddq 0x40(%r11),$H4,$H4
+
+ lea 16*2($inp),%rax
+ lea 16*4($inp),$inp
+ sub \$64,$len
+ cmovc %rax,$inp
+
+ ################################################################
+ # Now we accumulate (inp[0:1]+hash)*r^4
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ vpmuludq $H0,$T4,$T0 # h0*r0
+ vpmuludq $H1,$T4,$T1 # h1*r0
+ vpaddq $T0,$D0,$D0
+ vpaddq $T1,$D1,$D1
+ vmovdqa -0x80(%r11),$T2 # r1^4
+ vpmuludq $H2,$T4,$T0 # h2*r0
+ vpmuludq $H3,$T4,$T1 # h3*r0
+ vpaddq $T0,$D2,$D2
+ vpaddq $T1,$D3,$D3
+ vpmuludq $H4,$T4,$T4 # h4*r0
+ vpmuludq -0x70(%r11),$H4,$T0 # h4*s1
+ vpaddq $T4,$D4,$D4
+
+ vpaddq $T0,$D0,$D0 # d0 += h4*s1
+ vpmuludq $H2,$T2,$T1 # h2*r1
+ vpmuludq $H3,$T2,$T0 # h3*r1
+ vpaddq $T1,$D3,$D3 # d3 += h2*r1
+ vmovdqa -0x60(%r11),$T3 # r2^4
+ vpaddq $T0,$D4,$D4 # d4 += h3*r1
+ vpmuludq $H1,$T2,$T1 # h1*r1
+ vpmuludq $H0,$T2,$T2 # h0*r1
+ vpaddq $T1,$D2,$D2 # d2 += h1*r1
+ vpaddq $T2,$D1,$D1 # d1 += h0*r1
+
+ vmovdqa -0x50(%r11),$T4 # s2^4
+ vpmuludq $H2,$T3,$T0 # h2*r2
+ vpmuludq $H1,$T3,$T1 # h1*r2
+ vpaddq $T0,$D4,$D4 # d4 += h2*r2
+ vpaddq $T1,$D3,$D3 # d3 += h1*r2
+ vmovdqa -0x40(%r11),$T2 # r3^4
+ vpmuludq $H0,$T3,$T3 # h0*r2
+ vpmuludq $H4,$T4,$T0 # h4*s2
+ vpaddq $T3,$D2,$D2 # d2 += h0*r2
+ vpaddq $T0,$D1,$D1 # d1 += h4*s2
+ vmovdqa -0x30(%r11),$T3 # s3^4
+ vpmuludq $H3,$T4,$T4 # h3*s2
+ vpmuludq $H1,$T2,$T1 # h1*r3
+ vpaddq $T4,$D0,$D0 # d0 += h3*s2
+
+ vmovdqa -0x10(%r11),$T4 # s4^4
+ vpaddq $T1,$D4,$D4 # d4 += h1*r3
+ vpmuludq $H0,$T2,$T2 # h0*r3
+ vpmuludq $H4,$T3,$T0 # h4*s3
+ vpaddq $T2,$D3,$D3 # d3 += h0*r3
+ vpaddq $T0,$D2,$D2 # d2 += h4*s3
+ vmovdqu 16*2($inp),$T0 # load input
+ vpmuludq $H3,$T3,$T2 # h3*s3
+ vpmuludq $H2,$T3,$T3 # h2*s3
+ vpaddq $T2,$D1,$D1 # d1 += h3*s3
+ vmovdqu 16*3($inp),$T1 #
+ vpaddq $T3,$D0,$D0 # d0 += h2*s3
+
+ vpmuludq $H2,$T4,$H2 # h2*s4
+ vpmuludq $H3,$T4,$H3 # h3*s4
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpaddq $H2,$D1,$D1 # d1 += h2*s4
+ vpmuludq $H4,$T4,$H4 # h4*s4
+ vpsrldq \$6,$T1,$T3 #
+ vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
+ vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
+ vpmuludq -0x20(%r11),$H0,$H4 # h0*r4
+ vpmuludq $H1,$T4,$H0
+ vpunpckhqdq $T1,$T0,$T4 # 4
+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
+
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+ vpunpcklqdq $T3,$T2,$T3 # 2:3
+
+ #vpsrlq \$40,$T4,$T4 # 4
+ vpsrldq \$`40/8`,$T4,$T4 # 4
+ vpsrlq \$26,$T0,$T1
+ vmovdqa 0x00(%rsp),$D4 # preload r0^2
+ vpand $MASK,$T0,$T0 # 0
+ vpsrlq \$4,$T3,$T2
+ vpand $MASK,$T1,$T1 # 1
+ vpand 0(%rcx),$T4,$T4 # .Lmask24
+ vpsrlq \$30,$T3,$T3
+ vpand $MASK,$T2,$T2 # 2
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+
+ ################################################################
+ # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ # and P. Schwabe
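+	# In scalar terms (illustrative only, not generated code) each step is
+	#	c = x >> 26;  x &= 0x3ffffff;  next_limb += c;
+	# except for the h4 -> h0 step, where the carry is multiplied by 5
+	# (c + 4*c below) because 2^130 = 5 (mod 2^130-5). Carries are not
+	# propagated to completion, limbs merely stay small enough for the
+	# next round of 32x32->64 multiplications.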
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$D1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H4,$D0
+ vpand $MASK,$H4,$H4
+
+ vpsrlq \$26,$H1,$D1
+ vpand $MASK,$H1,$H1
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D0,$H0,$H0
+ vpsllq \$2,$D0,$D0
+ vpaddq $D0,$H0,$H0 # h4 -> h0
+
+ vpsrlq \$26,$H2,$D2
+ vpand $MASK,$H2,$H2
+ vpaddq $D2,$H3,$H3 # h2 -> h3
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ ja .Loop_avx
+
+.Lskip_loop_avx:
+ ################################################################
+ # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2
+ add \$32,$len
+ jnz .Long_tail_avx
+
+ vpaddq $H2,$T2,$T2
+ vpaddq $H0,$T0,$T0
+ vpaddq $H1,$T1,$T1
+ vpaddq $H3,$T3,$T3
+ vpaddq $H4,$T4,$T4
+
+.Long_tail_avx:
+ vmovdqa $H2,0x20(%r11)
+ vmovdqa $H0,0x00(%r11)
+ vmovdqa $H1,0x10(%r11)
+ vmovdqa $H3,0x30(%r11)
+ vmovdqa $H4,0x40(%r11)
+
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ vpmuludq $T2,$D4,$D2 # d2 = h2*r0
+ vpmuludq $T0,$D4,$D0 # d0 = h0*r0
+ vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n
+ vpmuludq $T1,$D4,$D1 # d1 = h1*r0
+ vpmuludq $T3,$D4,$D3 # d3 = h3*r0
+ vpmuludq $T4,$D4,$D4 # d4 = h4*r0
+
+ vpmuludq $T3,$H2,$H0 # h3*r1
+ vpaddq $H0,$D4,$D4 # d4 += h3*r1
+ vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n
+ vpmuludq $T2,$H2,$H1 # h2*r1
+ vpaddq $H1,$D3,$D3 # d3 += h2*r1
+ vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n
+ vpmuludq $T1,$H2,$H0 # h1*r1
+ vpaddq $H0,$D2,$D2 # d2 += h1*r1
+ vpmuludq $T0,$H2,$H2 # h0*r1
+ vpaddq $H2,$D1,$D1 # d1 += h0*r1
+ vpmuludq $T4,$H3,$H3 # h4*s1
+ vpaddq $H3,$D0,$D0 # d0 += h4*s1
+
+ vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n
+ vpmuludq $T2,$H4,$H1 # h2*r2
+ vpaddq $H1,$D4,$D4 # d4 += h2*r2
+ vpmuludq $T1,$H4,$H0 # h1*r2
+ vpaddq $H0,$D3,$D3 # d3 += h1*r2
+ vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n
+ vpmuludq $T0,$H4,$H4 # h0*r2
+ vpaddq $H4,$D2,$D2 # d2 += h0*r2
+ vpmuludq $T4,$H2,$H1 # h4*s2
+ vpaddq $H1,$D1,$D1 # d1 += h4*s2
+ vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n
+ vpmuludq $T3,$H2,$H2 # h3*s2
+ vpaddq $H2,$D0,$D0 # d0 += h3*s2
+
+ vpmuludq $T1,$H3,$H0 # h1*r3
+ vpaddq $H0,$D4,$D4 # d4 += h1*r3
+ vpmuludq $T0,$H3,$H3 # h0*r3
+ vpaddq $H3,$D3,$D3 # d3 += h0*r3
+ vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n
+ vpmuludq $T4,$H4,$H1 # h4*s3
+ vpaddq $H1,$D2,$D2 # d2 += h4*s3
+ vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n
+ vpmuludq $T3,$H4,$H0 # h3*s3
+ vpaddq $H0,$D1,$D1 # d1 += h3*s3
+ vpmuludq $T2,$H4,$H4 # h2*s3
+ vpaddq $H4,$D0,$D0 # d0 += h2*s3
+
+ vpmuludq $T0,$H2,$H2 # h0*r4
+ vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4
+ vpmuludq $T4,$H3,$H1 # h4*s4
+ vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4
+ vpmuludq $T3,$H3,$H0 # h3*s4
+ vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4
+ vpmuludq $T2,$H3,$H1 # h2*s4
+ vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4
+ vpmuludq $T1,$H3,$H3 # h1*s4
+ vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4
+
+ jz .Lshort_tail_avx
+
+ vmovdqu 16*0($inp),$H0 # load input
+ vmovdqu 16*1($inp),$H1
+
+ vpsrldq \$6,$H0,$H2 # splat input
+ vpsrldq \$6,$H1,$H3
+ vpunpckhqdq $H1,$H0,$H4 # 4
+ vpunpcklqdq $H1,$H0,$H0 # 0:1
+ vpunpcklqdq $H3,$H2,$H3 # 2:3
+
+ vpsrlq \$40,$H4,$H4 # 4
+ vpsrlq \$26,$H0,$H1
+ vpand $MASK,$H0,$H0 # 0
+ vpsrlq \$4,$H3,$H2
+ vpand $MASK,$H1,$H1 # 1
+ vpsrlq \$30,$H3,$H3
+ vpand $MASK,$H2,$H2 # 2
+ vpand $MASK,$H3,$H3 # 3
+ vpor 32(%rcx),$H4,$H4 # padbit, yes, always
+
+ vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4
+ vpaddq 0x00(%r11),$H0,$H0
+ vpaddq 0x10(%r11),$H1,$H1
+ vpaddq 0x20(%r11),$H2,$H2
+ vpaddq 0x30(%r11),$H3,$H3
+ vpaddq 0x40(%r11),$H4,$H4
+
+ ################################################################
+ # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
+
+ vpmuludq $H0,$T4,$T0 # h0*r0
+ vpaddq $T0,$D0,$D0 # d0 += h0*r0
+ vpmuludq $H1,$T4,$T1 # h1*r0
+ vpaddq $T1,$D1,$D1 # d1 += h1*r0
+ vpmuludq $H2,$T4,$T0 # h2*r0
+ vpaddq $T0,$D2,$D2 # d2 += h2*r0
+ vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n
+ vpmuludq $H3,$T4,$T1 # h3*r0
+ vpaddq $T1,$D3,$D3 # d3 += h3*r0
+ vpmuludq $H4,$T4,$T4 # h4*r0
+ vpaddq $T4,$D4,$D4 # d4 += h4*r0
+
+ vpmuludq $H3,$T2,$T0 # h3*r1
+ vpaddq $T0,$D4,$D4 # d4 += h3*r1
+ vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1
+ vpmuludq $H2,$T2,$T1 # h2*r1
+ vpaddq $T1,$D3,$D3 # d3 += h2*r1
+ vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2
+ vpmuludq $H1,$T2,$T0 # h1*r1
+ vpaddq $T0,$D2,$D2 # d2 += h1*r1
+ vpmuludq $H0,$T2,$T2 # h0*r1
+ vpaddq $T2,$D1,$D1 # d1 += h0*r1
+ vpmuludq $H4,$T3,$T3 # h4*s1
+ vpaddq $T3,$D0,$D0 # d0 += h4*s1
+
+ vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2
+ vpmuludq $H2,$T4,$T1 # h2*r2
+ vpaddq $T1,$D4,$D4 # d4 += h2*r2
+ vpmuludq $H1,$T4,$T0 # h1*r2
+ vpaddq $T0,$D3,$D3 # d3 += h1*r2
+ vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3
+ vpmuludq $H0,$T4,$T4 # h0*r2
+ vpaddq $T4,$D2,$D2 # d2 += h0*r2
+ vpmuludq $H4,$T2,$T1 # h4*s2
+ vpaddq $T1,$D1,$D1 # d1 += h4*s2
+ vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3
+ vpmuludq $H3,$T2,$T2 # h3*s2
+ vpaddq $T2,$D0,$D0 # d0 += h3*s2
+
+ vpmuludq $H1,$T3,$T0 # h1*r3
+ vpaddq $T0,$D4,$D4 # d4 += h1*r3
+ vpmuludq $H0,$T3,$T3 # h0*r3
+ vpaddq $T3,$D3,$D3 # d3 += h0*r3
+ vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4
+ vpmuludq $H4,$T4,$T1 # h4*s3
+ vpaddq $T1,$D2,$D2 # d2 += h4*s3
+ vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4
+ vpmuludq $H3,$T4,$T0 # h3*s3
+ vpaddq $T0,$D1,$D1 # d1 += h3*s3
+ vpmuludq $H2,$T4,$T4 # h2*s3
+ vpaddq $T4,$D0,$D0 # d0 += h2*s3
+
+ vpmuludq $H0,$T2,$T2 # h0*r4
+ vpaddq $T2,$D4,$D4 # d4 += h0*r4
+ vpmuludq $H4,$T3,$T1 # h4*s4
+ vpaddq $T1,$D3,$D3 # d3 += h4*s4
+ vpmuludq $H3,$T3,$T0 # h3*s4
+ vpaddq $T0,$D2,$D2 # d2 += h3*s4
+ vpmuludq $H2,$T3,$T1 # h2*s4
+ vpaddq $T1,$D1,$D1 # d1 += h2*s4
+ vpmuludq $H1,$T3,$T3 # h1*s4
+ vpaddq $T3,$D0,$D0 # d0 += h1*s4
+
+.Lshort_tail_avx:
+ ################################################################
+ # horizontal addition
+
+ vpsrldq \$8,$D4,$T4
+ vpsrldq \$8,$D3,$T3
+ vpsrldq \$8,$D1,$T1
+ vpsrldq \$8,$D0,$T0
+ vpsrldq \$8,$D2,$T2
+ vpaddq $T3,$D3,$D3
+ vpaddq $T4,$D4,$D4
+ vpaddq $T0,$D0,$D0
+ vpaddq $T1,$D1,$D1
+ vpaddq $T2,$D2,$D2
+
+ ################################################################
+ # lazy reduction
+
+ vpsrlq \$26,$D3,$H3
+ vpand $MASK,$D3,$D3
+ vpaddq $H3,$D4,$D4 # h3 -> h4
+
+ vpsrlq \$26,$D0,$H0
+ vpand $MASK,$D0,$D0
+ vpaddq $H0,$D1,$D1 # h0 -> h1
+
+ vpsrlq \$26,$D4,$H4
+ vpand $MASK,$D4,$D4
+
+ vpsrlq \$26,$D1,$H1
+ vpand $MASK,$D1,$D1
+ vpaddq $H1,$D2,$D2 # h1 -> h2
+
+ vpaddq $H4,$D0,$D0
+ vpsllq \$2,$H4,$H4
+ vpaddq $H4,$D0,$D0 # h4 -> h0
+
+ vpsrlq \$26,$D2,$H2
+ vpand $MASK,$D2,$D2
+ vpaddq $H2,$D3,$D3 # h2 -> h3
+
+ vpsrlq \$26,$D0,$H0
+ vpand $MASK,$D0,$D0
+ vpaddq $H0,$D1,$D1 # h0 -> h1
+
+ vpsrlq \$26,$D3,$H3
+ vpand $MASK,$D3,$D3
+ vpaddq $H3,$D4,$D4 # h3 -> h4
+
+ vmovd $D0,`4*0-48-64`($ctx) # save partially reduced
+ vmovd $D1,`4*1-48-64`($ctx)
+ vmovd $D2,`4*2-48-64`($ctx)
+ vmovd $D3,`4*3-48-64`($ctx)
+ vmovd $D4,`4*4-48-64`($ctx)
+___
+$code.=<<___ if ($win64);
+ vmovdqa 0x50(%r11),%xmm6
+ vmovdqa 0x60(%r11),%xmm7
+ vmovdqa 0x70(%r11),%xmm8
+ vmovdqa 0x80(%r11),%xmm9
+ vmovdqa 0x90(%r11),%xmm10
+ vmovdqa 0xa0(%r11),%xmm11
+ vmovdqa 0xb0(%r11),%xmm12
+ vmovdqa 0xc0(%r11),%xmm13
+ vmovdqa 0xd0(%r11),%xmm14
+ vmovdqa 0xe0(%r11),%xmm15
+ lea 0xf8(%r11),%rsp
+.Ldo_avx_epilogue:
+___
+$code.=<<___ if (!$win64);
+ lea -8(%r10),%rsp
+.cfi_def_cfa_register %rsp
+___
+$code.=<<___;
+ vzeroupper
+ ret
+.cfi_endproc
+___
+&end_function("poly1305_blocks_avx");
+
+&declare_function("poly1305_emit_avx", 32, 3);
+$code.=<<___;
+ cmpl \$0,20($ctx) # is_base2_26?
+ je .Lemit
+
+ mov 0($ctx),%eax # load hash value base 2^26
+ mov 4($ctx),%ecx
+ mov 8($ctx),%r8d
+ mov 12($ctx),%r11d
+ mov 16($ctx),%r10d
+
+ shl \$26,%rcx # base 2^26 -> base 2^64
+ mov %r8,%r9
+ shl \$52,%r8
+ add %rcx,%rax
+ shr \$12,%r9
+ add %rax,%r8 # h0
+ adc \$0,%r9
+
+ shl \$14,%r11
+ mov %r10,%rax
+ shr \$24,%r10
+ add %r11,%r9
+ shl \$40,%rax
+ add %rax,%r9 # h1
+ adc \$0,%r10 # h2
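+	# i.e. (illustrative only) reassemble
+	#	h = h[0] + (h[1]<<26) + (h[2]<<52) + (h[3]<<78) + (h[4]<<104)
+	# into 64-bit words: low bits in %r8 (h0), middle bits in %r9 (h1)
+	# and the remaining top bits in %r10 (h2).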
+
+ mov %r10,%rax # could be partially reduced, so reduce
+ mov %r10,%rcx
+ and \$3,%r10
+ shr \$2,%rax
+ and \$-4,%rcx
+ add %rcx,%rax
+ add %rax,%r8
+ adc \$0,%r9
+ adc \$0,%r10
+
+ mov %r8,%rax
+ add \$5,%r8 # compare to modulus
+ mov %r9,%rcx
+ adc \$0,%r9
+ adc \$0,%r10
+ shr \$2,%r10 # did 130-bit value overflow?
+ cmovnz %r8,%rax
+ cmovnz %r9,%rcx
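+	# In other words (illustrative pseudocode, not generated code):
+	#	t = h + 5			# t >= 2^130  iff  h >= 2^130-5
+	#	if (t >> 130): h = t		# only the low 128 bits are kept
+	#	tag = (h + nonce) mod 2^128	# the additions just below
+	# the shift of %r10 by 2 above tests exactly that 2^130 overflow.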
+
+ add 0($nonce),%rax # accumulate nonce
+ adc 8($nonce),%rcx
+ mov %rax,0($mac) # write result
+ mov %rcx,8($mac)
+
+ ret
+___
+&end_function("poly1305_emit_avx");
+
+if ($kernel) {
+ $code .= "#endif\n";
+}
+
+if ($avx>1) {
+
+if ($kernel) {
+ $code .= "#ifdef CONFIG_AS_AVX2\n";
+}
+
+my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
+ map("%ymm$_",(0..15));
+my $S4=$MASK;
+
+sub poly1305_blocks_avxN {
+ my ($avx512) = @_;
+ my $suffix = $avx512 ? "_avx512" : "";
+$code.=<<___;
+.cfi_startproc
+ mov 20($ctx),%r8d # is_base2_26
+ cmp \$128,$len
+ jae .Lblocks_avx2$suffix
+ test %r8d,%r8d
+ jz .Lblocks
+
+.Lblocks_avx2$suffix:
+ and \$-16,$len
+ jz .Lno_data_avx2$suffix
+
+ vzeroupper
+
+ test %r8d,%r8d
+ jz .Lbase2_64_avx2$suffix
+
+ test \$63,$len
+ jz .Leven_avx2$suffix
+
+ push %rbp
+.cfi_push %rbp
+ mov %rsp,%rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lblocks_avx2_body$suffix:
+
+ mov $len,%r15 # reassign $len
+
+ mov 0($ctx),$d1 # load hash value
+ mov 8($ctx),$d2
+ mov 16($ctx),$h2#d
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ ################################# base 2^26 -> base 2^64
+ mov $d1#d,$h0#d
+ and \$`-1*(1<<31)`,$d1
+ mov $d2,$r1 # borrow $r1
+ mov $d2#d,$h1#d
+ and \$`-1*(1<<31)`,$d2
+
+ shr \$6,$d1
+ shl \$52,$r1
+ add $d1,$h0
+ shr \$12,$h1
+ shr \$18,$d2
+ add $r1,$h0
+ adc $d2,$h1
+
+ mov $h2,$d1
+ shl \$40,$d1
+ shr \$24,$h2
+ add $d1,$h1
+ adc \$0,$h2 # can be partially reduced...
+
+ mov \$-4,$d2 # ... so reduce
+ mov $h2,$d1
+ and $h2,$d2
+ shr \$2,$d1
+ and \$3,$h2
+ add $d2,$d1 # =*5
+ add $d1,$h0
+ adc \$0,$h1
+ adc \$0,$h2
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+
+.Lbase2_26_pre_avx2$suffix:
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+ sub \$16,%r15
+
+ call __poly1305_block
+ mov $r1,%rax
+
+ test \$63,%r15
+ jnz .Lbase2_26_pre_avx2$suffix
+
+ test $padbit,$padbit # if $padbit is zero,
+ jz .Lstore_base2_64_avx2$suffix # store hash in base 2^64 format
+
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$r0
+ mov $h1,$r1
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$r0
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $r0,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$r1
+ and \$0x3ffffff,$h1 # h[3]
+ or $r1,$h2 # h[4]
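+	# i.e. (illustrative only): with h = h0 + (h1<<64) + (h2<<128),
+	#	h[0] =  h0        & 0x3ffffff
+	#	h[1] = (h0 >> 26) & 0x3ffffff
+	#	h[2] = (h0 >> 52 | h1 << 12) & 0x3ffffff
+	#	h[3] = (h1 >> 14) & 0x3ffffff
+	#	h[4] =  h1 >> 40  | h2 << 24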
+
+ test %r15,%r15
+ jz .Lstore_base2_26_avx2$suffix
+
+ vmovd %rax#d,%x#$H0
+ vmovd %rdx#d,%x#$H1
+ vmovd $h0#d,%x#$H2
+ vmovd $h1#d,%x#$H3
+ vmovd $h2#d,%x#$H4
+ jmp .Lproceed_avx2$suffix
+
+.align 32
+.Lstore_base2_64_avx2$suffix:
+ mov $h0,0($ctx)
+ mov $h1,8($ctx)
+ mov $h2,16($ctx) # note that is_base2_26 is zeroed
+ jmp .Ldone_avx2$suffix
+
+.align 16
+.Lstore_base2_26_avx2$suffix:
+ mov %rax#d,0($ctx) # store hash value base 2^26
+ mov %rdx#d,4($ctx)
+ mov $h0#d,8($ctx)
+ mov $h1#d,12($ctx)
+ mov $h2#d,16($ctx)
+.align 16
+.Ldone_avx2$suffix:
+ pop %r15
+.cfi_restore %r15
+ pop %r14
+.cfi_restore %r14
+ pop %r13
+.cfi_restore %r13
+ pop %r12
+.cfi_restore %r12
+ pop %rbx
+.cfi_restore %rbx
+ pop %rbp
+.cfi_restore %rbp
+.Lno_data_avx2$suffix:
+.Lblocks_avx2_epilogue$suffix:
+ ret
+.cfi_endproc
+
+.align 32
+.Lbase2_64_avx2$suffix:
+.cfi_startproc
+ push %rbp
+.cfi_push %rbp
+ mov %rsp,%rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lbase2_64_avx2_body$suffix:
+
+ mov $len,%r15 # reassign $len
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ mov 0($ctx),$h0 # load hash value
+ mov 8($ctx),$h1
+ mov 16($ctx),$h2#d
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+
+ test \$63,$len
+ jz .Linit_avx2$suffix
+
+.Lbase2_64_pre_avx2$suffix:
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+ sub \$16,%r15
+
+ call __poly1305_block
+ mov $r1,%rax
+
+ test \$63,%r15
+ jnz .Lbase2_64_pre_avx2$suffix
+
+.Linit_avx2$suffix:
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$d1
+ mov $h1,$d2
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$d1
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $d1,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$d2
+ and \$0x3ffffff,$h1 # h[3]
+ or $d2,$h2 # h[4]
+
+ vmovd %rax#d,%x#$H0
+ vmovd %rdx#d,%x#$H1
+ vmovd $h0#d,%x#$H2
+ vmovd $h1#d,%x#$H3
+ vmovd $h2#d,%x#$H4
+ movl \$1,20($ctx) # set is_base2_26
+
+ call __poly1305_init_avx
+
+.Lproceed_avx2$suffix:
+ mov %r15,$len # restore $len
+___
+$code.=<<___ if (!$kernel);
+ mov OPENSSL_ia32cap_P+8(%rip),%r9d
+ mov \$`(1<<31|1<<30|1<<16)`,%r11d
+___
+$code.=<<___;
+ pop %r15
+.cfi_restore %r15
+ pop %r14
+.cfi_restore %r14
+ pop %r13
+.cfi_restore %r13
+ pop %r12
+.cfi_restore %r12
+ pop %rbx
+.cfi_restore %rbx
+ pop %rbp
+.cfi_restore %rbp
+.Lbase2_64_avx2_epilogue$suffix:
+ jmp .Ldo_avx2$suffix
+.cfi_endproc
+
+.align 32
+.Leven_avx2$suffix:
+.cfi_startproc
+___
+$code.=<<___ if (!$kernel);
+ mov OPENSSL_ia32cap_P+8(%rip),%r9d
+___
+$code.=<<___;
+ vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
+ vmovd 4*1($ctx),%x#$H1
+ vmovd 4*2($ctx),%x#$H2
+ vmovd 4*3($ctx),%x#$H3
+ vmovd 4*4($ctx),%x#$H4
+
+.Ldo_avx2$suffix:
+___
+$code.=<<___ if (!$kernel && $avx>2);
+ cmp \$512,$len
+ jb .Lskip_avx512
+ and %r11d,%r9d
+ test \$`1<<16`,%r9d # check for AVX512F
+ jnz .Lblocks_avx512
+.Lskip_avx512$suffix:
+___
+$code.=<<___ if ($avx > 2 && $avx512 && $kernel);
+ cmp \$512,$len
+ jae .Lblocks_avx512
+___
+$code.=<<___ if (!$win64);
+ lea 8(%rsp),%r10
+.cfi_def_cfa_register %r10
+ sub \$0x128,%rsp
+___
+$code.=<<___ if ($win64);
+ lea 8(%rsp),%r10
+ sub \$0x1c8,%rsp
+ vmovdqa %xmm6,-0xb0(%r10)
+ vmovdqa %xmm7,-0xa0(%r10)
+ vmovdqa %xmm8,-0x90(%r10)
+ vmovdqa %xmm9,-0x80(%r10)
+ vmovdqa %xmm10,-0x70(%r10)
+ vmovdqa %xmm11,-0x60(%r10)
+ vmovdqa %xmm12,-0x50(%r10)
+ vmovdqa %xmm13,-0x40(%r10)
+ vmovdqa %xmm14,-0x30(%r10)
+ vmovdqa %xmm15,-0x20(%r10)
+.Ldo_avx2_body$suffix:
+___
+$code.=<<___;
+ lea .Lconst(%rip),%rcx
+ lea 48+64($ctx),$ctx # size optimization
+ vmovdqa 96(%rcx),$T0 # .Lpermd_avx2
+
+ # expand and copy pre-calculated table to stack
+ vmovdqu `16*0-64`($ctx),%x#$T2
+ and \$-512,%rsp
+ vmovdqu `16*1-64`($ctx),%x#$T3
+ vmovdqu `16*2-64`($ctx),%x#$T4
+ vmovdqu `16*3-64`($ctx),%x#$D0
+ vmovdqu `16*4-64`($ctx),%x#$D1
+ vmovdqu `16*5-64`($ctx),%x#$D2
+ lea 0x90(%rsp),%rax # size optimization
+ vmovdqu `16*6-64`($ctx),%x#$D3
+ vpermd $T2,$T0,$T2 # 00003412 -> 14243444
+ vmovdqu `16*7-64`($ctx),%x#$D4
+ vpermd $T3,$T0,$T3
+ vmovdqu `16*8-64`($ctx),%x#$MASK
+ vpermd $T4,$T0,$T4
+ vmovdqa $T2,0x00(%rsp)
+ vpermd $D0,$T0,$D0
+ vmovdqa $T3,0x20-0x90(%rax)
+ vpermd $D1,$T0,$D1
+ vmovdqa $T4,0x40-0x90(%rax)
+ vpermd $D2,$T0,$D2
+ vmovdqa $D0,0x60-0x90(%rax)
+ vpermd $D3,$T0,$D3
+ vmovdqa $D1,0x80-0x90(%rax)
+ vpermd $D4,$T0,$D4
+ vmovdqa $D2,0xa0-0x90(%rax)
+ vpermd $MASK,$T0,$MASK
+ vmovdqa $D3,0xc0-0x90(%rax)
+ vmovdqa $D4,0xe0-0x90(%rax)
+ vmovdqa $MASK,0x100-0x90(%rax)
+ vmovdqa 64(%rcx),$MASK # .Lmask26
+
+ ################################################################
+ # load input
+ vmovdqu 16*0($inp),%x#$T0
+ vmovdqu 16*1($inp),%x#$T1
+ vinserti128 \$1,16*2($inp),$T0,$T0
+ vinserti128 \$1,16*3($inp),$T1,$T1
+ lea 16*4($inp),$inp
+
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpsrldq \$6,$T1,$T3
+ vpunpckhqdq $T1,$T0,$T4 # 4
+ vpunpcklqdq $T3,$T2,$T2 # 2:3
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+
+ vpsrlq \$30,$T2,$T3
+ vpsrlq \$4,$T2,$T2
+ vpsrlq \$26,$T0,$T1
+ vpsrlq \$40,$T4,$T4 # 4
+ vpand $MASK,$T2,$T2 # 2
+ vpand $MASK,$T0,$T0 # 0
+ vpand $MASK,$T1,$T1 # 1
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+
+ vpaddq $H2,$T2,$H2 # accumulate input
+ sub \$64,$len
+ jz .Ltail_avx2$suffix
+ jmp .Loop_avx2$suffix
+
+.align 32
+.Loop_avx2$suffix:
+ ################################################################
+ # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
+ # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
+ # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
+ # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
+ # \________/\__________/
+ ################################################################
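+	# Put differently (illustrative only): inside the loop every lane is
+	# multiplied by r^4 on each iteration, so lane j accumulates
+	# inp[j], inp[j+4], inp[j+8], ... and the tail then multiplies lane j
+	# by r^(4-j), so that the four lane sums add up to the usual Horner
+	# evaluation of the Poly1305 polynomial.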
+ #vpaddq $H2,$T2,$H2 # accumulate input
+ vpaddq $H0,$T0,$H0
+ vmovdqa `32*0`(%rsp),$T0 # r0^4
+ vpaddq $H1,$T1,$H1
+ vmovdqa `32*1`(%rsp),$T1 # r1^4
+ vpaddq $H3,$T3,$H3
+ vmovdqa `32*3`(%rsp),$T2 # r2^4
+ vpaddq $H4,$T4,$H4
+ vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
+ vmovdqa `32*8-0x90`(%rax),$S4 # s4^4
+
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ #
+	# however, as h2 is "chronologically" the first one available, the
+	# corresponding operations are pulled up, so the order becomes
+ #
+ # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
+ # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
+ # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
+
+ vpmuludq $H2,$T0,$D2 # d2 = h2*r0
+ vpmuludq $H2,$T1,$D3 # d3 = h2*r1
+ vpmuludq $H2,$T2,$D4 # d4 = h2*r2
+ vpmuludq $H2,$T3,$D0 # d0 = h2*s3
+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4
+
+ vpmuludq $H0,$T1,$T4 # h0*r1
+ vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
+ vpaddq $T4,$D1,$D1 # d1 += h0*r1
+ vpaddq $H2,$D2,$D2 # d2 += h1*r1
+ vpmuludq $H3,$T1,$T4 # h3*r1
+ vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
+ vpaddq $T4,$D4,$D4 # d4 += h3*r1
+ vpaddq $H2,$D0,$D0 # d0 += h4*s1
+ vmovdqa `32*4-0x90`(%rax),$T1 # s2
+
+ vpmuludq $H0,$T0,$T4 # h0*r0
+ vpmuludq $H1,$T0,$H2 # h1*r0
+ vpaddq $T4,$D0,$D0 # d0 += h0*r0
+ vpaddq $H2,$D1,$D1 # d1 += h1*r0
+ vpmuludq $H3,$T0,$T4 # h3*r0
+ vpmuludq $H4,$T0,$H2 # h4*r0
+ vmovdqu 16*0($inp),%x#$T0 # load input
+ vpaddq $T4,$D3,$D3 # d3 += h3*r0
+ vpaddq $H2,$D4,$D4 # d4 += h4*r0
+ vinserti128 \$1,16*2($inp),$T0,$T0
+
+ vpmuludq $H3,$T1,$T4 # h3*s2
+ vpmuludq $H4,$T1,$H2 # h4*s2
+ vmovdqu 16*1($inp),%x#$T1
+ vpaddq $T4,$D0,$D0 # d0 += h3*s2
+ vpaddq $H2,$D1,$D1 # d1 += h4*s2
+ vmovdqa `32*5-0x90`(%rax),$H2 # r3
+ vpmuludq $H1,$T2,$T4 # h1*r2
+ vpmuludq $H0,$T2,$T2 # h0*r2
+ vpaddq $T4,$D3,$D3 # d3 += h1*r2
+ vpaddq $T2,$D2,$D2 # d2 += h0*r2
+ vinserti128 \$1,16*3($inp),$T1,$T1
+ lea 16*4($inp),$inp
+
+ vpmuludq $H1,$H2,$T4 # h1*r3
+ vpmuludq $H0,$H2,$H2 # h0*r3
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpaddq $T4,$D4,$D4 # d4 += h1*r3
+ vpaddq $H2,$D3,$D3 # d3 += h0*r3
+ vpmuludq $H3,$T3,$T4 # h3*s3
+ vpmuludq $H4,$T3,$H2 # h4*s3
+ vpsrldq \$6,$T1,$T3
+ vpaddq $T4,$D1,$D1 # d1 += h3*s3
+ vpaddq $H2,$D2,$D2 # d2 += h4*s3
+ vpunpckhqdq $T1,$T0,$T4 # 4
+
+ vpmuludq $H3,$S4,$H3 # h3*s4
+ vpmuludq $H4,$S4,$H4 # h4*s4
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
+	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
+ vpunpcklqdq $T3,$T2,$T3 # 2:3
+ vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
+ vpmuludq $H1,$S4,$H0 # h1*s4
+ vmovdqa 64(%rcx),$MASK # .Lmask26
+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
+
+ ################################################################
+ # lazy reduction (interleaved with tail of input splat)
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$D1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H4,$D4
+ vpand $MASK,$H4,$H4
+
+ vpsrlq \$4,$T3,$T2
+
+ vpsrlq \$26,$H1,$D1
+ vpand $MASK,$H1,$H1
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D4,$H0,$H0
+ vpsllq \$2,$D4,$D4
+ vpaddq $D4,$H0,$H0 # h4 -> h0
+
+ vpand $MASK,$T2,$T2 # 2
+ vpsrlq \$26,$T0,$T1
+
+ vpsrlq \$26,$H2,$D2
+ vpand $MASK,$H2,$H2
+ vpaddq $D2,$H3,$H3 # h2 -> h3
+
+ vpaddq $T2,$H2,$H2 # modulo-scheduled
+ vpsrlq \$30,$T3,$T3
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$40,$T4,$T4 # 4
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpand $MASK,$T0,$T0 # 0
+ vpand $MASK,$T1,$T1 # 1
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+
+ sub \$64,$len
+ jnz .Loop_avx2$suffix
+
+ .byte 0x66,0x90
+.Ltail_avx2$suffix:
+ ################################################################
+	# while the above multiplications were by r^4 in all lanes, in the
+	# last iteration the least significant lane is multiplied by r^4 and
+	# the most significant one by r, so this is a copy of the above except
+	# that references to the precomputed table are displaced by 4...
+
+ #vpaddq $H2,$T2,$H2 # accumulate input
+ vpaddq $H0,$T0,$H0
+ vmovdqu `32*0+4`(%rsp),$T0 # r0^4
+ vpaddq $H1,$T1,$H1
+ vmovdqu `32*1+4`(%rsp),$T1 # r1^4
+ vpaddq $H3,$T3,$H3
+ vmovdqu `32*3+4`(%rsp),$T2 # r2^4
+ vpaddq $H4,$T4,$H4
+ vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
+ vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4
+
+ vpmuludq $H2,$T0,$D2 # d2 = h2*r0
+ vpmuludq $H2,$T1,$D3 # d3 = h2*r1
+ vpmuludq $H2,$T2,$D4 # d4 = h2*r2
+ vpmuludq $H2,$T3,$D0 # d0 = h2*s3
+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4
+
+ vpmuludq $H0,$T1,$T4 # h0*r1
+ vpmuludq $H1,$T1,$H2 # h1*r1
+ vpaddq $T4,$D1,$D1 # d1 += h0*r1
+ vpaddq $H2,$D2,$D2 # d2 += h1*r1
+ vpmuludq $H3,$T1,$T4 # h3*r1
+ vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
+ vpaddq $T4,$D4,$D4 # d4 += h3*r1
+ vpaddq $H2,$D0,$D0 # d0 += h4*s1
+
+ vpmuludq $H0,$T0,$T4 # h0*r0
+ vpmuludq $H1,$T0,$H2 # h1*r0
+ vpaddq $T4,$D0,$D0 # d0 += h0*r0
+ vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
+ vpaddq $H2,$D1,$D1 # d1 += h1*r0
+ vpmuludq $H3,$T0,$T4 # h3*r0
+ vpmuludq $H4,$T0,$H2 # h4*r0
+ vpaddq $T4,$D3,$D3 # d3 += h3*r0
+ vpaddq $H2,$D4,$D4 # d4 += h4*r0
+
+ vpmuludq $H3,$T1,$T4 # h3*s2
+ vpmuludq $H4,$T1,$H2 # h4*s2
+ vpaddq $T4,$D0,$D0 # d0 += h3*s2
+ vpaddq $H2,$D1,$D1 # d1 += h4*s2
+ vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
+ vpmuludq $H1,$T2,$T4 # h1*r2
+ vpmuludq $H0,$T2,$T2 # h0*r2
+ vpaddq $T4,$D3,$D3 # d3 += h1*r2
+ vpaddq $T2,$D2,$D2 # d2 += h0*r2
+
+ vpmuludq $H1,$H2,$T4 # h1*r3
+ vpmuludq $H0,$H2,$H2 # h0*r3
+ vpaddq $T4,$D4,$D4 # d4 += h1*r3
+ vpaddq $H2,$D3,$D3 # d3 += h0*r3
+ vpmuludq $H3,$T3,$T4 # h3*s3
+ vpmuludq $H4,$T3,$H2 # h4*s3
+ vpaddq $T4,$D1,$D1 # d1 += h3*s3
+ vpaddq $H2,$D2,$D2 # d2 += h4*s3
+
+ vpmuludq $H3,$S4,$H3 # h3*s4
+ vpmuludq $H4,$S4,$H4 # h4*s4
+	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
+	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
+ vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
+ vpmuludq $H1,$S4,$H0 # h1*s4
+ vmovdqa 64(%rcx),$MASK # .Lmask26
+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
+
+ ################################################################
+ # horizontal addition
+
+ vpsrldq \$8,$D1,$T1
+ vpsrldq \$8,$H2,$T2
+ vpsrldq \$8,$H3,$T3
+ vpsrldq \$8,$H4,$T4
+ vpsrldq \$8,$H0,$T0
+ vpaddq $T1,$D1,$D1
+ vpaddq $T2,$H2,$H2
+ vpaddq $T3,$H3,$H3
+ vpaddq $T4,$H4,$H4
+ vpaddq $T0,$H0,$H0
+
+ vpermq \$0x2,$H3,$T3
+ vpermq \$0x2,$H4,$T4
+ vpermq \$0x2,$H0,$T0
+ vpermq \$0x2,$D1,$T1
+ vpermq \$0x2,$H2,$T2
+ vpaddq $T3,$H3,$H3
+ vpaddq $T4,$H4,$H4
+ vpaddq $T0,$H0,$H0
+ vpaddq $T1,$D1,$D1
+ vpaddq $T2,$H2,$H2
+
+ ################################################################
+ # lazy reduction
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$D1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H4,$D4
+ vpand $MASK,$H4,$H4
+
+ vpsrlq \$26,$H1,$D1
+ vpand $MASK,$H1,$H1
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D4,$H0,$H0
+ vpsllq \$2,$D4,$D4
+ vpaddq $D4,$H0,$H0 # h4 -> h0
+
+ vpsrlq \$26,$H2,$D2
+ vpand $MASK,$H2,$H2
+ vpaddq $D2,$H3,$H3 # h2 -> h3
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
+ vmovd %x#$H1,`4*1-48-64`($ctx)
+ vmovd %x#$H2,`4*2-48-64`($ctx)
+ vmovd %x#$H3,`4*3-48-64`($ctx)
+ vmovd %x#$H4,`4*4-48-64`($ctx)
+___
+$code.=<<___ if ($win64);
+ vmovdqa -0xb0(%r10),%xmm6
+ vmovdqa -0xa0(%r10),%xmm7
+ vmovdqa -0x90(%r10),%xmm8
+ vmovdqa -0x80(%r10),%xmm9
+ vmovdqa -0x70(%r10),%xmm10
+ vmovdqa -0x60(%r10),%xmm11
+ vmovdqa -0x50(%r10),%xmm12
+ vmovdqa -0x40(%r10),%xmm13
+ vmovdqa -0x30(%r10),%xmm14
+ vmovdqa -0x20(%r10),%xmm15
+ lea -8(%r10),%rsp
+.Ldo_avx2_epilogue$suffix:
+___
+$code.=<<___ if (!$win64);
+ lea -8(%r10),%rsp
+.cfi_def_cfa_register %rsp
+___
+$code.=<<___;
+ vzeroupper
+ ret
+.cfi_endproc
+___
+if($avx > 2 && $avx512) {
+my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
+my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29));
+my $PADBIT="%zmm30";
+
+map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3)); # switch to %zmm domain
+map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
+map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
+map(s/%y/%z/,($MASK));
+
+$code.=<<___;
+.cfi_startproc
+.Lblocks_avx512:
+ mov \$15,%eax
+ kmovw %eax,%k2
+___
+$code.=<<___ if (!$win64);
+ lea 8(%rsp),%r10
+.cfi_def_cfa_register %r10
+ sub \$0x128,%rsp
+___
+$code.=<<___ if ($win64);
+ lea 8(%rsp),%r10
+ sub \$0x1c8,%rsp
+ vmovdqa %xmm6,-0xb0(%r10)
+ vmovdqa %xmm7,-0xa0(%r10)
+ vmovdqa %xmm8,-0x90(%r10)
+ vmovdqa %xmm9,-0x80(%r10)
+ vmovdqa %xmm10,-0x70(%r10)
+ vmovdqa %xmm11,-0x60(%r10)
+ vmovdqa %xmm12,-0x50(%r10)
+ vmovdqa %xmm13,-0x40(%r10)
+ vmovdqa %xmm14,-0x30(%r10)
+ vmovdqa %xmm15,-0x20(%r10)
+.Ldo_avx512_body:
+___
+$code.=<<___;
+ lea .Lconst(%rip),%rcx
+ lea 48+64($ctx),$ctx # size optimization
+ vmovdqa 96(%rcx),%y#$T2 # .Lpermd_avx2
+
+ # expand pre-calculated table
+ vmovdqu `16*0-64`($ctx),%x#$D0 # will become expanded ${R0}
+ and \$-512,%rsp
+ vmovdqu `16*1-64`($ctx),%x#$D1 # will become ... ${R1}
+ mov \$0x20,%rax
+ vmovdqu `16*2-64`($ctx),%x#$T0 # ... ${S1}
+ vmovdqu `16*3-64`($ctx),%x#$D2 # ... ${R2}
+ vmovdqu `16*4-64`($ctx),%x#$T1 # ... ${S2}
+ vmovdqu `16*5-64`($ctx),%x#$D3 # ... ${R3}
+ vmovdqu `16*6-64`($ctx),%x#$T3 # ... ${S3}
+ vmovdqu `16*7-64`($ctx),%x#$D4 # ... ${R4}
+ vmovdqu `16*8-64`($ctx),%x#$T4 # ... ${S4}
+ vpermd $D0,$T2,$R0 # 00003412 -> 14243444
+ vpbroadcastq 64(%rcx),$MASK # .Lmask26
+ vpermd $D1,$T2,$R1
+ vpermd $T0,$T2,$S1
+ vpermd $D2,$T2,$R2
+ vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0
+ vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
+ vpermd $T1,$T2,$S2
+ vmovdqu64 $R1,0x00(%rsp,%rax){%k2}
+ vpsrlq \$32,$R1,$T1
+ vpermd $D3,$T2,$R3
+ vmovdqa64 $S1,0x40(%rsp){%k2}
+ vpermd $T3,$T2,$S3
+ vpermd $D4,$T2,$R4
+ vmovdqu64 $R2,0x40(%rsp,%rax){%k2}
+ vpermd $T4,$T2,$S4
+ vmovdqa64 $S2,0x80(%rsp){%k2}
+ vmovdqu64 $R3,0x80(%rsp,%rax){%k2}
+ vmovdqa64 $S3,0xc0(%rsp){%k2}
+ vmovdqu64 $R4,0xc0(%rsp,%rax){%k2}
+ vmovdqa64 $S4,0x100(%rsp){%k2}
+
+ ################################################################
+ # calculate 5th through 8th powers of the key
+ #
+ # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
+ # d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
+ # d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3
+ # d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4
+ # d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0
+
+ vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
+ vpmuludq $T0,$R1,$D1 # d1 = r0'*r1
+ vpmuludq $T0,$R2,$D2 # d2 = r0'*r2
+ vpmuludq $T0,$R3,$D3 # d3 = r0'*r3
+ vpmuludq $T0,$R4,$D4 # d4 = r0'*r4
+ vpsrlq \$32,$R2,$T2
+
+ vpmuludq $T1,$S4,$M0
+ vpmuludq $T1,$R0,$M1
+ vpmuludq $T1,$R1,$M2
+ vpmuludq $T1,$R2,$M3
+ vpmuludq $T1,$R3,$M4
+ vpsrlq \$32,$R3,$T3
+ vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4
+ vpaddq $M1,$D1,$D1 # d1 += r1'*r0
+ vpaddq $M2,$D2,$D2 # d2 += r1'*r1
+ vpaddq $M3,$D3,$D3 # d3 += r1'*r2
+ vpaddq $M4,$D4,$D4 # d4 += r1'*r3
+
+ vpmuludq $T2,$S3,$M0
+ vpmuludq $T2,$S4,$M1
+ vpmuludq $T2,$R1,$M3
+ vpmuludq $T2,$R2,$M4
+ vpmuludq $T2,$R0,$M2
+ vpsrlq \$32,$R4,$T4
+ vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3
+ vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4
+ vpaddq $M3,$D3,$D3 # d3 += r2'*r1
+ vpaddq $M4,$D4,$D4 # d4 += r2'*r2
+ vpaddq $M2,$D2,$D2 # d2 += r2'*r0
+
+ vpmuludq $T3,$S2,$M0
+ vpmuludq $T3,$R0,$M3
+ vpmuludq $T3,$R1,$M4
+ vpmuludq $T3,$S3,$M1
+ vpmuludq $T3,$S4,$M2
+ vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2
+ vpaddq $M3,$D3,$D3 # d3 += r3'*r0
+ vpaddq $M4,$D4,$D4 # d4 += r3'*r1
+ vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3
+ vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4
+
+ vpmuludq $T4,$S4,$M3
+ vpmuludq $T4,$R0,$M4
+ vpmuludq $T4,$S1,$M0
+ vpmuludq $T4,$S2,$M1
+ vpmuludq $T4,$S3,$M2
+	vpaddq		$M3,$D3,$D3		# d3 += r4'*5*r4
+	vpaddq		$M4,$D4,$D4		# d4 += r4'*r0
+	vpaddq		$M0,$D0,$D0		# d0 += r4'*5*r1
+	vpaddq		$M1,$D1,$D1		# d1 += r4'*5*r2
+	vpaddq		$M2,$D2,$D2		# d2 += r4'*5*r3
+
+ ################################################################
+ # load input
+ vmovdqu64 16*0($inp),%z#$T3
+ vmovdqu64 16*4($inp),%z#$T4
+ lea 16*8($inp),$inp
+
+ ################################################################
+ # lazy reduction
+
+ vpsrlq \$26,$D3,$M3
+ vpandq $MASK,$D3,$D3
+ vpaddq $M3,$D4,$D4 # d3 -> d4
+
+ vpsrlq \$26,$D0,$M0
+ vpandq $MASK,$D0,$D0
+ vpaddq $M0,$D1,$D1 # d0 -> d1
+
+ vpsrlq \$26,$D4,$M4
+ vpandq $MASK,$D4,$D4
+
+ vpsrlq \$26,$D1,$M1
+ vpandq $MASK,$D1,$D1
+ vpaddq $M1,$D2,$D2 # d1 -> d2
+
+ vpaddq $M4,$D0,$D0
+ vpsllq \$2,$M4,$M4
+ vpaddq $M4,$D0,$D0 # d4 -> d0
+
+ vpsrlq \$26,$D2,$M2
+ vpandq $MASK,$D2,$D2
+ vpaddq $M2,$D3,$D3 # d2 -> d3
+
+ vpsrlq \$26,$D0,$M0
+ vpandq $MASK,$D0,$D0
+ vpaddq $M0,$D1,$D1 # d0 -> d1
+
+ vpsrlq \$26,$D3,$M3
+ vpandq $MASK,$D3,$D3
+ vpaddq $M3,$D4,$D4 # d3 -> d4
+
+ ################################################################
+ # at this point we have 14243444 in $R0-$S4 and 05060708 in
+ # $D0-$D4, ...
+
+ vpunpcklqdq $T4,$T3,$T0 # transpose input
+ vpunpckhqdq $T4,$T3,$T4
+
+ # ... since input 64-bit lanes are ordered as 73625140, we could
+ # "vperm" it to 76543210 (here and in each loop iteration), *or*
+ # we could just flow along, hence the goal for $R0-$S4 is
+ # 1858286838784888 ...
+
+ vmovdqa32 128(%rcx),$M0 # .Lpermd_avx512:
+ mov \$0x7777,%eax
+ kmovw %eax,%k1
+
+ vpermd $R0,$M0,$R0 # 14243444 -> 1---2---3---4---
+ vpermd $R1,$M0,$R1
+ vpermd $R2,$M0,$R2
+ vpermd $R3,$M0,$R3
+ vpermd $R4,$M0,$R4
+
+ vpermd $D0,$M0,${R0}{%k1} # 05060708 -> 1858286838784888
+ vpermd $D1,$M0,${R1}{%k1}
+ vpermd $D2,$M0,${R2}{%k1}
+ vpermd $D3,$M0,${R3}{%k1}
+ vpermd $D4,$M0,${R4}{%k1}
+
+ vpslld \$2,$R1,$S1 # *5
+ vpslld \$2,$R2,$S2
+ vpslld \$2,$R3,$S3
+ vpslld \$2,$R4,$S4
+ vpaddd $R1,$S1,$S1
+ vpaddd $R2,$S2,$S2
+ vpaddd $R3,$S3,$S3
+ vpaddd $R4,$S4,$S4
+
+ vpbroadcastq 32(%rcx),$PADBIT # .L129
+
+ vpsrlq \$52,$T0,$T2 # splat input
+ vpsllq \$12,$T4,$T3
+ vporq $T3,$T2,$T2
+ vpsrlq \$26,$T0,$T1
+ vpsrlq \$14,$T4,$T3
+ vpsrlq \$40,$T4,$T4 # 4
+ vpandq $MASK,$T2,$T2 # 2
+ vpandq $MASK,$T0,$T0 # 0
+ #vpandq $MASK,$T1,$T1 # 1
+ #vpandq $MASK,$T3,$T3 # 3
+ #vporq $PADBIT,$T4,$T4 # padbit, yes, always
+
+ vpaddq $H2,$T2,$H2 # accumulate input
+ sub \$192,$len
+ jbe .Ltail_avx512
+ jmp .Loop_avx512
+
+.align 32
+.Loop_avx512:
+ ################################################################
+ # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
+ # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
+ # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
+ # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
+ # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
+ # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
+ # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
+ # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
+ # \________/\___________/
+ ################################################################
+ #vpaddq $H2,$T2,$H2 # accumulate input
+
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ #
+	# however, as h2 is "chronologically" the first one available, the
+	# corresponding operations are pulled up, so the order becomes
+ #
+ # d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
+ # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
+ # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
+ # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
+ # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
+
+ vpmuludq $H2,$R1,$D3 # d3 = h2*r1
+ vpaddq $H0,$T0,$H0
+ vpmuludq $H2,$R2,$D4 # d4 = h2*r2
+ vpandq $MASK,$T1,$T1 # 1
+ vpmuludq $H2,$S3,$D0 # d0 = h2*s3
+ vpandq $MASK,$T3,$T3 # 3
+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4
+ vporq $PADBIT,$T4,$T4 # padbit, yes, always
+ vpmuludq $H2,$R0,$D2 # d2 = h2*r0
+ vpaddq $H1,$T1,$H1 # accumulate input
+ vpaddq $H3,$T3,$H3
+ vpaddq $H4,$T4,$H4
+
+ vmovdqu64 16*0($inp),$T3 # load input
+ vmovdqu64 16*4($inp),$T4
+ lea 16*8($inp),$inp
+ vpmuludq $H0,$R3,$M3
+ vpmuludq $H0,$R4,$M4
+ vpmuludq $H0,$R0,$M0
+ vpmuludq $H0,$R1,$M1
+ vpaddq $M3,$D3,$D3 # d3 += h0*r3
+ vpaddq $M4,$D4,$D4 # d4 += h0*r4
+ vpaddq $M0,$D0,$D0 # d0 += h0*r0
+ vpaddq $M1,$D1,$D1 # d1 += h0*r1
+
+ vpmuludq $H1,$R2,$M3
+ vpmuludq $H1,$R3,$M4
+ vpmuludq $H1,$S4,$M0
+ vpmuludq $H0,$R2,$M2
+ vpaddq $M3,$D3,$D3 # d3 += h1*r2
+ vpaddq $M4,$D4,$D4 # d4 += h1*r3
+ vpaddq $M0,$D0,$D0 # d0 += h1*s4
+ vpaddq $M2,$D2,$D2 # d2 += h0*r2
+
+ vpunpcklqdq $T4,$T3,$T0 # transpose input
+ vpunpckhqdq $T4,$T3,$T4
+
+ vpmuludq $H3,$R0,$M3
+ vpmuludq $H3,$R1,$M4
+ vpmuludq $H1,$R0,$M1
+ vpmuludq $H1,$R1,$M2
+ vpaddq $M3,$D3,$D3 # d3 += h3*r0
+ vpaddq $M4,$D4,$D4 # d4 += h3*r1
+ vpaddq $M1,$D1,$D1 # d1 += h1*r0
+ vpaddq $M2,$D2,$D2 # d2 += h1*r1
+
+ vpmuludq $H4,$S4,$M3
+ vpmuludq $H4,$R0,$M4
+ vpmuludq $H3,$S2,$M0
+ vpmuludq $H3,$S3,$M1
+ vpaddq $M3,$D3,$D3 # d3 += h4*s4
+ vpmuludq $H3,$S4,$M2
+ vpaddq $M4,$D4,$D4 # d4 += h4*r0
+ vpaddq $M0,$D0,$D0 # d0 += h3*s2
+ vpaddq $M1,$D1,$D1 # d1 += h3*s3
+ vpaddq $M2,$D2,$D2 # d2 += h3*s4
+
+ vpmuludq $H4,$S1,$M0
+ vpmuludq $H4,$S2,$M1
+ vpmuludq $H4,$S3,$M2
+ vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
+	vpaddq		$M1,$D1,$H1		# h1 = d1 + h4*s2
+	vpaddq		$M2,$D2,$H2		# h2 = d2 + h4*s3
+
+ ################################################################
+ # lazy reduction (interleaved with input splat)
+
+ vpsrlq \$52,$T0,$T2 # splat input
+ vpsllq \$12,$T4,$T3
+
+ vpsrlq \$26,$D3,$H3
+ vpandq $MASK,$D3,$D3
+ vpaddq $H3,$D4,$H4 # h3 -> h4
+
+ vporq $T3,$T2,$T2
+
+ vpsrlq \$26,$H0,$D0
+ vpandq $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpandq $MASK,$T2,$T2 # 2
+
+ vpsrlq \$26,$H4,$D4
+ vpandq $MASK,$H4,$H4
+
+ vpsrlq \$26,$H1,$D1
+ vpandq $MASK,$H1,$H1
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D4,$H0,$H0
+ vpsllq \$2,$D4,$D4
+ vpaddq $D4,$H0,$H0 # h4 -> h0
+
+ vpaddq $T2,$H2,$H2 # modulo-scheduled
+ vpsrlq \$26,$T0,$T1
+
+ vpsrlq \$26,$H2,$D2
+ vpandq $MASK,$H2,$H2
+ vpaddq $D2,$D3,$H3 # h2 -> h3
+
+ vpsrlq \$14,$T4,$T3
+
+ vpsrlq \$26,$H0,$D0
+ vpandq $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$40,$T4,$T4 # 4
+
+ vpsrlq \$26,$H3,$D3
+ vpandq $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpandq $MASK,$T0,$T0 # 0
+ #vpandq $MASK,$T1,$T1 # 1
+ #vpandq $MASK,$T3,$T3 # 3
+ #vporq $PADBIT,$T4,$T4 # padbit, yes, always
+
+ sub \$128,$len
+ ja .Loop_avx512
+
+.Ltail_avx512:
+ ################################################################
+	# while the above multiplications were by r^8 in all lanes, in the
+	# last iteration the least significant lane is multiplied by r^8 and
+	# the most significant one by r, which is why the table gets shifted...
+
+ vpsrlq \$32,$R0,$R0 # 0105020603070408
+ vpsrlq \$32,$R1,$R1
+ vpsrlq \$32,$R2,$R2
+ vpsrlq \$32,$S3,$S3
+ vpsrlq \$32,$S4,$S4
+ vpsrlq \$32,$R3,$R3
+ vpsrlq \$32,$R4,$R4
+ vpsrlq \$32,$S1,$S1
+ vpsrlq \$32,$S2,$S2
+
+ ################################################################
+	# load either the next or the last 64 bytes of input
+ lea ($inp,$len),$inp
+
+ #vpaddq $H2,$T2,$H2 # accumulate input
+ vpaddq $H0,$T0,$H0
+
+ vpmuludq $H2,$R1,$D3 # d3 = h2*r1
+ vpmuludq $H2,$R2,$D4 # d4 = h2*r2
+ vpmuludq $H2,$S3,$D0 # d0 = h2*s3
+ vpandq $MASK,$T1,$T1 # 1
+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4
+ vpandq $MASK,$T3,$T3 # 3
+ vpmuludq $H2,$R0,$D2 # d2 = h2*r0
+ vporq $PADBIT,$T4,$T4 # padbit, yes, always
+ vpaddq $H1,$T1,$H1 # accumulate input
+ vpaddq $H3,$T3,$H3
+ vpaddq $H4,$T4,$H4
+
+ vmovdqu 16*0($inp),%x#$T0
+ vpmuludq $H0,$R3,$M3
+ vpmuludq $H0,$R4,$M4
+ vpmuludq $H0,$R0,$M0
+ vpmuludq $H0,$R1,$M1
+ vpaddq $M3,$D3,$D3 # d3 += h0*r3
+ vpaddq $M4,$D4,$D4 # d4 += h0*r4
+ vpaddq $M0,$D0,$D0 # d0 += h0*r0
+ vpaddq $M1,$D1,$D1 # d1 += h0*r1
+
+ vmovdqu 16*1($inp),%x#$T1
+ vpmuludq $H1,$R2,$M3
+ vpmuludq $H1,$R3,$M4
+ vpmuludq $H1,$S4,$M0
+ vpmuludq $H0,$R2,$M2
+ vpaddq $M3,$D3,$D3 # d3 += h1*r2
+ vpaddq $M4,$D4,$D4 # d4 += h1*r3
+ vpaddq $M0,$D0,$D0 # d0 += h1*s4
+ vpaddq $M2,$D2,$D2 # d2 += h0*r2
+
+ vinserti128 \$1,16*2($inp),%y#$T0,%y#$T0
+ vpmuludq $H3,$R0,$M3
+ vpmuludq $H3,$R1,$M4
+ vpmuludq $H1,$R0,$M1
+ vpmuludq $H1,$R1,$M2
+ vpaddq $M3,$D3,$D3 # d3 += h3*r0
+ vpaddq $M4,$D4,$D4 # d4 += h3*r1
+ vpaddq $M1,$D1,$D1 # d1 += h1*r0
+ vpaddq $M2,$D2,$D2 # d2 += h1*r1
+
+ vinserti128 \$1,16*3($inp),%y#$T1,%y#$T1
+ vpmuludq $H4,$S4,$M3
+ vpmuludq $H4,$R0,$M4
+ vpmuludq $H3,$S2,$M0
+ vpmuludq $H3,$S3,$M1
+ vpmuludq $H3,$S4,$M2
+ vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4
+ vpaddq $M4,$D4,$D4 # d4 += h4*r0
+ vpaddq $M0,$D0,$D0 # d0 += h3*s2
+ vpaddq $M1,$D1,$D1 # d1 += h3*s3
+ vpaddq $M2,$D2,$D2 # d2 += h3*s4
+
+ vpmuludq $H4,$S1,$M0
+ vpmuludq $H4,$S2,$M1
+ vpmuludq $H4,$S3,$M2
+ vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
+	vpaddq		$M1,$D1,$H1		# h1 = d1 + h4*s2
+	vpaddq		$M2,$D2,$H2		# h2 = d2 + h4*s3
+
+ ################################################################
+ # horizontal addition
+
+ mov \$1,%eax
+ vpermq \$0xb1,$H3,$D3
+ vpermq \$0xb1,$D4,$H4
+ vpermq \$0xb1,$H0,$D0
+ vpermq \$0xb1,$H1,$D1
+ vpermq \$0xb1,$H2,$D2
+ vpaddq $D3,$H3,$H3
+ vpaddq $D4,$H4,$H4
+ vpaddq $D0,$H0,$H0
+ vpaddq $D1,$H1,$H1
+ vpaddq $D2,$H2,$H2
+
+ kmovw %eax,%k3
+ vpermq \$0x2,$H3,$D3
+ vpermq \$0x2,$H4,$D4
+ vpermq \$0x2,$H0,$D0
+ vpermq \$0x2,$H1,$D1
+ vpermq \$0x2,$H2,$D2
+ vpaddq $D3,$H3,$H3
+ vpaddq $D4,$H4,$H4
+ vpaddq $D0,$H0,$H0
+ vpaddq $D1,$H1,$H1
+ vpaddq $D2,$H2,$H2
+
+ vextracti64x4 \$0x1,$H3,%y#$D3
+ vextracti64x4 \$0x1,$H4,%y#$D4
+ vextracti64x4 \$0x1,$H0,%y#$D0
+ vextracti64x4 \$0x1,$H1,%y#$D1
+ vextracti64x4 \$0x1,$H2,%y#$D2
+ vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case
+ vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2
+ vpaddq $D0,$H0,${H0}{%k3}{z}
+ vpaddq $D1,$H1,${H1}{%k3}{z}
+ vpaddq $D2,$H2,${H2}{%k3}{z}
+___
+map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
+map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
+$code.=<<___;
+ ################################################################
+ # lazy reduction (interleaved with input splat)
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpsrldq \$6,$T1,$T3
+ vpunpckhqdq $T1,$T0,$T4 # 4
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpunpcklqdq $T3,$T2,$T2 # 2:3
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H4,$D4
+ vpand $MASK,$H4,$H4
+
+ vpsrlq \$26,$H1,$D1
+ vpand $MASK,$H1,$H1
+ vpsrlq \$30,$T2,$T3
+ vpsrlq \$4,$T2,$T2
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D4,$H0,$H0
+ vpsllq \$2,$D4,$D4
+ vpsrlq \$26,$T0,$T1
+ vpsrlq \$40,$T4,$T4 # 4
+ vpaddq $D4,$H0,$H0 # h4 -> h0
+
+ vpsrlq \$26,$H2,$D2
+ vpand $MASK,$H2,$H2
+ vpand $MASK,$T2,$T2 # 2
+ vpand $MASK,$T0,$T0 # 0
+ vpaddq $D2,$H3,$H3 # h2 -> h3
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2
+ vpand $MASK,$T1,$T1 # 1
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2
+ add \$64,$len
+ jnz .Ltail_avx2$suffix
+
+ vpsubq $T2,$H2,$H2 # undo input accumulation
+ vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
+ vmovd %x#$H1,`4*1-48-64`($ctx)
+ vmovd %x#$H2,`4*2-48-64`($ctx)
+ vmovd %x#$H3,`4*3-48-64`($ctx)
+ vmovd %x#$H4,`4*4-48-64`($ctx)
+ vzeroall
+___
+$code.=<<___ if ($win64);
+ movdqa -0xb0(%r10),%xmm6
+ movdqa -0xa0(%r10),%xmm7
+ movdqa -0x90(%r10),%xmm8
+ movdqa -0x80(%r10),%xmm9
+ movdqa -0x70(%r10),%xmm10
+ movdqa -0x60(%r10),%xmm11
+ movdqa -0x50(%r10),%xmm12
+ movdqa -0x40(%r10),%xmm13
+ movdqa -0x30(%r10),%xmm14
+ movdqa -0x20(%r10),%xmm15
+ lea -8(%r10),%rsp
+.Ldo_avx512_epilogue:
+___
+$code.=<<___ if (!$win64);
+ lea -8(%r10),%rsp
+.cfi_def_cfa_register %rsp
+___
+$code.=<<___;
+ ret
+.cfi_endproc
+___
+
+}
+
+}
+
+&declare_function("poly1305_blocks_avx2", 32, 4);
+poly1305_blocks_avxN(0);
+&end_function("poly1305_blocks_avx2");
+
+if($kernel) {
+ $code .= "#endif\n";
+}
+
+#######################################################################
+if ($avx>2) {
+# On entry the input length is divisible by 64. But since the inner loop
+# processes 128 bytes per iteration, cases where the length is not divisible
+# by 128 are handled by passing the tail 64 bytes to .Ltail_avx2. For this
+# reason the stack layout is kept identical to poly1305_blocks_avx2. If not
+# for this tail, we wouldn't even have to allocate a stack frame...
+
+if($kernel) {
+ $code .= "#ifdef CONFIG_AS_AVX512\n";
+}
+
+&declare_function("poly1305_blocks_avx512", 32, 4);
+poly1305_blocks_avxN(1);
+&end_function("poly1305_blocks_avx512");
+
+if ($kernel) {
+ $code .= "#endif\n";
+}
+
+if (!$kernel && $avx>3) {
+########################################################################
+# VPMADD52 version using 2^44 radix.
+#
+# One can argue that base 2^52 would be more natural. Well, even though
+# some operations would be more natural, one has to recognize a couple of
+# things. First, base 2^52 doesn't provide an advantage over base 2^44 if
+# you look at the number of multiply-and-accumulate operations. Secondly,
+# it makes it impossible to pre-compute multiples of 5 [referred to as
+# s[]/sN in reference implementations], which means that more such
+# operations would have to be performed in the inner loop, which in turn
+# makes the critical path longer. In other words, even though base 2^44
+# reduction might look less elegant, the overall critical path is actually
+# shorter...
+
+########################################################################
+# Layout of opaque area is following.
+#
+# unsigned __int64 h[3]; # current hash value base 2^44
+# unsigned __int64 s[2]; # key value*20 base 2^44
+# unsigned __int64 r[3]; # key value base 2^44
+# struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4];
+# # r^n positions reflect
+# # placement in register, not
+# # memory, R[3] is R[1]*20
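+# For orientation (illustrative only, not part of the layout above): a
+# 128-bit value lo:hi is split into 44+44+42-bit limbs roughly as
+#	r[0] =  lo                   & 0xfffffffffff
+#	r[1] = (lo >> 44 | hi << 20) & 0xfffffffffff
+#	r[2] =  hi >> 24
+# and s[] holds 20*r[1] and 20*r[2]: products of three 44-bit limbs wrap
+# at the 2^132 boundary, and 2^132 = 4*2^130 = 20 (mod 2^130-5), hence
+# the factor 20 instead of 5.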
+
+$code.=<<___;
+.type poly1305_init_base2_44,\@function,3
+.align 32
+poly1305_init_base2_44:
+ xor %rax,%rax
+ mov %rax,0($ctx) # initialize hash value
+ mov %rax,8($ctx)
+ mov %rax,16($ctx)
+
+.Linit_base2_44:
+ lea poly1305_blocks_vpmadd52(%rip),%r10
+ lea poly1305_emit_base2_44(%rip),%r11
+
+ mov \$0x0ffffffc0fffffff,%rax
+ mov \$0x0ffffffc0ffffffc,%rcx
+ and 0($inp),%rax
+ mov \$0x00000fffffffffff,%r8
+ and 8($inp),%rcx
+ mov \$0x00000fffffffffff,%r9
+ and %rax,%r8
+ shrd \$44,%rcx,%rax
+ mov %r8,40($ctx) # r0
+ and %r9,%rax
+ shr \$24,%rcx
+ mov %rax,48($ctx) # r1
+ lea (%rax,%rax,4),%rax # *5
+ mov %rcx,56($ctx) # r2
+ shl \$2,%rax # magic <<2
+ lea (%rcx,%rcx,4),%rcx # *5
+ shl \$2,%rcx # magic <<2
+ mov %rax,24($ctx) # s1
+ mov %rcx,32($ctx) # s2
+ movq \$-1,64($ctx) # write impossible value
+___
+$code.=<<___ if ($flavour !~ /elf32/);
+ mov %r10,0(%rdx)
+ mov %r11,8(%rdx)
+___
+$code.=<<___ if ($flavour =~ /elf32/);
+ mov %r10d,0(%rdx)
+ mov %r11d,4(%rdx)
+___
+$code.=<<___;
+ mov \$1,%eax
+ ret
+.size poly1305_init_base2_44,.-poly1305_init_base2_44
+___
+{
+my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
+my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
+my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));
+
+$code.=<<___;
+.type poly1305_blocks_vpmadd52,\@function,4
+.align 32
+poly1305_blocks_vpmadd52:
+ shr \$4,$len
+ jz .Lno_data_vpmadd52 # too short
+
+ shl \$40,$padbit
+ mov 64($ctx),%r8 # peek on power of the key
+
+	# if the powers of the key are not calculated yet, process up to 3
+	# blocks with this single-block subroutine, otherwise ensure that
+	# the length is divisible by 2 blocks and pass the rest down to the
+	# next subroutine...
+
+ mov \$3,%rax
+ mov \$1,%r10
+ cmp \$4,$len # is input long
+ cmovae %r10,%rax
+ test %r8,%r8 # is power value impossible?
+ cmovns %r10,%rax
+
+ and $len,%rax # is input of favourable length?
+ jz .Lblocks_vpmadd52_4x
+
+ sub %rax,$len
+ mov \$7,%r10d
+ mov \$1,%r11d
+ kmovw %r10d,%k7
+ lea .L2_44_inp_permd(%rip),%r10
+ kmovw %r11d,%k1
+
+ vmovq $padbit,%x#$PAD
+ vmovdqa64 0(%r10),$inp_permd # .L2_44_inp_permd
+ vmovdqa64 32(%r10),$inp_shift # .L2_44_inp_shift
+ vpermq \$0xcf,$PAD,$PAD
+ vmovdqa64 64(%r10),$reduc_mask # .L2_44_mask
+
+ vmovdqu64 0($ctx),${Dlo}{%k7}{z} # load hash value
+ vmovdqu64 40($ctx),${r2r1r0}{%k7}{z} # load keys
+ vmovdqu64 32($ctx),${r1r0s2}{%k7}{z}
+ vmovdqu64 24($ctx),${r0s2s1}{%k7}{z}
+
+ vmovdqa64 96(%r10),$reduc_rght # .L2_44_shift_rgt
+ vmovdqa64 128(%r10),$reduc_left # .L2_44_shift_lft
+
+ jmp .Loop_vpmadd52
+
+.align 32
+.Loop_vpmadd52:
+ vmovdqu32 0($inp),%x#$T0 # load input as ----3210
+ lea 16($inp),$inp
+
+ vpermd $T0,$inp_permd,$T0 # ----3210 -> --322110
+ vpsrlvq $inp_shift,$T0,$T0
+ vpandq $reduc_mask,$T0,$T0
+ vporq $PAD,$T0,$T0
+
+ vpaddq $T0,$Dlo,$Dlo # accumulate input
+
+ vpermq \$0,$Dlo,${H0}{%k7}{z} # smash hash value
+ vpermq \$0b01010101,$Dlo,${H1}{%k7}{z}
+ vpermq \$0b10101010,$Dlo,${H2}{%k7}{z}
+
+ vpxord $Dlo,$Dlo,$Dlo
+ vpxord $Dhi,$Dhi,$Dhi
+
+ vpmadd52luq $r2r1r0,$H0,$Dlo
+ vpmadd52huq $r2r1r0,$H0,$Dhi
+
+ vpmadd52luq $r1r0s2,$H1,$Dlo
+ vpmadd52huq $r1r0s2,$H1,$Dhi
+
+ vpmadd52luq $r0s2s1,$H2,$Dlo
+ vpmadd52huq $r0s2s1,$H2,$Dhi
+
+ vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost qword
+ vpsllvq $reduc_left,$Dhi,$Dhi # 0 in topmost qword
+ vpandq $reduc_mask,$Dlo,$Dlo
+
+ vpaddq $T0,$Dhi,$Dhi
+
+ vpermq \$0b10010011,$Dhi,$Dhi # 0 in lowest qword
+
+ vpaddq $Dhi,$Dlo,$Dlo # note topmost qword :-)
+
+ vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost word
+ vpandq $reduc_mask,$Dlo,$Dlo
+
+ vpermq \$0b10010011,$T0,$T0
+
+ vpaddq $T0,$Dlo,$Dlo
+
+ vpermq \$0b10010011,$Dlo,${T0}{%k1}{z}
+
+ vpaddq $T0,$Dlo,$Dlo
+ vpsllq \$2,$T0,$T0
+
+ vpaddq $T0,$Dlo,$Dlo
+
+ dec %rax # len-=16
+ jnz .Loop_vpmadd52
+
+ vmovdqu64 $Dlo,0($ctx){%k7} # store hash value
+
+ test $len,$len
+ jnz .Lblocks_vpmadd52_4x
+
+.Lno_data_vpmadd52:
+ ret
+.size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
+___
+}
+{
+########################################################################
+# As implied by its name, the 4x subroutine processes 4 blocks in parallel
+# (but it also handles lengths of 4*n+2 blocks). It takes up to the 4th
+# power of the key and operates on 256-bit %ymm registers.
+
+my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
+my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
+my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
+
+$code.=<<___;
+.type poly1305_blocks_vpmadd52_4x,\@function,4
+.align 32
+poly1305_blocks_vpmadd52_4x:
+ shr \$4,$len
+ jz .Lno_data_vpmadd52_4x # too short
+
+ shl \$40,$padbit
+ mov 64($ctx),%r8 # peek on power of the key
+
+.Lblocks_vpmadd52_4x:
+ vpbroadcastq $padbit,$PAD
+
+ vmovdqa64 .Lx_mask44(%rip),$mask44
+ mov \$5,%eax
+ vmovdqa64 .Lx_mask42(%rip),$mask42
+ kmovw %eax,%k1 # used in 2x path
+
+ test %r8,%r8 # is power value impossible?
+ js .Linit_vpmadd52 # if it is, then init R[4]
+
+ vmovq 0($ctx),%x#$H0 # load current hash value
+ vmovq 8($ctx),%x#$H1
+ vmovq 16($ctx),%x#$H2
+
+ test \$3,$len # is length 4*n+2?
+ jnz .Lblocks_vpmadd52_2x_do
+
+.Lblocks_vpmadd52_4x_do:
+ vpbroadcastq 64($ctx),$R0 # load 4th power of the key
+ vpbroadcastq 96($ctx),$R1
+ vpbroadcastq 128($ctx),$R2
+ vpbroadcastq 160($ctx),$S1
+
+.Lblocks_vpmadd52_4x_key_loaded:
+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4
+ vpaddq $R2,$S2,$S2
+ vpsllq \$2,$S2,$S2
+
+ test \$7,$len # is len 8*n?
+ jz .Lblocks_vpmadd52_8x
+
+ vmovdqu64 16*0($inp),$T2 # load data
+ vmovdqu64 16*2($inp),$T3
+ lea 16*4($inp),$inp
+
+ vpunpcklqdq $T3,$T2,$T1 # transpose data
+ vpunpckhqdq $T3,$T2,$T3
+
+ # at this point 64-bit lanes are ordered as 3-1-2-0
+
+ vpsrlq \$24,$T3,$T2 # splat the data
+ vporq $PAD,$T2,$T2
+ vpaddq $T2,$H2,$H2 # accumulate input
+ vpandq $mask44,$T1,$T0
+ vpsrlq \$44,$T1,$T1
+ vpsllq \$20,$T3,$T3
+ vporq $T3,$T1,$T1
+ vpandq $mask44,$T1,$T1
+
+ sub \$4,$len
+ jz .Ltail_vpmadd52_4x
+ jmp .Loop_vpmadd52_4x
+ ud2
+
+.align 32
+.Linit_vpmadd52:
+ vmovq 24($ctx),%x#$S1 # load key
+ vmovq 56($ctx),%x#$H2
+ vmovq 32($ctx),%x#$S2
+ vmovq 40($ctx),%x#$R0
+ vmovq 48($ctx),%x#$R1
+
+ vmovdqa $R0,$H0
+ vmovdqa $R1,$H1
+ vmovdqa $H2,$R2
+
+ mov \$2,%eax
+
+.Lmul_init_vpmadd52:
+ vpxorq $D0lo,$D0lo,$D0lo
+ vpmadd52luq $H2,$S1,$D0lo
+ vpxorq $D0hi,$D0hi,$D0hi
+ vpmadd52huq $H2,$S1,$D0hi
+ vpxorq $D1lo,$D1lo,$D1lo
+ vpmadd52luq $H2,$S2,$D1lo
+ vpxorq $D1hi,$D1hi,$D1hi
+ vpmadd52huq $H2,$S2,$D1hi
+ vpxorq $D2lo,$D2lo,$D2lo
+ vpmadd52luq $H2,$R0,$D2lo
+ vpxorq $D2hi,$D2hi,$D2hi
+ vpmadd52huq $H2,$R0,$D2hi
+
+ vpmadd52luq $H0,$R0,$D0lo
+ vpmadd52huq $H0,$R0,$D0hi
+ vpmadd52luq $H0,$R1,$D1lo
+ vpmadd52huq $H0,$R1,$D1hi
+ vpmadd52luq $H0,$R2,$D2lo
+ vpmadd52huq $H0,$R2,$D2hi
+
+ vpmadd52luq $H1,$S2,$D0lo
+ vpmadd52huq $H1,$S2,$D0hi
+ vpmadd52luq $H1,$R0,$D1lo
+ vpmadd52huq $H1,$R0,$D1hi
+ vpmadd52luq $H1,$R1,$D2lo
+ vpmadd52huq $H1,$R1,$D2hi
+
+ ################################################################
+ # partial reduction
+ vpsrlq \$44,$D0lo,$tmp
+ vpsllq \$8,$D0hi,$D0hi
+ vpandq $mask44,$D0lo,$H0
+ vpaddq $tmp,$D0hi,$D0hi
+
+ vpaddq $D0hi,$D1lo,$D1lo
+
+ vpsrlq \$44,$D1lo,$tmp
+ vpsllq \$8,$D1hi,$D1hi
+ vpandq $mask44,$D1lo,$H1
+ vpaddq $tmp,$D1hi,$D1hi
+
+ vpaddq $D1hi,$D2lo,$D2lo
+
+ vpsrlq \$42,$D2lo,$tmp
+ vpsllq \$10,$D2hi,$D2hi
+ vpandq $mask42,$D2lo,$H2
+ vpaddq $tmp,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+ vpsllq \$2,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+
+ vpsrlq \$44,$H0,$tmp # additional step
+ vpandq $mask44,$H0,$H0
+
+ vpaddq $tmp,$H1,$H1
+
+ dec %eax
+ jz .Ldone_init_vpmadd52
+
+ vpunpcklqdq $R1,$H1,$R1 # 1,2
+ vpbroadcastq %x#$H1,%x#$H1 # 2,2
+ vpunpcklqdq $R2,$H2,$R2
+ vpbroadcastq %x#$H2,%x#$H2
+ vpunpcklqdq $R0,$H0,$R0
+ vpbroadcastq %x#$H0,%x#$H0
+
+ vpsllq \$2,$R1,$S1 # S1 = R1*5*4
+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4
+ vpaddq $R1,$S1,$S1
+ vpaddq $R2,$S2,$S2
+ vpsllq \$2,$S1,$S1
+ vpsllq \$2,$S2,$S2
+
+ jmp .Lmul_init_vpmadd52
+ ud2
+
+.align 32
+.Ldone_init_vpmadd52:
+ vinserti128 \$1,%x#$R1,$H1,$R1 # 1,2,3,4
+ vinserti128 \$1,%x#$R2,$H2,$R2
+ vinserti128 \$1,%x#$R0,$H0,$R0
+
+ vpermq \$0b11011000,$R1,$R1 # 1,3,2,4
+ vpermq \$0b11011000,$R2,$R2
+ vpermq \$0b11011000,$R0,$R0
+
+ vpsllq \$2,$R1,$S1 # S1 = R1*5*4
+ vpaddq $R1,$S1,$S1
+ vpsllq \$2,$S1,$S1
+
+ vmovq 0($ctx),%x#$H0 # load current hash value
+ vmovq 8($ctx),%x#$H1
+ vmovq 16($ctx),%x#$H2
+
+ test \$3,$len # is length 4*n+2?
+ jnz .Ldone_init_vpmadd52_2x
+
+ vmovdqu64 $R0,64($ctx) # save key powers
+ vpbroadcastq %x#$R0,$R0 # broadcast 4th power
+ vmovdqu64 $R1,96($ctx)
+ vpbroadcastq %x#$R1,$R1
+ vmovdqu64 $R2,128($ctx)
+ vpbroadcastq %x#$R2,$R2
+ vmovdqu64 $S1,160($ctx)
+ vpbroadcastq %x#$S1,$S1
+
+ jmp .Lblocks_vpmadd52_4x_key_loaded
+ ud2
+
+.align 32
+.Ldone_init_vpmadd52_2x:
+ vmovdqu64 $R0,64($ctx) # save key powers
+ vpsrldq \$8,$R0,$R0 # 0-1-0-2
+ vmovdqu64 $R1,96($ctx)
+ vpsrldq \$8,$R1,$R1
+ vmovdqu64 $R2,128($ctx)
+ vpsrldq \$8,$R2,$R2
+ vmovdqu64 $S1,160($ctx)
+ vpsrldq \$8,$S1,$S1
+ jmp .Lblocks_vpmadd52_2x_key_loaded
+ ud2
+
+.align 32
+.Lblocks_vpmadd52_2x_do:
+ vmovdqu64 128+8($ctx),${R2}{%k1}{z}# load 2nd and 1st key powers
+ vmovdqu64 160+8($ctx),${S1}{%k1}{z}
+ vmovdqu64 64+8($ctx),${R0}{%k1}{z}
+ vmovdqu64 96+8($ctx),${R1}{%k1}{z}
+
+.Lblocks_vpmadd52_2x_key_loaded:
+ vmovdqu64 16*0($inp),$T2 # load data
+ vpxorq $T3,$T3,$T3
+ lea 16*2($inp),$inp
+
+ vpunpcklqdq $T3,$T2,$T1 # transpose data
+ vpunpckhqdq $T3,$T2,$T3
+
+ # at this point 64-bit lanes are ordered as x-1-x-0
+
+ vpsrlq \$24,$T3,$T2 # splat the data
+ vporq $PAD,$T2,$T2
+ vpaddq $T2,$H2,$H2 # accumulate input
+ vpandq $mask44,$T1,$T0
+ vpsrlq \$44,$T1,$T1
+ vpsllq \$20,$T3,$T3
+ vporq $T3,$T1,$T1
+ vpandq $mask44,$T1,$T1
+
+ jmp .Ltail_vpmadd52_2x
+ ud2
+
+.align 32
+.Loop_vpmadd52_4x:
+ #vpaddq $T2,$H2,$H2 # accumulate input
+ vpaddq $T0,$H0,$H0
+ vpaddq $T1,$H1,$H1
+
+ vpxorq $D0lo,$D0lo,$D0lo
+ vpmadd52luq $H2,$S1,$D0lo
+ vpxorq $D0hi,$D0hi,$D0hi
+ vpmadd52huq $H2,$S1,$D0hi
+ vpxorq $D1lo,$D1lo,$D1lo
+ vpmadd52luq $H2,$S2,$D1lo
+ vpxorq $D1hi,$D1hi,$D1hi
+ vpmadd52huq $H2,$S2,$D1hi
+ vpxorq $D2lo,$D2lo,$D2lo
+ vpmadd52luq $H2,$R0,$D2lo
+ vpxorq $D2hi,$D2hi,$D2hi
+ vpmadd52huq $H2,$R0,$D2hi
+
+ vmovdqu64 16*0($inp),$T2 # load data
+ vmovdqu64 16*2($inp),$T3
+ lea 16*4($inp),$inp
+ vpmadd52luq $H0,$R0,$D0lo
+ vpmadd52huq $H0,$R0,$D0hi
+ vpmadd52luq $H0,$R1,$D1lo
+ vpmadd52huq $H0,$R1,$D1hi
+ vpmadd52luq $H0,$R2,$D2lo
+ vpmadd52huq $H0,$R2,$D2hi
+
+ vpunpcklqdq $T3,$T2,$T1 # transpose data
+ vpunpckhqdq $T3,$T2,$T3
+ vpmadd52luq $H1,$S2,$D0lo
+ vpmadd52huq $H1,$S2,$D0hi
+ vpmadd52luq $H1,$R0,$D1lo
+ vpmadd52huq $H1,$R0,$D1hi
+ vpmadd52luq $H1,$R1,$D2lo
+ vpmadd52huq $H1,$R1,$D2hi
+
+ ################################################################
+ # partial reduction (interleaved with data splat)
+ vpsrlq \$44,$D0lo,$tmp
+ vpsllq \$8,$D0hi,$D0hi
+ vpandq $mask44,$D0lo,$H0
+ vpaddq $tmp,$D0hi,$D0hi
+
+ vpsrlq \$24,$T3,$T2
+ vporq $PAD,$T2,$T2
+ vpaddq $D0hi,$D1lo,$D1lo
+
+ vpsrlq \$44,$D1lo,$tmp
+ vpsllq \$8,$D1hi,$D1hi
+ vpandq $mask44,$D1lo,$H1
+ vpaddq $tmp,$D1hi,$D1hi
+
+ vpandq $mask44,$T1,$T0
+ vpsrlq \$44,$T1,$T1
+ vpsllq \$20,$T3,$T3
+ vpaddq $D1hi,$D2lo,$D2lo
+
+ vpsrlq \$42,$D2lo,$tmp
+ vpsllq \$10,$D2hi,$D2hi
+ vpandq $mask42,$D2lo,$H2
+ vpaddq $tmp,$D2hi,$D2hi
+
+ vpaddq $T2,$H2,$H2 # accumulate input
+ vpaddq $D2hi,$H0,$H0
+ vpsllq \$2,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+ vporq $T3,$T1,$T1
+ vpandq $mask44,$T1,$T1
+
+ vpsrlq \$44,$H0,$tmp # additional step
+ vpandq $mask44,$H0,$H0
+
+ vpaddq $tmp,$H1,$H1
+
+ sub \$4,$len # len-=64
+ jnz .Loop_vpmadd52_4x
+
+.Ltail_vpmadd52_4x:
+ vmovdqu64 128($ctx),$R2 # load all key powers
+ vmovdqu64 160($ctx),$S1
+ vmovdqu64 64($ctx),$R0
+ vmovdqu64 96($ctx),$R1
+
+.Ltail_vpmadd52_2x:
+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4
+ vpaddq $R2,$S2,$S2
+ vpsllq \$2,$S2,$S2
+
+ #vpaddq $T2,$H2,$H2 # accumulate input
+ vpaddq $T0,$H0,$H0
+ vpaddq $T1,$H1,$H1
+
+ vpxorq $D0lo,$D0lo,$D0lo
+ vpmadd52luq $H2,$S1,$D0lo
+ vpxorq $D0hi,$D0hi,$D0hi
+ vpmadd52huq $H2,$S1,$D0hi
+ vpxorq $D1lo,$D1lo,$D1lo
+ vpmadd52luq $H2,$S2,$D1lo
+ vpxorq $D1hi,$D1hi,$D1hi
+ vpmadd52huq $H2,$S2,$D1hi
+ vpxorq $D2lo,$D2lo,$D2lo
+ vpmadd52luq $H2,$R0,$D2lo
+ vpxorq $D2hi,$D2hi,$D2hi
+ vpmadd52huq $H2,$R0,$D2hi
+
+ vpmadd52luq $H0,$R0,$D0lo
+ vpmadd52huq $H0,$R0,$D0hi
+ vpmadd52luq $H0,$R1,$D1lo
+ vpmadd52huq $H0,$R1,$D1hi
+ vpmadd52luq $H0,$R2,$D2lo
+ vpmadd52huq $H0,$R2,$D2hi
+
+ vpmadd52luq $H1,$S2,$D0lo
+ vpmadd52huq $H1,$S2,$D0hi
+ vpmadd52luq $H1,$R0,$D1lo
+ vpmadd52huq $H1,$R0,$D1hi
+ vpmadd52luq $H1,$R1,$D2lo
+ vpmadd52huq $H1,$R1,$D2hi
+
+ ################################################################
+ # horizontal addition
+
+ mov \$1,%eax
+ kmovw %eax,%k1
+ vpsrldq \$8,$D0lo,$T0
+ vpsrldq \$8,$D0hi,$H0
+ vpsrldq \$8,$D1lo,$T1
+ vpsrldq \$8,$D1hi,$H1
+ vpaddq $T0,$D0lo,$D0lo
+ vpaddq $H0,$D0hi,$D0hi
+ vpsrldq \$8,$D2lo,$T2
+ vpsrldq \$8,$D2hi,$H2
+ vpaddq $T1,$D1lo,$D1lo
+ vpaddq $H1,$D1hi,$D1hi
+ vpermq \$0x2,$D0lo,$T0
+ vpermq \$0x2,$D0hi,$H0
+ vpaddq $T2,$D2lo,$D2lo
+ vpaddq $H2,$D2hi,$D2hi
+
+ vpermq \$0x2,$D1lo,$T1
+ vpermq \$0x2,$D1hi,$H1
+ vpaddq $T0,$D0lo,${D0lo}{%k1}{z}
+ vpaddq $H0,$D0hi,${D0hi}{%k1}{z}
+ vpermq \$0x2,$D2lo,$T2
+ vpermq \$0x2,$D2hi,$H2
+ vpaddq $T1,$D1lo,${D1lo}{%k1}{z}
+ vpaddq $H1,$D1hi,${D1hi}{%k1}{z}
+ vpaddq $T2,$D2lo,${D2lo}{%k1}{z}
+ vpaddq $H2,$D2hi,${D2hi}{%k1}{z}
+
+ ################################################################
+ # partial reduction
+ vpsrlq \$44,$D0lo,$tmp
+ vpsllq \$8,$D0hi,$D0hi
+ vpandq $mask44,$D0lo,$H0
+ vpaddq $tmp,$D0hi,$D0hi
+
+ vpaddq $D0hi,$D1lo,$D1lo
+
+ vpsrlq \$44,$D1lo,$tmp
+ vpsllq \$8,$D1hi,$D1hi
+ vpandq $mask44,$D1lo,$H1
+ vpaddq $tmp,$D1hi,$D1hi
+
+ vpaddq $D1hi,$D2lo,$D2lo
+
+ vpsrlq \$42,$D2lo,$tmp
+ vpsllq \$10,$D2hi,$D2hi
+ vpandq $mask42,$D2lo,$H2
+ vpaddq $tmp,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+ vpsllq \$2,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+
+ vpsrlq \$44,$H0,$tmp # additional step
+ vpandq $mask44,$H0,$H0
+
+ vpaddq $tmp,$H1,$H1
+ # at this point $len is
+ # either 4*n+2 or 0...
+ sub \$2,$len # len-=32
+ ja .Lblocks_vpmadd52_4x_do
+
+ vmovq %x#$H0,0($ctx)
+ vmovq %x#$H1,8($ctx)
+ vmovq %x#$H2,16($ctx)
+ vzeroall
+
+.Lno_data_vpmadd52_4x:
+ ret
+.size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
+___
+}
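For readers following the vpmadd52 code, the repeated "partial reduction" sequences above are a carry chain over 44-, 44- and 42-bit limbs (the assembly works on split lo/hi product halves; the sketch below collapses each pair into one word). A minimal scalar C analogue, with hypothetical names not taken from the kernel sources:

#include <stdint.h>

/* One base-2^44 partial reduction, as the vpmadd52 code performs per lane. */
static void poly1305_partial_reduce_base2_44(uint64_t h[3])
{
	const uint64_t mask44 = (1ULL << 44) - 1;
	const uint64_t mask42 = (1ULL << 42) - 1;
	uint64_t c;

	c = h[0] >> 44; h[0] &= mask44; h[1] += c;
	c = h[1] >> 44; h[1] &= mask44; h[2] += c;
	c = h[2] >> 42; h[2] &= mask42;
	h[0] += c * 5;			/* 2^130 == 5 (mod 2^130 - 5) */
	c = h[0] >> 44; h[0] &= mask44;	/* the "additional step" in the asm */
	h[1] += c;
}

The multiply by 5 shows up in the assembly as an add of $D2hi followed by a shift-left-by-2 and a second add, and the final carry out of the low limb is what the comments call the "additional step".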
+{
+########################################################################
+# As implied by its name, the 8x subroutine processes 8 blocks in parallel...
+# This is an intermediate version, as it is used only in cases when the input
+# length is either 8*n, 8*n+1 or 8*n+2...
+
+my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
+my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
+my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
+my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10));
+
+$code.=<<___;
+.type poly1305_blocks_vpmadd52_8x,\@function,4
+.align 32
+poly1305_blocks_vpmadd52_8x:
+ shr \$4,$len
+ jz .Lno_data_vpmadd52_8x # too short
+
+ shl \$40,$padbit
+ mov 64($ctx),%r8 # peek on power of the key
+
+ vmovdqa64 .Lx_mask44(%rip),$mask44
+ vmovdqa64 .Lx_mask42(%rip),$mask42
+
+ test %r8,%r8 # is power value impossible?
+ js .Linit_vpmadd52 # if it is, then init R[4]
+
+ vmovq 0($ctx),%x#$H0 # load current hash value
+ vmovq 8($ctx),%x#$H1
+ vmovq 16($ctx),%x#$H2
+
+.Lblocks_vpmadd52_8x:
+ ################################################################
+	# first we calculate more key powers
+
+ vmovdqu64 128($ctx),$R2 # load 1-3-2-4 powers
+ vmovdqu64 160($ctx),$S1
+ vmovdqu64 64($ctx),$R0
+ vmovdqu64 96($ctx),$R1
+
+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4
+ vpaddq $R2,$S2,$S2
+ vpsllq \$2,$S2,$S2
+
+ vpbroadcastq %x#$R2,$RR2 # broadcast 4th power
+ vpbroadcastq %x#$R0,$RR0
+ vpbroadcastq %x#$R1,$RR1
+
+ vpxorq $D0lo,$D0lo,$D0lo
+ vpmadd52luq $RR2,$S1,$D0lo
+ vpxorq $D0hi,$D0hi,$D0hi
+ vpmadd52huq $RR2,$S1,$D0hi
+ vpxorq $D1lo,$D1lo,$D1lo
+ vpmadd52luq $RR2,$S2,$D1lo
+ vpxorq $D1hi,$D1hi,$D1hi
+ vpmadd52huq $RR2,$S2,$D1hi
+ vpxorq $D2lo,$D2lo,$D2lo
+ vpmadd52luq $RR2,$R0,$D2lo
+ vpxorq $D2hi,$D2hi,$D2hi
+ vpmadd52huq $RR2,$R0,$D2hi
+
+ vpmadd52luq $RR0,$R0,$D0lo
+ vpmadd52huq $RR0,$R0,$D0hi
+ vpmadd52luq $RR0,$R1,$D1lo
+ vpmadd52huq $RR0,$R1,$D1hi
+ vpmadd52luq $RR0,$R2,$D2lo
+ vpmadd52huq $RR0,$R2,$D2hi
+
+ vpmadd52luq $RR1,$S2,$D0lo
+ vpmadd52huq $RR1,$S2,$D0hi
+ vpmadd52luq $RR1,$R0,$D1lo
+ vpmadd52huq $RR1,$R0,$D1hi
+ vpmadd52luq $RR1,$R1,$D2lo
+ vpmadd52huq $RR1,$R1,$D2hi
+
+ ################################################################
+ # partial reduction
+ vpsrlq \$44,$D0lo,$tmp
+ vpsllq \$8,$D0hi,$D0hi
+ vpandq $mask44,$D0lo,$RR0
+ vpaddq $tmp,$D0hi,$D0hi
+
+ vpaddq $D0hi,$D1lo,$D1lo
+
+ vpsrlq \$44,$D1lo,$tmp
+ vpsllq \$8,$D1hi,$D1hi
+ vpandq $mask44,$D1lo,$RR1
+ vpaddq $tmp,$D1hi,$D1hi
+
+ vpaddq $D1hi,$D2lo,$D2lo
+
+ vpsrlq \$42,$D2lo,$tmp
+ vpsllq \$10,$D2hi,$D2hi
+ vpandq $mask42,$D2lo,$RR2
+ vpaddq $tmp,$D2hi,$D2hi
+
+ vpaddq $D2hi,$RR0,$RR0
+ vpsllq \$2,$D2hi,$D2hi
+
+ vpaddq $D2hi,$RR0,$RR0
+
+ vpsrlq \$44,$RR0,$tmp # additional step
+ vpandq $mask44,$RR0,$RR0
+
+ vpaddq $tmp,$RR1,$RR1
+
+ ################################################################
+ # At this point Rx holds 1324 powers, RRx - 5768, and the goal
+ # is 15263748, which reflects how data is loaded...
+
+ vpunpcklqdq $R2,$RR2,$T2 # 3748
+ vpunpckhqdq $R2,$RR2,$R2 # 1526
+ vpunpcklqdq $R0,$RR0,$T0
+ vpunpckhqdq $R0,$RR0,$R0
+ vpunpcklqdq $R1,$RR1,$T1
+ vpunpckhqdq $R1,$RR1,$R1
+___
+######## switch to %zmm
+map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
+map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
+map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
+map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2);
+
+$code.=<<___;
+ vshufi64x2 \$0x44,$R2,$T2,$RR2 # 15263748
+ vshufi64x2 \$0x44,$R0,$T0,$RR0
+ vshufi64x2 \$0x44,$R1,$T1,$RR1
+
+ vmovdqu64 16*0($inp),$T2 # load data
+ vmovdqu64 16*4($inp),$T3
+ lea 16*8($inp),$inp
+
+ vpsllq \$2,$RR2,$SS2 # S2 = R2*5*4
+ vpsllq \$2,$RR1,$SS1 # S1 = R1*5*4
+ vpaddq $RR2,$SS2,$SS2
+ vpaddq $RR1,$SS1,$SS1
+ vpsllq \$2,$SS2,$SS2
+ vpsllq \$2,$SS1,$SS1
+
+ vpbroadcastq $padbit,$PAD
+ vpbroadcastq %x#$mask44,$mask44
+ vpbroadcastq %x#$mask42,$mask42
+
+ vpbroadcastq %x#$SS1,$S1 # broadcast 8th power
+ vpbroadcastq %x#$SS2,$S2
+ vpbroadcastq %x#$RR0,$R0
+ vpbroadcastq %x#$RR1,$R1
+ vpbroadcastq %x#$RR2,$R2
+
+ vpunpcklqdq $T3,$T2,$T1 # transpose data
+ vpunpckhqdq $T3,$T2,$T3
+
+ # at this point 64-bit lanes are ordered as 73625140
+
+ vpsrlq \$24,$T3,$T2 # splat the data
+ vporq $PAD,$T2,$T2
+ vpaddq $T2,$H2,$H2 # accumulate input
+ vpandq $mask44,$T1,$T0
+ vpsrlq \$44,$T1,$T1
+ vpsllq \$20,$T3,$T3
+ vporq $T3,$T1,$T1
+ vpandq $mask44,$T1,$T1
+
+ sub \$8,$len
+ jz .Ltail_vpmadd52_8x
+ jmp .Loop_vpmadd52_8x
+
+.align 32
+.Loop_vpmadd52_8x:
+ #vpaddq $T2,$H2,$H2 # accumulate input
+ vpaddq $T0,$H0,$H0
+ vpaddq $T1,$H1,$H1
+
+ vpxorq $D0lo,$D0lo,$D0lo
+ vpmadd52luq $H2,$S1,$D0lo
+ vpxorq $D0hi,$D0hi,$D0hi
+ vpmadd52huq $H2,$S1,$D0hi
+ vpxorq $D1lo,$D1lo,$D1lo
+ vpmadd52luq $H2,$S2,$D1lo
+ vpxorq $D1hi,$D1hi,$D1hi
+ vpmadd52huq $H2,$S2,$D1hi
+ vpxorq $D2lo,$D2lo,$D2lo
+ vpmadd52luq $H2,$R0,$D2lo
+ vpxorq $D2hi,$D2hi,$D2hi
+ vpmadd52huq $H2,$R0,$D2hi
+
+ vmovdqu64 16*0($inp),$T2 # load data
+ vmovdqu64 16*4($inp),$T3
+ lea 16*8($inp),$inp
+ vpmadd52luq $H0,$R0,$D0lo
+ vpmadd52huq $H0,$R0,$D0hi
+ vpmadd52luq $H0,$R1,$D1lo
+ vpmadd52huq $H0,$R1,$D1hi
+ vpmadd52luq $H0,$R2,$D2lo
+ vpmadd52huq $H0,$R2,$D2hi
+
+ vpunpcklqdq $T3,$T2,$T1 # transpose data
+ vpunpckhqdq $T3,$T2,$T3
+ vpmadd52luq $H1,$S2,$D0lo
+ vpmadd52huq $H1,$S2,$D0hi
+ vpmadd52luq $H1,$R0,$D1lo
+ vpmadd52huq $H1,$R0,$D1hi
+ vpmadd52luq $H1,$R1,$D2lo
+ vpmadd52huq $H1,$R1,$D2hi
+
+ ################################################################
+ # partial reduction (interleaved with data splat)
+ vpsrlq \$44,$D0lo,$tmp
+ vpsllq \$8,$D0hi,$D0hi
+ vpandq $mask44,$D0lo,$H0
+ vpaddq $tmp,$D0hi,$D0hi
+
+ vpsrlq \$24,$T3,$T2
+ vporq $PAD,$T2,$T2
+ vpaddq $D0hi,$D1lo,$D1lo
+
+ vpsrlq \$44,$D1lo,$tmp
+ vpsllq \$8,$D1hi,$D1hi
+ vpandq $mask44,$D1lo,$H1
+ vpaddq $tmp,$D1hi,$D1hi
+
+ vpandq $mask44,$T1,$T0
+ vpsrlq \$44,$T1,$T1
+ vpsllq \$20,$T3,$T3
+ vpaddq $D1hi,$D2lo,$D2lo
+
+ vpsrlq \$42,$D2lo,$tmp
+ vpsllq \$10,$D2hi,$D2hi
+ vpandq $mask42,$D2lo,$H2
+ vpaddq $tmp,$D2hi,$D2hi
+
+ vpaddq $T2,$H2,$H2 # accumulate input
+ vpaddq $D2hi,$H0,$H0
+ vpsllq \$2,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+ vporq $T3,$T1,$T1
+ vpandq $mask44,$T1,$T1
+
+ vpsrlq \$44,$H0,$tmp # additional step
+ vpandq $mask44,$H0,$H0
+
+ vpaddq $tmp,$H1,$H1
+
+ sub \$8,$len # len-=128
+ jnz .Loop_vpmadd52_8x
+
+.Ltail_vpmadd52_8x:
+ #vpaddq $T2,$H2,$H2 # accumulate input
+ vpaddq $T0,$H0,$H0
+ vpaddq $T1,$H1,$H1
+
+ vpxorq $D0lo,$D0lo,$D0lo
+ vpmadd52luq $H2,$SS1,$D0lo
+ vpxorq $D0hi,$D0hi,$D0hi
+ vpmadd52huq $H2,$SS1,$D0hi
+ vpxorq $D1lo,$D1lo,$D1lo
+ vpmadd52luq $H2,$SS2,$D1lo
+ vpxorq $D1hi,$D1hi,$D1hi
+ vpmadd52huq $H2,$SS2,$D1hi
+ vpxorq $D2lo,$D2lo,$D2lo
+ vpmadd52luq $H2,$RR0,$D2lo
+ vpxorq $D2hi,$D2hi,$D2hi
+ vpmadd52huq $H2,$RR0,$D2hi
+
+ vpmadd52luq $H0,$RR0,$D0lo
+ vpmadd52huq $H0,$RR0,$D0hi
+ vpmadd52luq $H0,$RR1,$D1lo
+ vpmadd52huq $H0,$RR1,$D1hi
+ vpmadd52luq $H0,$RR2,$D2lo
+ vpmadd52huq $H0,$RR2,$D2hi
+
+ vpmadd52luq $H1,$SS2,$D0lo
+ vpmadd52huq $H1,$SS2,$D0hi
+ vpmadd52luq $H1,$RR0,$D1lo
+ vpmadd52huq $H1,$RR0,$D1hi
+ vpmadd52luq $H1,$RR1,$D2lo
+ vpmadd52huq $H1,$RR1,$D2hi
+
+ ################################################################
+ # horizontal addition
+
+ mov \$1,%eax
+ kmovw %eax,%k1
+ vpsrldq \$8,$D0lo,$T0
+ vpsrldq \$8,$D0hi,$H0
+ vpsrldq \$8,$D1lo,$T1
+ vpsrldq \$8,$D1hi,$H1
+ vpaddq $T0,$D0lo,$D0lo
+ vpaddq $H0,$D0hi,$D0hi
+ vpsrldq \$8,$D2lo,$T2
+ vpsrldq \$8,$D2hi,$H2
+ vpaddq $T1,$D1lo,$D1lo
+ vpaddq $H1,$D1hi,$D1hi
+ vpermq \$0x2,$D0lo,$T0
+ vpermq \$0x2,$D0hi,$H0
+ vpaddq $T2,$D2lo,$D2lo
+ vpaddq $H2,$D2hi,$D2hi
+
+ vpermq \$0x2,$D1lo,$T1
+ vpermq \$0x2,$D1hi,$H1
+ vpaddq $T0,$D0lo,$D0lo
+ vpaddq $H0,$D0hi,$D0hi
+ vpermq \$0x2,$D2lo,$T2
+ vpermq \$0x2,$D2hi,$H2
+ vpaddq $T1,$D1lo,$D1lo
+ vpaddq $H1,$D1hi,$D1hi
+ vextracti64x4 \$1,$D0lo,%y#$T0
+ vextracti64x4 \$1,$D0hi,%y#$H0
+ vpaddq $T2,$D2lo,$D2lo
+ vpaddq $H2,$D2hi,$D2hi
+
+ vextracti64x4 \$1,$D1lo,%y#$T1
+ vextracti64x4 \$1,$D1hi,%y#$H1
+ vextracti64x4 \$1,$D2lo,%y#$T2
+ vextracti64x4 \$1,$D2hi,%y#$H2
+___
+######## switch back to %ymm
+map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
+map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
+map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
+
+$code.=<<___;
+ vpaddq $T0,$D0lo,${D0lo}{%k1}{z}
+ vpaddq $H0,$D0hi,${D0hi}{%k1}{z}
+ vpaddq $T1,$D1lo,${D1lo}{%k1}{z}
+ vpaddq $H1,$D1hi,${D1hi}{%k1}{z}
+ vpaddq $T2,$D2lo,${D2lo}{%k1}{z}
+ vpaddq $H2,$D2hi,${D2hi}{%k1}{z}
+
+ ################################################################
+ # partial reduction
+ vpsrlq \$44,$D0lo,$tmp
+ vpsllq \$8,$D0hi,$D0hi
+ vpandq $mask44,$D0lo,$H0
+ vpaddq $tmp,$D0hi,$D0hi
+
+ vpaddq $D0hi,$D1lo,$D1lo
+
+ vpsrlq \$44,$D1lo,$tmp
+ vpsllq \$8,$D1hi,$D1hi
+ vpandq $mask44,$D1lo,$H1
+ vpaddq $tmp,$D1hi,$D1hi
+
+ vpaddq $D1hi,$D2lo,$D2lo
+
+ vpsrlq \$42,$D2lo,$tmp
+ vpsllq \$10,$D2hi,$D2hi
+ vpandq $mask42,$D2lo,$H2
+ vpaddq $tmp,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+ vpsllq \$2,$D2hi,$D2hi
+
+ vpaddq $D2hi,$H0,$H0
+
+ vpsrlq \$44,$H0,$tmp # additional step
+ vpandq $mask44,$H0,$H0
+
+ vpaddq $tmp,$H1,$H1
+
+ ################################################################
+
+ vmovq %x#$H0,0($ctx)
+ vmovq %x#$H1,8($ctx)
+ vmovq %x#$H2,16($ctx)
+ vzeroall
+
+.Lno_data_vpmadd52_8x:
+ ret
+.size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
+___
+}
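A note on why these routines precompute key powers (the 4x path stores vectors of the 1st through 4th powers at context offsets 64 through 160, and the 8x path derives the 5th through 8th powers from them on entry): Poly1305 is Horner evaluation, h = (h + m_i) * r mod 2^130 - 5 per 16-byte block. Unrolling four steps gives

	h' = ((((h + m1)*r + m2)*r + m3)*r + m4)*r
	   = (h + m1)*r^4 + m2*r^3 + m3*r^2 + m4*r   (mod 2^130 - 5)

so four (or eight) blocks are folded in per iteration with independent per-lane multiplications, and the steady-state loop only needs the broadcast 4th (or 8th) power while the tail uses the mixed powers. This is the standard parallel-Poly1305 derivation, stated here for orientation rather than taken from the sources.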
+$code.=<<___;
+.type poly1305_emit_base2_44,\@function,3
+.align 32
+poly1305_emit_base2_44:
+ mov 0($ctx),%r8 # load hash value
+ mov 8($ctx),%r9
+ mov 16($ctx),%r10
+
+ mov %r9,%rax
+ shr \$20,%r9
+ shl \$44,%rax
+ mov %r10,%rcx
+ shr \$40,%r10
+ shl \$24,%rcx
+
+ add %rax,%r8
+ adc %rcx,%r9
+ adc \$0,%r10
+
+ mov %r8,%rax
+ add \$5,%r8 # compare to modulus
+ mov %r9,%rcx
+ adc \$0,%r9
+ adc \$0,%r10
+ shr \$2,%r10 # did 130-bit value overflow?
+ cmovnz %r8,%rax
+ cmovnz %r9,%rcx
+
+ add 0($nonce),%rax # accumulate nonce
+ adc 8($nonce),%rcx
+ mov %rax,0($mac) # write result
+ mov %rcx,8($mac)
+
+ ret
+.size poly1305_emit_base2_44,.-poly1305_emit_base2_44
+___
+} } }
+}
+
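The emit routine above converts the base-2^44 state back to a 128-bit value, performs the final reduction modulo 2^130 - 5 and adds the nonce. A rough C restatement, assuming the limbs are already within their 44/44/42-bit ranges (the assembly's add/adc chain also tolerates slightly larger values), a little-endian host, and GCC/Clang's unsigned __int128 for the two-word carries; it also uses a branch where the assembly uses cmovnz, so unlike the original it is not constant-time. Names are illustrative:

#include <stdint.h>
#include <string.h>

static void poly1305_emit_base2_44_sketch(const uint64_t h[3],
					  uint8_t mac[16],
					  const uint64_t nonce[2])
{
	uint64_t h0, h1, h2, g0, g1, g2;
	unsigned __int128 t;

	/* recombine 44+44+42-bit limbs into 64+64+2 bits */
	h0 = h[0] | (h[1] << 44);
	h1 = (h[1] >> 20) | (h[2] << 24);
	h2 = h[2] >> 40;

	/* compute h + 5 and keep it only if that reaches 2^130, i.e. h >= p */
	t  = (unsigned __int128)h0 + 5;  g0 = (uint64_t)t;
	t  = (unsigned __int128)h1 + (uint64_t)(t >> 64);  g1 = (uint64_t)t;
	g2 = h2 + (uint64_t)(t >> 64);
	if (g2 >> 2) {
		h0 = g0;
		h1 = g1;
	}

	/* accumulate the 128-bit nonce and write out the tag */
	t  = (unsigned __int128)h0 + nonce[0];  h0 = (uint64_t)t;
	h1 = h1 + nonce[1] + (uint64_t)(t >> 64);
	memcpy(mac, &h0, 8);
	memcpy(mac + 8, &h1, 8);
}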
+if (!$kernel)
+{ # chacha20-poly1305 helpers
+my ($out,$inp,$otp,$len)=$win64 ? ("%rcx","%rdx","%r8", "%r9") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx"); # Unix order
+$code.=<<___;
+.globl xor128_encrypt_n_pad
+.type xor128_encrypt_n_pad,\@abi-omnipotent
+.align 16
+xor128_encrypt_n_pad:
+ sub $otp,$inp
+ sub $otp,$out
+ mov $len,%r10 # put len aside
+ shr \$4,$len # len / 16
+ jz .Ltail_enc
+ nop
+.Loop_enc_xmm:
+ movdqu ($inp,$otp),%xmm0
+ pxor ($otp),%xmm0
+ movdqu %xmm0,($out,$otp)
+ movdqa %xmm0,($otp)
+ lea 16($otp),$otp
+ dec $len
+ jnz .Loop_enc_xmm
+
+ and \$15,%r10 # len % 16
+ jz .Ldone_enc
+
+.Ltail_enc:
+ mov \$16,$len
+ sub %r10,$len
+ xor %eax,%eax
+.Loop_enc_byte:
+ mov ($inp,$otp),%al
+ xor ($otp),%al
+ mov %al,($out,$otp)
+ mov %al,($otp)
+ lea 1($otp),$otp
+ dec %r10
+ jnz .Loop_enc_byte
+
+ xor %eax,%eax
+.Loop_enc_pad:
+ mov %al,($otp)
+ lea 1($otp),$otp
+ dec $len
+ jnz .Loop_enc_pad
+
+.Ldone_enc:
+ mov $otp,%rax
+ ret
+.size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
+
+.globl xor128_decrypt_n_pad
+.type xor128_decrypt_n_pad,\@abi-omnipotent
+.align 16
+xor128_decrypt_n_pad:
+ sub $otp,$inp
+ sub $otp,$out
+ mov $len,%r10 # put len aside
+ shr \$4,$len # len / 16
+ jz .Ltail_dec
+ nop
+.Loop_dec_xmm:
+ movdqu ($inp,$otp),%xmm0
+ movdqa ($otp),%xmm1
+ pxor %xmm0,%xmm1
+ movdqu %xmm1,($out,$otp)
+ movdqa %xmm0,($otp)
+ lea 16($otp),$otp
+ dec $len
+ jnz .Loop_dec_xmm
+
+ pxor %xmm1,%xmm1
+ and \$15,%r10 # len % 16
+ jz .Ldone_dec
+
+.Ltail_dec:
+ mov \$16,$len
+ sub %r10,$len
+ xor %eax,%eax
+ xor %r11,%r11
+.Loop_dec_byte:
+ mov ($inp,$otp),%r11b
+ mov ($otp),%al
+ xor %r11b,%al
+ mov %al,($out,$otp)
+ mov %r11b,($otp)
+ lea 1($otp),$otp
+ dec %r10
+ jnz .Loop_dec_byte
+
+ xor %eax,%eax
+.Loop_dec_pad:
+ mov %al,($otp)
+ lea 1($otp),$otp
+ dec $len
+ jnz .Loop_dec_pad
+
+.Ldone_dec:
+ mov $otp,%rax
+ ret
+.size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
+___
+}
+
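The two helpers that follow are emitted only for the non-kernel build and serve OpenSSL-style chacha20-poly1305 composition. Reading the assembly, the encrypt variant xors the input against a keystream buffer ("otp"), writes the result, overwrites the keystream buffer with the ciphertext, zero-pads that buffer to a 16-byte Poly1305 boundary, and returns a pointer past the padded region; decrypt differs only in storing the incoming ciphertext into the buffer. A behavioural sketch, byte-at-a-time rather than 16 bytes per step, with hypothetical names:

#include <stddef.h>
#include <stdint.h>

static uint8_t *xor128_encrypt_n_pad_sketch(uint8_t *out, const uint8_t *inp,
					    uint8_t *otp, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		uint8_t c = inp[i] ^ otp[i];	/* xor with keystream  */
		out[i] = c;			/* emit ciphertext     */
		otp[i] = c;			/* keep it for the MAC */
	}
	while (i % 16)				/* pad the partial block */
		otp[i++] = 0;
	return otp + i;
}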
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<.Lprologue
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lcommon_seh_tail
+
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+ jmp .Lcommon_seh_tail
+.size se_handler,.-se_handler
+
+.type avx_handler,\@abi-omnipotent
+.align 16
+avx_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 208($context),%rax # pull context->R11
+
+ lea 0x50(%rax),%rsi
+ lea 0xf8(%rax),%rax
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size avx_handler,.-avx_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_poly1305_init_x86_64
+ .rva .LSEH_end_poly1305_init_x86_64
+ .rva .LSEH_info_poly1305_init_x86_64
+
+ .rva .LSEH_begin_poly1305_blocks_x86_64
+ .rva .LSEH_end_poly1305_blocks_x86_64
+ .rva .LSEH_info_poly1305_blocks_x86_64
+
+ .rva .LSEH_begin_poly1305_emit_x86_64
+ .rva .LSEH_end_poly1305_emit_x86_64
+ .rva .LSEH_info_poly1305_emit_x86_64
+___
+$code.=<<___ if ($avx);
+ .rva .LSEH_begin_poly1305_blocks_avx
+ .rva .Lbase2_64_avx
+ .rva .LSEH_info_poly1305_blocks_avx_1
+
+ .rva .Lbase2_64_avx
+ .rva .Leven_avx
+ .rva .LSEH_info_poly1305_blocks_avx_2
+
+ .rva .Leven_avx
+ .rva .LSEH_end_poly1305_blocks_avx
+ .rva .LSEH_info_poly1305_blocks_avx_3
+
+ .rva .LSEH_begin_poly1305_emit_avx
+ .rva .LSEH_end_poly1305_emit_avx
+ .rva .LSEH_info_poly1305_emit_avx
+___
+$code.=<<___ if ($avx>1);
+ .rva .LSEH_begin_poly1305_blocks_avx2
+ .rva .Lbase2_64_avx2
+ .rva .LSEH_info_poly1305_blocks_avx2_1
+
+ .rva .Lbase2_64_avx2
+ .rva .Leven_avx2
+ .rva .LSEH_info_poly1305_blocks_avx2_2
+
+ .rva .Leven_avx2
+ .rva .LSEH_end_poly1305_blocks_avx2
+ .rva .LSEH_info_poly1305_blocks_avx2_3
+___
+$code.=<<___ if ($avx>2);
+ .rva .LSEH_begin_poly1305_blocks_avx512
+ .rva .LSEH_end_poly1305_blocks_avx512
+ .rva .LSEH_info_poly1305_blocks_avx512
+___
+$code.=<<___;
+.section .xdata
+.align 8
+.LSEH_info_poly1305_init_x86_64:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64
+
+.LSEH_info_poly1305_blocks_x86_64:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lblocks_body,.Lblocks_epilogue
+
+.LSEH_info_poly1305_emit_x86_64:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .LSEH_begin_poly1305_emit_x86_64,.LSEH_begin_poly1305_emit_x86_64
+___
+$code.=<<___ if ($avx);
+.LSEH_info_poly1305_blocks_avx_1:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx_2:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx_3:
+ .byte 9,0,0,0
+ .rva avx_handler
+ .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_emit_avx:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_poly1305_blocks_avx2_1:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx2_2:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx2_3:
+ .byte 9,0,0,0
+ .rva avx_handler
+ .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
+___
+$code.=<<___ if ($avx>2);
+.LSEH_info_poly1305_blocks_avx512:
+ .byte 9,0,0,0
+ .rva avx_handler
+ .rva .Ldo_avx512_body,.Ldo_avx512_epilogue # HandlerData[]
+___
+}
+
+open SELF,$0;
+while(<SELF>) {
+ next if (/^#!/);
+ last if (!s/^#/\/\// and !/^$/);
+ print;
+}
+close SELF;
+
+foreach (split('\n',$code)) {
+ s/\`([^\`]*)\`/eval($1)/ge;
+ s/%r([a-z]+)#d/%e$1/g;
+ s/%r([0-9]+)#d/%r$1d/g;
+ s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;
+
+ if ($kernel) {
+ s/(^\.type.*),[0-9]+$/\1/;
+ s/(^\.type.*),\@abi-omnipotent+$/\1,\@function/;
+ next if /^\.cfi.*/;
+ }
+
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 0cc4537e6617..79bb58737d52 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -1,8 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
- * Poly1305 authenticator algorithm, RFC7539, SIMD glue code
- *
- * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#include <crypto/algapi.h>
@@ -13,108 +11,166 @@
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <asm/intel-family.h>
#include <asm/simd.h>
-asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
- const u32 *r, unsigned int blocks);
-asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
- unsigned int blocks, const u32 *u);
-asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
- unsigned int blocks, const u32 *u);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd);
+asmlinkage void poly1305_init_x86_64(void *ctx,
+ const u8 key[POLY1305_KEY_SIZE]);
+asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
+ const size_t len, const u32 padbit);
+asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len,
+ const u32 padbit);
+asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len,
+ const u32 padbit);
+asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp,
+ const size_t len, const u32 padbit);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512);
+
+struct poly1305_arch_internal {
+ union {
+ struct {
+ u32 h[5];
+ u32 is_base2_26;
+ };
+ u64 hs[3];
+ };
+ u64 r[2];
+ u64 pad;
+ struct { u32 r2, r1, r4, r3; } rn[9];
+};
-static void poly1305_simd_mult(u32 *a, const u32 *b)
+/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit
+ * the unfortunate situation of using AVX and then having to go back to scalar
+ * -- because the user is silly and has called the update function from two
+ * separate contexts -- then we need to convert back to the original base before
+ * proceeding. It is possible to reason that the initial reduction below is
+ * sufficient given the implementation invariants. However, for an avoidance of
+ * doubt and because this is not performance critical, we do the full reduction
+ * anyway. Z3 proof of below function: https://xn--4db.cc/ltPtHCKN/py
+ */
+static void convert_to_base2_64(void *ctx)
{
- u8 m[POLY1305_BLOCK_SIZE];
-
- memset(m, 0, sizeof(m));
- /* The poly1305 block function adds a hi-bit to the accumulator which
- * we don't need for key multiplication; compensate for it. */
- a[4] -= 1 << 24;
- poly1305_block_sse2(a, m, b, 1);
+ struct poly1305_arch_internal *state = ctx;
+ u32 cy;
+
+ if (!state->is_base2_26)
+ return;
+
+ cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy;
+ cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy;
+ cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy;
+ cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy;
+ state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0];
+ state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12);
+ state->hs[2] = state->h[4] >> 24;
+#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
+ cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL);
+ state->hs[2] &= 3;
+ state->hs[0] += cy;
+ state->hs[1] += (cy = ULT(state->hs[0], cy));
+ state->hs[2] += ULT(state->hs[1], cy);
+#undef ULT
+ state->is_base2_26 = 0;
}
-static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
+static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
{
- unsigned int datalen;
-
- if (unlikely(!dctx->sset)) {
- datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
- src += srclen - datalen;
- srclen = datalen;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_blocks(&dctx->h, dctx->r, src,
- srclen / POLY1305_BLOCK_SIZE, 1);
- srclen %= POLY1305_BLOCK_SIZE;
- }
- return srclen;
+ poly1305_init_x86_64(ctx, key);
}
-static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
+static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
+ const u32 padbit)
{
- unsigned int blocks, datalen;
-
- if (unlikely(!dctx->sset)) {
- datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
- src += srclen - datalen;
- srclen = datalen;
+ struct poly1305_arch_internal *state = ctx;
+
+ /* SIMD disables preemption, so relax after processing each page. */
+ BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
+ PAGE_SIZE % POLY1305_BLOCK_SIZE);
+
+ if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) ||
+ (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
+ !crypto_simd_usable()) {
+ convert_to_base2_64(ctx);
+ poly1305_blocks_x86_64(ctx, inp, len, padbit);
+ return;
}
- if (IS_ENABLED(CONFIG_AS_AVX2) &&
- static_branch_likely(&poly1305_use_avx2) &&
- srclen >= POLY1305_BLOCK_SIZE * 4) {
- if (unlikely(dctx->rset < 4)) {
- if (dctx->rset < 2) {
- dctx->r[1] = dctx->r[0];
- poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
- }
- dctx->r[2] = dctx->r[1];
- poly1305_simd_mult(dctx->r[2].r, dctx->r[0].r);
- dctx->r[3] = dctx->r[2];
- poly1305_simd_mult(dctx->r[3].r, dctx->r[0].r);
- dctx->rset = 4;
- }
- blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
- poly1305_4block_avx2(dctx->h.h, src, dctx->r[0].r, blocks,
- dctx->r[1].r);
- src += POLY1305_BLOCK_SIZE * 4 * blocks;
- srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
+ for (;;) {
+ const size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+ kernel_fpu_begin();
+ if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
+ poly1305_blocks_avx512(ctx, inp, bytes, padbit);
+ else if (IS_ENABLED(CONFIG_AS_AVX2) && static_branch_likely(&poly1305_use_avx2))
+ poly1305_blocks_avx2(ctx, inp, bytes, padbit);
+ else
+ poly1305_blocks_avx(ctx, inp, bytes, padbit);
+ kernel_fpu_end();
+ len -= bytes;
+ if (!len)
+ break;
+ inp += bytes;
}
+}
- if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
- if (unlikely(dctx->rset < 2)) {
- dctx->r[1] = dctx->r[0];
- poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r);
- dctx->rset = 2;
- }
- blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
- poly1305_2block_sse2(dctx->h.h, src, dctx->r[0].r,
- blocks, dctx->r[1].r);
- src += POLY1305_BLOCK_SIZE * 2 * blocks;
- srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_block_sse2(dctx->h.h, src, dctx->r[0].r, 1);
- srclen -= POLY1305_BLOCK_SIZE;
- }
- return srclen;
+static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx))
+ poly1305_emit_x86_64(ctx, mac, nonce);
+ else
+ poly1305_emit_avx(ctx, mac, nonce);
}
-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
{
- poly1305_init_generic(desc, key);
+ poly1305_simd_init(&dctx->h, key);
+ dctx->s[0] = get_unaligned_le32(&key[16]);
+ dctx->s[1] = get_unaligned_le32(&key[20]);
+ dctx->s[2] = get_unaligned_le32(&key[24]);
+ dctx->s[3] = get_unaligned_le32(&key[28]);
+ dctx->buflen = 0;
+ dctx->sset = true;
}
EXPORT_SYMBOL(poly1305_init_arch);
+static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
+ const u8 *inp, unsigned int len)
+{
+ unsigned int acc = 0;
+ if (unlikely(!dctx->sset)) {
+ if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
+ poly1305_simd_init(&dctx->h, inp);
+ inp += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ acc += POLY1305_BLOCK_SIZE;
+ dctx->rset = 1;
+ }
+ if (len >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(&inp[0]);
+ dctx->s[1] = get_unaligned_le32(&inp[4]);
+ dctx->s[2] = get_unaligned_le32(&inp[8]);
+ dctx->s[3] = get_unaligned_le32(&inp[12]);
+ inp += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ acc += POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ }
+ return acc;
+}
+
void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
unsigned int srclen)
{
- unsigned int bytes;
+ unsigned int bytes, used;
if (unlikely(dctx->buflen)) {
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
@@ -124,31 +180,19 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
dctx->buflen += bytes;
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- if (static_branch_likely(&poly1305_use_simd) &&
- likely(crypto_simd_usable())) {
- kernel_fpu_begin();
- poly1305_simd_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE);
- kernel_fpu_end();
- } else {
- poly1305_scalar_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE);
- }
+ if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, POLY1305_BLOCK_SIZE)))
+ poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1);
dctx->buflen = 0;
}
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- if (static_branch_likely(&poly1305_use_simd) &&
- likely(crypto_simd_usable())) {
- kernel_fpu_begin();
- bytes = poly1305_simd_blocks(dctx, src, srclen);
- kernel_fpu_end();
- } else {
- bytes = poly1305_scalar_blocks(dctx, src, srclen);
- }
- src += srclen - bytes;
- srclen = bytes;
+ bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
+ srclen -= bytes;
+ used = crypto_poly1305_setdctxkey(dctx, src, bytes);
+ if (likely(bytes - used))
+ poly1305_simd_blocks(&dctx->h, src + used, bytes - used, 1);
+ src += bytes;
}
if (unlikely(srclen)) {
@@ -158,9 +202,17 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
}
EXPORT_SYMBOL(poly1305_update_arch);
-void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest)
+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
{
- poly1305_final_generic(desc, digest);
+ if (unlikely(dctx->buflen)) {
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_simd_emit(&dctx->h, dst, dctx->s);
+ *dctx = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL(poly1305_final_arch);
@@ -168,38 +220,34 @@ static int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- poly1305_core_init(&dctx->h);
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
+ *dctx = (struct poly1305_desc_ctx){};
return 0;
}
-static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
+static int crypto_poly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_generic(dctx, dst);
+ poly1305_update_arch(dctx, src, srclen);
return 0;
}
-static int poly1305_simd_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
+static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- poly1305_update_arch(dctx, src, srclen);
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ poly1305_final_arch(dctx, dst);
return 0;
}
static struct shash_alg alg = {
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_poly1305_init,
- .update = poly1305_simd_update,
+ .update = crypto_poly1305_update,
.final = crypto_poly1305_final,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
@@ -213,17 +261,19 @@ static struct shash_alg alg = {
static int __init poly1305_simd_mod_init(void)
{
- if (!boot_cpu_has(X86_FEATURE_XMM2))
- return 0;
-
- static_branch_enable(&poly1305_use_simd);
-
- if (IS_ENABLED(CONFIG_AS_AVX2) &&
- boot_cpu_has(X86_FEATURE_AVX) &&
+ if (IS_ENABLED(CONFIG_AS_AVX) && boot_cpu_has(X86_FEATURE_AVX) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
+ static_branch_enable(&poly1305_use_avx);
+ if (IS_ENABLED(CONFIG_AS_AVX2) && boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
static_branch_enable(&poly1305_use_avx2);
-
+ if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
+ /* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */
+ boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X)
+ static_branch_enable(&poly1305_use_avx512);
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0;
}
@@ -237,7 +287,7 @@ module_init(poly1305_simd_mod_init);
module_exit(poly1305_simd_mod_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
MODULE_DESCRIPTION("Poly1305 authenticator");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-simd");
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 13fd8d3d2da0..f973ace44ad3 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -19,18 +19,16 @@
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
/* 16-way AVX2 parallel cipher functions */
-asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
+asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
+asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -44,13 +42,13 @@ static const struct common_glue_ctx serpent_enc = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) }
+ .fn_u = { .ecb = serpent_ecb_enc_16way }
}, {
.num_blocks = 8,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+ .fn_u = { .ecb = __serpent_encrypt }
} }
};
@@ -60,13 +58,13 @@ static const struct common_glue_ctx serpent_ctr = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) }
+ .fn_u = { .ctr = serpent_ctr_16way }
}, {
.num_blocks = 8,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
+ .fn_u = { .ctr = serpent_ctr_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
+ .fn_u = { .ctr = __serpent_crypt_ctr }
} }
};
@@ -76,13 +74,13 @@ static const struct common_glue_ctx serpent_enc_xts = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) }
+ .fn_u = { .xts = serpent_xts_enc_16way }
}, {
.num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+ .fn_u = { .xts = serpent_xts_enc_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+ .fn_u = { .xts = serpent_xts_enc }
} }
};
@@ -92,13 +90,13 @@ static const struct common_glue_ctx serpent_dec = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) }
+ .fn_u = { .ecb = serpent_ecb_dec_16way }
}, {
.num_blocks = 8,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .ecb = __serpent_decrypt }
} }
};
@@ -108,13 +106,13 @@ static const struct common_glue_ctx serpent_dec_cbc = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) }
+ .fn_u = { .cbc = serpent_cbc_dec_16way }
}, {
.num_blocks = 8,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
+ .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .cbc = __serpent_decrypt }
} }
};
@@ -124,13 +122,13 @@ static const struct common_glue_ctx serpent_dec_xts = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) }
+ .fn_u = { .xts = serpent_xts_dec_16way }
}, {
.num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+ .fn_u = { .xts = serpent_xts_dec_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+ .fn_u = { .xts = serpent_xts_dec }
} }
};
@@ -146,8 +144,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
- req);
+ return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -166,8 +163,8 @@ static int xts_encrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_enc_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx, false);
+ __serpent_encrypt, &ctx->tweak_ctx,
+ &ctx->crypt_ctx, false);
}
static int xts_decrypt(struct skcipher_request *req)
@@ -176,8 +173,8 @@ static int xts_decrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_dec_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+ __serpent_encrypt, &ctx->tweak_ctx,
+ &ctx->crypt_ctx, true);
}
static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 7d3dca38a5a2..7806d1cbe854 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -20,33 +20,35 @@
#include <asm/crypto/serpent-avx.h>
/* 8-way parallel cipher functions */
-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
-void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
{
be128 ctrblk;
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
le128_to_be128(&ctrblk, iv);
le128_inc(iv);
@@ -56,17 +58,15 @@ void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
}
EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);
-void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(__serpent_encrypt));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt);
}
EXPORT_SYMBOL_GPL(serpent_xts_enc);
-void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(__serpent_decrypt));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt);
}
EXPORT_SYMBOL_GPL(serpent_xts_dec);
@@ -102,10 +102,10 @@ static const struct common_glue_ctx serpent_enc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+ .fn_u = { .ecb = __serpent_encrypt }
} }
};
@@ -115,10 +115,10 @@ static const struct common_glue_ctx serpent_ctr = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
+ .fn_u = { .ctr = serpent_ctr_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
+ .fn_u = { .ctr = __serpent_crypt_ctr }
} }
};
@@ -128,10 +128,10 @@ static const struct common_glue_ctx serpent_enc_xts = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+ .fn_u = { .xts = serpent_xts_enc_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+ .fn_u = { .xts = serpent_xts_enc }
} }
};
@@ -141,10 +141,10 @@ static const struct common_glue_ctx serpent_dec = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .ecb = __serpent_decrypt }
} }
};
@@ -154,10 +154,10 @@ static const struct common_glue_ctx serpent_dec_cbc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
+ .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .cbc = __serpent_decrypt }
} }
};
@@ -167,10 +167,10 @@ static const struct common_glue_ctx serpent_dec_xts = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+ .fn_u = { .xts = serpent_xts_dec_8way_avx }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+ .fn_u = { .xts = serpent_xts_dec }
} }
};
@@ -186,8 +186,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
- req);
+ return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -206,8 +205,8 @@ static int xts_encrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_enc_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx, false);
+ __serpent_encrypt, &ctx->tweak_ctx,
+ &ctx->crypt_ctx, false);
}
static int xts_decrypt(struct skcipher_request *req)
@@ -216,8 +215,8 @@ static int xts_decrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_dec_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+ __serpent_encrypt, &ctx->tweak_ctx,
+ &ctx->crypt_ctx, true);
}
static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 5fdf1931d069..4fed8d26b91a 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -31,9 +31,11 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
-static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
+static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s)
{
u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
unsigned int j;
for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
@@ -45,9 +47,11 @@ static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}
-static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
{
be128 ctrblk;
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
le128_to_be128(&ctrblk, iv);
le128_inc(iv);
@@ -56,10 +60,12 @@ static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
u128_xor(dst, src, (u128 *)&ctrblk);
}
-static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
+static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s,
le128 *iv)
{
be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
unsigned int i;
for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
@@ -79,10 +85,10 @@ static const struct common_glue_ctx serpent_enc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
+ .fn_u = { .ecb = serpent_enc_blk_xway }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+ .fn_u = { .ecb = __serpent_encrypt }
} }
};
@@ -92,10 +98,10 @@ static const struct common_glue_ctx serpent_ctr = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
+ .fn_u = { .ctr = serpent_crypt_ctr_xway }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
+ .fn_u = { .ctr = serpent_crypt_ctr }
} }
};
@@ -105,10 +111,10 @@ static const struct common_glue_ctx serpent_dec = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
+ .fn_u = { .ecb = serpent_dec_blk_xway }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .ecb = __serpent_decrypt }
} }
};
@@ -118,10 +124,10 @@ static const struct common_glue_ctx serpent_dec_cbc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
+ .fn_u = { .cbc = serpent_decrypt_cbc_xway }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .cbc = __serpent_decrypt }
} }
};
@@ -137,7 +143,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ return glue_cbc_encrypt_req_128bit(__serpent_encrypt,
req);
}
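The serpent glue changes above (and the SHA-1 glue changes below) all follow one pattern: instead of casting each asm helper to the generic glue function type with GLUE_FUNC_CAST and friends, the helpers are declared with the common (const void *ctx, u8 *dst, const u8 *src, ...) prototype and convert their context argument internally. Calling a function through a pointer of a different type is undefined behaviour and trips Clang's Control Flow Integrity, which is what dropping the casts avoids. A simplified stand-in, with types and names that are illustrative rather than the kernel's real glue_helper API:

typedef unsigned char u8;

/* common prototype, as used by the updated common_glue_ctx tables */
typedef void (*ecb_fn_t)(const void *ctx, u8 *dst, const u8 *src);

struct demo_cipher_ctx {
	u8 key[16];
};

/* new style: take const void *, convert inside the helper */
static void demo_ecb_enc(const void *ctx, u8 *dst, const u8 *src)
{
	const struct demo_cipher_ctx *c = ctx;
	int i;

	for (i = 0; i < 16; i++)
		dst[i] = src[i] ^ c->key[i];	/* stand-in "cipher" */
}

/* the table entry no longer needs a GLUE_FUNC_CAST-style cast */
static const ecb_fn_t demo_ecb_table[] = { demo_ecb_enc };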
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 6decc85ef7b7..1e594d60afa5 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -62,11 +62,11 @@
*Visit http://software.intel.com/en-us/articles/
*and refer to improving-the-performance-of-the-secure-hash-algorithm-1/
*
- *Updates 20-byte SHA-1 record in 'hash' for even number of
- *'num_blocks' consecutive 64-byte blocks
+ *Updates 20-byte SHA-1 record at start of 'state', from 'input', for
+ *even number of 'blocks' consecutive 64-byte blocks.
*
*extern "C" void sha1_transform_avx2(
- * int *hash, const char* input, size_t num_blocks );
+ * struct sha1_state *state, const u8* input, int blocks );
*/
#include <linux/linkage.h>
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index 5d03c1173690..12e2d19d7402 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -457,9 +457,13 @@ W_PRECALC_SSSE3
movdqu \a,\b
.endm
-/* SSSE3 optimized implementation:
- * extern "C" void sha1_transform_ssse3(u32 *digest, const char *data, u32 *ws,
- * unsigned int rounds);
+/*
+ * SSSE3 optimized implementation:
+ *
+ * extern "C" void sha1_transform_ssse3(struct sha1_state *state,
+ * const u8 *data, int blocks);
+ *
+ * Note that struct sha1_state is assumed to begin with u32 state[5].
*/
SHA1_VECTOR_ASM sha1_transform_ssse3
@@ -545,8 +549,8 @@ W_PRECALC_AVX
/* AVX optimized implementation:
- * extern "C" void sha1_transform_avx(u32 *digest, const char *data, u32 *ws,
- * unsigned int rounds);
+ * extern "C" void sha1_transform_avx(struct sha1_state *state,
+ * const u8 *data, int blocks);
*/
SHA1_VECTOR_ASM sha1_transform_avx
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 639d4c2fd6a8..d70b40ad594c 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -27,11 +27,8 @@
#include <crypto/sha1_base.h>
#include <asm/simd.h>
-typedef void (sha1_transform_fn)(u32 *digest, const char *data,
- unsigned int rounds);
-
static int sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, sha1_transform_fn *sha1_xform)
+ unsigned int len, sha1_block_fn *sha1_xform)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@@ -39,48 +36,47 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return crypto_sha1_update(desc, data, len);
- /* make sure casting to sha1_block_fn() is safe */
+ /*
+ * Make sure struct sha1_state begins directly with the SHA1
+ * 160-bit internal state, as this is what the asm functions expect.
+ */
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
kernel_fpu_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_xform);
+ sha1_base_do_update(desc, data, len, sha1_xform);
kernel_fpu_end();
return 0;
}
static int sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
+ unsigned int len, u8 *out, sha1_block_fn *sha1_xform)
{
if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out);
kernel_fpu_begin();
if (len)
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_xform);
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform);
+ sha1_base_do_update(desc, data, len, sha1_xform);
+ sha1_base_do_finalize(desc, sha1_xform);
kernel_fpu_end();
return sha1_base_finish(desc, out);
}
-asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
- unsigned int rounds);
+asmlinkage void sha1_transform_ssse3(struct sha1_state *state,
+ const u8 *data, int blocks);
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha1_update(desc, data, len,
- (sha1_transform_fn *) sha1_transform_ssse3);
+ return sha1_update(desc, data, len, sha1_transform_ssse3);
}
static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return sha1_finup(desc, data, len, out,
- (sha1_transform_fn *) sha1_transform_ssse3);
+ return sha1_finup(desc, data, len, out, sha1_transform_ssse3);
}
/* Add padding and return the message digest. */
@@ -119,21 +115,19 @@ static void unregister_sha1_ssse3(void)
}
#ifdef CONFIG_AS_AVX
-asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
- unsigned int rounds);
+asmlinkage void sha1_transform_avx(struct sha1_state *state,
+ const u8 *data, int blocks);
static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha1_update(desc, data, len,
- (sha1_transform_fn *) sha1_transform_avx);
+ return sha1_update(desc, data, len, sha1_transform_avx);
}
static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return sha1_finup(desc, data, len, out,
- (sha1_transform_fn *) sha1_transform_avx);
+ return sha1_finup(desc, data, len, out, sha1_transform_avx);
}
static int sha1_avx_final(struct shash_desc *desc, u8 *out)
@@ -190,8 +184,8 @@ static inline void unregister_sha1_avx(void) { }
#if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX)
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */
-asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
- unsigned int rounds);
+asmlinkage void sha1_transform_avx2(struct sha1_state *state,
+ const u8 *data, int blocks);
static bool avx2_usable(void)
{
@@ -203,28 +197,26 @@ static bool avx2_usable(void)
return false;
}
-static void sha1_apply_transform_avx2(u32 *digest, const char *data,
- unsigned int rounds)
+static void sha1_apply_transform_avx2(struct sha1_state *state,
+ const u8 *data, int blocks)
{
/* Select the optimal transform based on data block size */
- if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE)
- sha1_transform_avx2(digest, data, rounds);
+ if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE)
+ sha1_transform_avx2(state, data, blocks);
else
- sha1_transform_avx(digest, data, rounds);
+ sha1_transform_avx(state, data, blocks);
}
static int sha1_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha1_update(desc, data, len,
- (sha1_transform_fn *) sha1_apply_transform_avx2);
+ return sha1_update(desc, data, len, sha1_apply_transform_avx2);
}
static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return sha1_finup(desc, data, len, out,
- (sha1_transform_fn *) sha1_apply_transform_avx2);
+ return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2);
}
static int sha1_avx2_final(struct shash_desc *desc, u8 *out)
@@ -267,21 +259,19 @@ static inline void unregister_sha1_avx2(void) { }
#endif
#ifdef CONFIG_AS_SHA1_NI
-asmlinkage void sha1_ni_transform(u32 *digest, const char *data,
- unsigned int rounds);
+asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
+ int rounds);
static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha1_update(desc, data, len,
- (sha1_transform_fn *) sha1_ni_transform);
+ return sha1_update(desc, data, len, sha1_ni_transform);
}
static int sha1_ni_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return sha1_finup(desc, data, len, out,
- (sha1_transform_fn *) sha1_ni_transform);
+ return sha1_finup(desc, data, len, out, sha1_ni_transform);
}
static int sha1_ni_final(struct shash_desc *desc, u8 *out)
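The sha1/sha256/sha512 glue conversions in this series all follow the same pattern: once the assembly routines are declared with the exact signature of the corresponding sha*_block_fn typedef (state structure first, then data pointer and block count), the helpers can take the transform as a properly typed function pointer and the (sha*_block_fn *) casts disappear. A minimal standalone C sketch of that idea, using hypothetical names rather than kernel code:

/*
 * Standalone illustration (hypothetical names, not kernel code): declare
 * the transform with the same signature as the block-function typedef,
 * and the caller needs no function-pointer cast.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_sha1_state {
        uint32_t state[5];      /* must come first, as the asm expects */
        uint64_t count;
        uint8_t buf[64];
};

typedef void (demo_block_fn)(struct demo_sha1_state *st,
                             const uint8_t *data, int blocks);

/* Stand-in for sha1_transform_ssse3(); only counts the bytes "hashed". */
static void demo_transform(struct demo_sha1_state *st,
                           const uint8_t *data, int blocks)
{
        (void)data;
        st->count += (uint64_t)blocks * 64;
}

/* Equivalent of sha1_update(): takes the transform as a typed pointer. */
static void demo_update(struct demo_sha1_state *st, const uint8_t *data,
                        int blocks, demo_block_fn *block_fn)
{
        block_fn(st, data, blocks);     /* no (demo_block_fn *) cast needed */
}

int main(void)
{
        struct demo_sha1_state st = { .count = 0 };
        uint8_t block[64] = { 0 };

        demo_update(&st, block, 1, demo_transform);
        printf("bytes processed: %llu\n", (unsigned long long)st.count);
        return 0;
}

The BUILD_BUG_ON(offsetof(..., state) != 0) checks above remain the only glue-level assumption: the digest words must sit at the very start of the state structure, since that is all the asm code ever touches.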
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 22e14c8dd2e4..fcbc30f58c38 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -341,8 +341,8 @@ a = TMP_
.endm
########################################################################
-## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks)
-## arg 1 : pointer to digest
+## void sha256_transform_avx(struct sha256_state *state, const u8 *data, int blocks)
+## arg 1 : pointer to state
## arg 2 : pointer to input data
## arg 3 : Num blocks
########################################################################
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 519b551ad576..499d9ec129de 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -520,8 +520,8 @@ STACK_SIZE = _RSP + _RSP_SIZE
.endm
########################################################################
-## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks)
-## arg 1 : pointer to digest
+## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks)
+## arg 1 : pointer to state
## arg 2 : pointer to input data
## arg 3 : Num blocks
########################################################################
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index 69cc2f91dc4c..ddfa863b4ee3 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -347,8 +347,10 @@ a = TMP_
.endm
########################################################################
-## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks)
-## arg 1 : pointer to digest
+## void sha256_transform_ssse3(struct sha256_state *state, const u8 *data,
+## int blocks);
+## arg 1 : pointer to state
+## (struct sha256_state is assumed to begin with u32 state[8])
## arg 2 : pointer to input data
## arg 3 : Num blocks
########################################################################
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index f9aff31fe59e..03ad657c04bd 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -41,12 +41,11 @@
#include <linux/string.h>
#include <asm/simd.h>
-asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
- u64 rounds);
-typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds);
+asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
+ const u8 *data, int blocks);
static int _sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, sha256_transform_fn *sha256_xform)
+ unsigned int len, sha256_block_fn *sha256_xform)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
@@ -54,28 +53,29 @@ static int _sha256_update(struct shash_desc *desc, const u8 *data,
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_update(desc, data, len);
- /* make sure casting to sha256_block_fn() is safe */
+ /*
+ * Make sure struct sha256_state begins directly with the SHA256
+ * 256-bit internal state, as this is what the asm functions expect.
+ */
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
kernel_fpu_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_xform);
+ sha256_base_do_update(desc, data, len, sha256_xform);
kernel_fpu_end();
return 0;
}
static int sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
+ unsigned int len, u8 *out, sha256_block_fn *sha256_xform)
{
if (!crypto_simd_usable())
return crypto_sha256_finup(desc, data, len, out);
kernel_fpu_begin();
if (len)
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_xform);
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_xform);
+ sha256_base_do_update(desc, data, len, sha256_xform);
+ sha256_base_do_finalize(desc, sha256_xform);
kernel_fpu_end();
return sha256_base_finish(desc, out);
@@ -145,8 +145,8 @@ static void unregister_sha256_ssse3(void)
}
#ifdef CONFIG_AS_AVX
-asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
- u64 rounds);
+asmlinkage void sha256_transform_avx(struct sha256_state *state,
+ const u8 *data, int blocks);
static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -227,8 +227,8 @@ static inline void unregister_sha256_avx(void) { }
#endif
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
-asmlinkage void sha256_transform_rorx(u32 *digest, const char *data,
- u64 rounds);
+asmlinkage void sha256_transform_rorx(struct sha256_state *state,
+ const u8 *data, int blocks);
static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -307,8 +307,8 @@ static inline void unregister_sha256_avx2(void) { }
#endif
#ifdef CONFIG_AS_SHA256_NI
-asmlinkage void sha256_ni_transform(u32 *digest, const char *data,
- u64 rounds); /*unsigned int rounds);*/
+asmlinkage void sha256_ni_transform(struct sha256_state *digest,
+ const u8 *data, int rounds);
static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 3704ddd7e5d5..90ea945ba5e6 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -271,11 +271,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm
########################################################################
-# void sha512_transform_avx(void* D, const void* M, u64 L)
-# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
-# The size of the message pointed to by M must be an integer multiple of SHA512
-# message blocks.
-# L is the message length in SHA512 blocks
+# void sha512_transform_avx(sha512_state *state, const u8 *data, int blocks)
+# Purpose: Updates the SHA512 digest stored at "state" with the message
+# stored in "data".
+# The size of the message pointed to by "data" must be an integer multiple
+# of SHA512 message blocks.
+# "blocks" is the message length in SHA512 blocks
########################################################################
SYM_FUNC_START(sha512_transform_avx)
cmp $0, msglen
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index 80d830e7ee09..3dd886b14e7d 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -563,11 +563,12 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm
########################################################################
-# void sha512_transform_rorx(void* D, const void* M, uint64_t L)#
-# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
-# The size of the message pointed to by M must be an integer multiple of SHA512
-# message blocks.
-# L is the message length in SHA512 blocks
+# void sha512_transform_rorx(sha512_state *state, const u8 *data, int blocks)
+# Purpose: Updates the SHA512 digest stored at "state" with the message
+# stored in "data".
+# The size of the message pointed to by "data" must be an integer multiple
+# of SHA512 message blocks.
+# "blocks" is the message length in SHA512 blocks
########################################################################
SYM_FUNC_START(sha512_transform_rorx)
# Allocate Stack Space
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index 838f984e95d9..7946a1bee85b 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -269,11 +269,14 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
.endm
########################################################################
-# void sha512_transform_ssse3(void* D, const void* M, u64 L)#
-# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
-# The size of the message pointed to by M must be an integer multiple of SHA512
-# message blocks.
-# L is the message length in SHA512 blocks.
+## void sha512_transform_ssse3(struct sha512_state *state, const u8 *data,
+## int blocks);
+# (struct sha512_state is assumed to begin with u64 state[8])
+# Purpose: Updates the SHA512 digest stored at "state" with the message
+# stored in "data".
+# The size of the message pointed to by "data" must be an integer multiple
+# of SHA512 message blocks.
+# "blocks" is the message length in SHA512 blocks.
########################################################################
SYM_FUNC_START(sha512_transform_ssse3)
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 458356a3f124..1c444f41037c 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -39,13 +39,11 @@
#include <crypto/sha512_base.h>
#include <asm/simd.h>
-asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
- u64 rounds);
-
-typedef void (sha512_transform_fn)(u64 *digest, const char *data, u64 rounds);
+asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
+ const u8 *data, int blocks);
static int sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, sha512_transform_fn *sha512_xform)
+ unsigned int len, sha512_block_fn *sha512_xform)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
@@ -53,28 +51,29 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return crypto_sha512_update(desc, data, len);
- /* make sure casting to sha512_block_fn() is safe */
+ /*
+ * Make sure struct sha512_state begins directly with the SHA512
+ * 512-bit internal state, as this is what the asm functions expect.
+ */
BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);
kernel_fpu_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_xform);
+ sha512_base_do_update(desc, data, len, sha512_xform);
kernel_fpu_end();
return 0;
}
static int sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
+ unsigned int len, u8 *out, sha512_block_fn *sha512_xform)
{
if (!crypto_simd_usable())
return crypto_sha512_finup(desc, data, len, out);
kernel_fpu_begin();
if (len)
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_xform);
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_xform);
+ sha512_base_do_update(desc, data, len, sha512_xform);
+ sha512_base_do_finalize(desc, sha512_xform);
kernel_fpu_end();
return sha512_base_finish(desc, out);
@@ -144,8 +143,8 @@ static void unregister_sha512_ssse3(void)
}
#ifdef CONFIG_AS_AVX
-asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
- u64 rounds);
+asmlinkage void sha512_transform_avx(struct sha512_state *state,
+ const u8 *data, int blocks);
static bool avx_usable(void)
{
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
@@ -225,8 +224,8 @@ static inline void unregister_sha512_avx(void) { }
#endif
#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
-asmlinkage void sha512_transform_rorx(u64 *digest, const char *data,
- u64 rounds);
+asmlinkage void sha512_transform_rorx(struct sha512_state *state,
+ const u8 *data, int blocks);
static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index d561c821788b..2dbc8ce3730e 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -22,20 +22,17 @@
#define TWOFISH_PARALLEL_BLOCKS 8
/* 8-way parallel cipher functions */
-asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
-asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -43,22 +40,19 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
return twofish_setkey(&tfm->base, key, keylen);
}
-static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, false);
}
-static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(twofish_enc_blk));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk);
}
-static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(twofish_dec_blk));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk);
}
struct twofish_xts_ctx {
@@ -70,7 +64,6 @@ static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int err;
err = xts_verify_key(tfm, key, keylen);
@@ -78,13 +71,12 @@ static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
return err;
/* first half of xts-key is for crypt */
- err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+ err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2);
if (err)
return err;
/* second half of xts-key is for tweak */
- return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
- flags);
+ return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}
static const struct common_glue_ctx twofish_enc = {
@@ -93,13 +85,13 @@ static const struct common_glue_ctx twofish_enc = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) }
+ .fn_u = { .ecb = twofish_ecb_enc_8way }
}, {
.num_blocks = 3,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
+ .fn_u = { .ecb = twofish_enc_blk_3way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
+ .fn_u = { .ecb = twofish_enc_blk }
} }
};
@@ -109,13 +101,13 @@ static const struct common_glue_ctx twofish_ctr = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) }
+ .fn_u = { .ctr = twofish_ctr_8way }
}, {
.num_blocks = 3,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
+ .fn_u = { .ctr = twofish_enc_blk_ctr_3way }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
+ .fn_u = { .ctr = twofish_enc_blk_ctr }
} }
};
@@ -125,10 +117,10 @@ static const struct common_glue_ctx twofish_enc_xts = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) }
+ .fn_u = { .xts = twofish_xts_enc_8way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) }
+ .fn_u = { .xts = twofish_xts_enc }
} }
};
@@ -138,13 +130,13 @@ static const struct common_glue_ctx twofish_dec = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) }
+ .fn_u = { .ecb = twofish_ecb_dec_8way }
}, {
.num_blocks = 3,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
+ .fn_u = { .ecb = twofish_dec_blk_3way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
+ .fn_u = { .ecb = twofish_dec_blk }
} }
};
@@ -154,13 +146,13 @@ static const struct common_glue_ctx twofish_dec_cbc = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) }
+ .fn_u = { .cbc = twofish_cbc_dec_8way }
}, {
.num_blocks = 3,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
+ .fn_u = { .cbc = twofish_dec_blk_cbc_3way }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
+ .fn_u = { .cbc = twofish_dec_blk }
} }
};
@@ -170,10 +162,10 @@ static const struct common_glue_ctx twofish_dec_xts = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) }
+ .fn_u = { .xts = twofish_xts_dec_8way }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) }
+ .fn_u = { .xts = twofish_xts_dec }
} }
};
@@ -189,8 +181,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
- req);
+ return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -208,8 +199,7 @@ static int xts_encrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&twofish_enc_xts, req,
- XTS_TWEAK_CAST(twofish_enc_blk),
+ return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk,
&ctx->tweak_ctx, &ctx->crypt_ctx, false);
}
@@ -218,8 +208,7 @@ static int xts_decrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_req_128bit(&twofish_dec_xts, req,
- XTS_TWEAK_CAST(twofish_enc_blk),
+ return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk,
&ctx->tweak_ctx, &ctx->crypt_ctx, true);
}
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 1dc9e29f221e..768af6075479 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -25,21 +25,22 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
return twofish_setkey(&tfm->base, key, keylen);
}
-static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, false);
}
-static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
+static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst,
const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, true);
}
-void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src)
+void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s)
{
u128 ivs[2];
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
ivs[0] = src[0];
ivs[1] = src[1];
@@ -51,9 +52,11 @@ void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src)
}
EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way);
-void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
{
be128 ctrblk;
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
if (dst != src)
*dst = *src;
@@ -66,10 +69,11 @@ void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
}
EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr);
-void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
- le128 *iv)
+void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
{
be128 ctrblks[3];
+ u128 *dst = (u128 *)d;
+ const u128 *src = (const u128 *)s;
if (dst != src) {
dst[0] = src[0];
@@ -94,10 +98,10 @@ static const struct common_glue_ctx twofish_enc = {
.funcs = { {
.num_blocks = 3,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
+ .fn_u = { .ecb = twofish_enc_blk_3way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
+ .fn_u = { .ecb = twofish_enc_blk }
} }
};
@@ -107,10 +111,10 @@ static const struct common_glue_ctx twofish_ctr = {
.funcs = { {
.num_blocks = 3,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) }
+ .fn_u = { .ctr = twofish_enc_blk_ctr_3way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) }
+ .fn_u = { .ctr = twofish_enc_blk_ctr }
} }
};
@@ -120,10 +124,10 @@ static const struct common_glue_ctx twofish_dec = {
.funcs = { {
.num_blocks = 3,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
+ .fn_u = { .ecb = twofish_dec_blk_3way }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
+ .fn_u = { .ecb = twofish_dec_blk }
} }
};
@@ -133,10 +137,10 @@ static const struct common_glue_ctx twofish_dec_cbc = {
.funcs = { {
.num_blocks = 3,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
+ .fn_u = { .cbc = twofish_dec_blk_cbc_3way }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
+ .fn_u = { .cbc = twofish_dec_blk }
} }
};
@@ -152,8 +156,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
- req);
+ return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
}
static int cbc_decrypt(struct skcipher_request *req)
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
index a5d86fc0593f..f6d91861cb14 100644
--- a/arch/x86/include/asm/crypto/camellia.h
+++ b/arch/x86/include/asm/crypto/camellia.h
@@ -26,71 +26,66 @@ struct camellia_xts_ctx {
extern int __camellia_setkey(struct camellia_ctx *cctx,
const unsigned char *key,
- unsigned int key_len, u32 *flags);
+ unsigned int key_len);
extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
/* regular block cipher functions */
-asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
+ bool xor);
+asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
/* 2-way parallel cipher functions */
-asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
+ bool xor);
+asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
/* 16-way parallel cipher functions (avx/aes-ni) */
-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
-
-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-
-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-
-static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src)
+asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
+
+asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+
+asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+
+static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src)
{
__camellia_enc_blk(ctx, dst, src, false);
}
-static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src)
{
__camellia_enc_blk(ctx, dst, src, true);
}
-static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst,
const u8 *src)
{
__camellia_enc_blk_2way(ctx, dst, src, false);
}
-static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
+static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst,
const u8 *src)
{
__camellia_enc_blk_2way(ctx, dst, src, true);
}
/* glue helpers */
-extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
-extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src);
+extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
+extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
-extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
+extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
#endif /* ASM_X86_CAMELLIA_H */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 8d4a8e1226ee..777c0f63418c 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -11,18 +11,13 @@
#include <asm/fpu/api.h>
#include <crypto/b128ops.h>
-typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
-typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
+typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
+typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
-#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
-#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
-#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
-
struct common_glue_func_entry {
unsigned int num_blocks; /* number of blocks that @fn will process */
union {
@@ -116,7 +111,8 @@ extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
common_glue_func_t tweak_fn, void *tweak_ctx,
void *crypt_ctx, bool decrypt);
-extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
- le128 *iv, common_glue_func_t fn);
+extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst,
+ const u8 *src, le128 *iv,
+ common_glue_func_t fn);
#endif /* _CRYPTO_GLUE_HELPER_H */
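These typedef changes are what let the GLUE_*_CAST macros in the glue files above be deleted: every mode helper now takes a const void *ctx together with plain u8 pointers, so a concrete cipher routine declared with that exact signature can be stored in the fn_u union directly and called without any pointer-type laundering. A small standalone C sketch of the pattern, with hypothetical names rather than the kernel API:

/*
 * Standalone sketch (hypothetical names, not the kernel API) of storing a
 * cipher routine in a glue-table union without casts, because its prototype
 * already matches the common indirect-call type.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t u8;

/* Same shape as common_glue_func_t after the change. */
typedef void (*demo_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);

struct demo_ctx {
        u8 key;
};

/* Toy one-block "cipher": XOR 16 bytes with the key byte. */
static void demo_enc_blk(const void *ctx, u8 *dst, const u8 *src)
{
        const struct demo_ctx *c = ctx;
        int i;

        for (i = 0; i < 16; i++)
                dst[i] = src[i] ^ c->key;
}

struct demo_glue_entry {
        unsigned int num_blocks;
        union {
                demo_glue_func_t ecb;
        } fn_u;
};

/* Direct assignment: no GLUE_FUNC_CAST() needed, the types already match. */
static const struct demo_glue_entry demo_glue[] = {
        { .num_blocks = 1, .fn_u = { .ecb = demo_enc_blk } },
};

int main(void)
{
        struct demo_ctx ctx = { .key = 0x5a };
        u8 src[16], dst[16];

        memset(src, 0x11, sizeof(src));
        demo_glue[0].fn_u.ecb(&ctx, dst, src);
        printf("first output byte: 0x%02x\n", dst[0]);
        return 0;
}

Keeping the indirect-call prototypes identical also makes the code friendlier to control-flow-integrity checking, which rejects calls made through mismatched function-pointer types.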
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
index db7c9cc32234..251c2c89d7cf 100644
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -15,26 +15,26 @@ struct serpent_xts_ctx {
struct serpent_ctx crypt_ctx;
};
-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src, le128 *iv);
-extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
-extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
+extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h
index 1a345e8a7496..860ca248914b 100644
--- a/arch/x86/include/asm/crypto/serpent-sse2.h
+++ b/arch/x86/include/asm/crypto/serpent-sse2.h
@@ -9,25 +9,23 @@
#define SERPENT_PARALLEL_BLOCKS 4
-asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
-asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
-static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
{
__serpent_enc_blk_4way(ctx, dst, src, false);
}
-static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
+ u8 *dst, const u8 *src)
{
__serpent_enc_blk_4way(ctx, dst, src, true);
}
-static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
{
serpent_dec_blk_4way(ctx, dst, src);
}
@@ -36,25 +34,23 @@ static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
#define SERPENT_PARALLEL_BLOCKS 8
-asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
-asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
-static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
{
__serpent_enc_blk_8way(ctx, dst, src, false);
}
-static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
+ u8 *dst, const u8 *src)
{
__serpent_enc_blk_8way(ctx, dst, src, true);
}
-static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
+static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
{
serpent_dec_blk_8way(ctx, dst, src);
}
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
index f618bf272b90..2c377a8042e1 100644
--- a/arch/x86/include/asm/crypto/twofish.h
+++ b/arch/x86/include/asm/crypto/twofish.h
@@ -7,22 +7,19 @@
#include <crypto/b128ops.h>
/* regular block cipher functions from twofish_x86_64 module */
-asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src);
/* 3-way parallel cipher functions */
-asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src,
+ bool xor);
+asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src);
/* helpers from twofish_x86_64-3way module */
-extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
-extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src);
+extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
-extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src,
le128 *iv);
#endif /* ASM_X86_TWOFISH_H */
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 5575d48473bd..cdb51d4272d0 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -511,10 +511,10 @@ config CRYPTO_ESSIV
encryption.
This driver implements a crypto API template that can be
- instantiated either as a skcipher or as a aead (depending on the
+ instantiated either as an skcipher or as an AEAD (depending on the
type of the first template argument), and which defers encryption
and decryption requests to the encapsulated cipher after applying
- ESSIV to the input IV. Note that in the aead case, it is assumed
+ ESSIV to the input IV. Note that in the AEAD case, it is assumed
that the keys are presented in the same format used by the authenc
template, and that the IV appears at the end of the authenticated
associated data (AAD) region (which is how dm-crypt uses it.)
diff --git a/crypto/acompress.c b/crypto/acompress.c
index abadcb035a41..84a76723e851 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -151,9 +151,9 @@ int crypto_register_acomp(struct acomp_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);
-int crypto_unregister_acomp(struct acomp_alg *alg)
+void crypto_unregister_acomp(struct acomp_alg *alg)
{
- return crypto_unregister_alg(&alg->base);
+ crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 9dc53cf9b1f1..cf2b9f4103dd 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -39,8 +39,6 @@
#include <crypto/scatterwalk.h>
#include <linux/module.h>
-#include "internal.h"
-
/*
* Size of right-hand part of input data, in bytes; also the size of the block
* cipher's block size and the hash function's output.
@@ -64,7 +62,7 @@
struct adiantum_instance_ctx {
struct crypto_skcipher_spawn streamcipher_spawn;
- struct crypto_spawn blockcipher_spawn;
+ struct crypto_cipher_spawn blockcipher_spawn;
struct crypto_shash_spawn hash_spawn;
};
@@ -72,7 +70,7 @@ struct adiantum_tfm_ctx {
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
struct crypto_shash *hash;
- struct poly1305_key header_hash_key;
+ struct poly1305_core_key header_hash_key;
};
struct adiantum_request_ctx {
@@ -135,9 +133,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(tctx->streamcipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -167,9 +162,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(tctx->blockcipher, keyp,
BLOCKCIPHER_KEY_SIZE);
- crypto_skcipher_set_flags(tfm,
- crypto_cipher_get_flags(tctx->blockcipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
goto out;
keyp += BLOCKCIPHER_KEY_SIZE;
@@ -182,8 +174,6 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
- crypto_skcipher_set_flags(tfm, crypto_shash_get_flags(tctx->hash) &
- CRYPTO_TFM_RES_MASK);
keyp += NHPOLY1305_KEY_SIZE;
WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
@@ -249,7 +239,7 @@ static void adiantum_hash_header(struct skcipher_request *req)
poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
- poly1305_core_emit(&state, &rctx->header_hash);
+ poly1305_core_emit(&state, NULL, &rctx->header_hash);
}
/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
@@ -469,7 +459,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
crypto_drop_skcipher(&ictx->streamcipher_spawn);
- crypto_drop_spawn(&ictx->blockcipher_spawn);
+ crypto_drop_cipher(&ictx->blockcipher_spawn);
crypto_drop_shash(&ictx->hash_spawn);
kfree(inst);
}
@@ -501,14 +491,12 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
- const char *streamcipher_name;
- const char *blockcipher_name;
+ u32 mask;
const char *nhpoly1305_name;
struct skcipher_instance *inst;
struct adiantum_instance_ctx *ictx;
struct skcipher_alg *streamcipher_alg;
struct crypto_alg *blockcipher_alg;
- struct crypto_alg *_hash_alg;
struct shash_alg *hash_alg;
int err;
@@ -519,19 +507,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
return -EINVAL;
- streamcipher_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(streamcipher_name))
- return PTR_ERR(streamcipher_name);
-
- blockcipher_name = crypto_attr_alg_name(tb[2]);
- if (IS_ERR(blockcipher_name))
- return PTR_ERR(blockcipher_name);
-
- nhpoly1305_name = crypto_attr_alg_name(tb[3]);
- if (nhpoly1305_name == ERR_PTR(-ENOENT))
- nhpoly1305_name = "nhpoly1305";
- if (IS_ERR(nhpoly1305_name))
- return PTR_ERR(nhpoly1305_name);
+ mask = crypto_requires_sync(algt->type, algt->mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
@@ -539,37 +515,31 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */
- crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
- skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
- 0, crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
+ skcipher_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
- crypto_set_spawn(&ictx->blockcipher_spawn,
- skcipher_crypto_instance(inst));
- err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
- CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
+ err = crypto_grab_cipher(&ictx->blockcipher_spawn,
+ skcipher_crypto_instance(inst),
+ crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
- goto out_drop_streamcipher;
- blockcipher_alg = ictx->blockcipher_spawn.alg;
+ goto err_free_inst;
+ blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
/* NHPoly1305 ε-∆U hash function */
- _hash_alg = crypto_alg_mod_lookup(nhpoly1305_name,
- CRYPTO_ALG_TYPE_SHASH,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(_hash_alg)) {
- err = PTR_ERR(_hash_alg);
- goto out_drop_blockcipher;
- }
- hash_alg = __crypto_shash_alg(_hash_alg);
- err = crypto_init_shash_spawn(&ictx->hash_spawn, hash_alg,
- skcipher_crypto_instance(inst));
+ nhpoly1305_name = crypto_attr_alg_name(tb[3]);
+ if (nhpoly1305_name == ERR_PTR(-ENOENT))
+ nhpoly1305_name = "nhpoly1305";
+ err = crypto_grab_shash(&ictx->hash_spawn,
+ skcipher_crypto_instance(inst),
+ nhpoly1305_name, 0, mask);
if (err)
- goto out_put_hash;
+ goto err_free_inst;
+ hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);
/* Check the set of algorithms */
if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
@@ -578,7 +548,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
streamcipher_alg->base.cra_name,
blockcipher_alg->cra_name, hash_alg->base.cra_name);
err = -EINVAL;
- goto out_drop_hash;
+ goto err_free_inst;
}
/* Instance fields */
@@ -587,13 +557,13 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"adiantum(%s,%s)", streamcipher_alg->base.cra_name,
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_drop_hash;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"adiantum(%s,%s,%s)",
streamcipher_alg->base.cra_driver_name,
blockcipher_alg->cra_driver_name,
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_drop_hash;
+ goto err_free_inst;
inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
CRYPTO_ALG_ASYNC;
@@ -623,22 +593,10 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->free = adiantum_free_instance;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto out_drop_hash;
-
- crypto_mod_put(_hash_alg);
- return 0;
-
-out_drop_hash:
- crypto_drop_shash(&ictx->hash_spawn);
-out_put_hash:
- crypto_mod_put(_hash_alg);
-out_drop_blockcipher:
- crypto_drop_spawn(&ictx->blockcipher_spawn);
-out_drop_streamcipher:
- crypto_drop_skcipher(&ictx->streamcipher_spawn);
-out_free_inst:
- kfree(inst);
+ if (err) {
+err_free_inst:
+ adiantum_free_instance(inst);
+ }
return err;
}
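The adiantum_create() rework above also shows the new spawn-management convention from this series: spawns are grabbed against the instance itself, so a single err_free_inst label can delegate all unwinding to the instance's free routine instead of keeping one error label per resource. A standalone sketch of that shape, with hypothetical names rather than the kernel API:

/*
 * Standalone sketch (hypothetical names, not the kernel API): record each
 * grabbed resource in the instance context so one error label can hand
 * cleanup to a single free routine.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_inst {
        int have_stream;
        int have_block;
        int have_hash;
};

static void demo_free_instance(struct demo_inst *inst)
{
        /* Dropping a resource that was never grabbed is a no-op here. */
        if (inst->have_hash)
                printf("drop hash spawn\n");
        if (inst->have_block)
                printf("drop block cipher spawn\n");
        if (inst->have_stream)
                printf("drop stream cipher spawn\n");
        free(inst);
}

static int demo_grab(int ok, int *flag, const char *what)
{
        if (!ok)
                return -1;
        *flag = 1;
        printf("grabbed %s\n", what);
        return 0;
}

static int demo_create(int stream_ok, int block_ok, int hash_ok)
{
        struct demo_inst *inst;
        int err;

        inst = calloc(1, sizeof(*inst));
        if (!inst)
                return -1;

        err = demo_grab(stream_ok, &inst->have_stream, "stream cipher");
        if (err)
                goto err_free_inst;
        err = demo_grab(block_ok, &inst->have_block, "block cipher");
        if (err)
                goto err_free_inst;
        err = demo_grab(hash_ok, &inst->have_hash, "hash");
        if (err)
                goto err_free_inst;

        return 0;       /* registration would happen here */

err_free_inst:
        demo_free_instance(inst);
        return err;
}

int main(void)
{
        demo_create(1, 0, 1);   /* fails at the block cipher step */
        return 0;
}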
diff --git a/crypto/aead.c b/crypto/aead.c
index 47f16d139e8e..16991095270d 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -185,11 +185,6 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
{
struct aead_instance *aead = aead_instance(inst);
- if (!aead->free) {
- inst->tmpl->free(inst);
- return;
- }
-
aead->free(aead);
}
@@ -207,11 +202,12 @@ static const struct crypto_type crypto_aead_type = {
.tfmsize = offsetof(struct crypto_aead, base),
};
-int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
- u32 type, u32 mask)
+int crypto_grab_aead(struct crypto_aead_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_aead_type;
- return crypto_grab_spawn(&spawn->base, name, type, mask);
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_aead);
@@ -292,6 +288,9 @@ int aead_register_instance(struct crypto_template *tmpl,
{
int err;
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
err = aead_prepare_alg(&inst->alg);
if (err)
return err;
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 71c11cb5bad1..44fb4956f0dd 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -372,10 +372,8 @@ static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key,
{
struct aegis_ctx *ctx = crypto_aead_ctx(aead);
- if (keylen != AEGIS128_KEY_SIZE) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AEGIS128_KEY_SIZE)
return -EINVAL;
- }
memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
return 0;
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 22e5867177f1..27ab27931813 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1127,24 +1127,18 @@ EXPORT_SYMBOL_GPL(crypto_it_tab);
* @in_key: The input key.
* @key_len: The size of the key.
*
- * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm
- * is set. The function uses aes_expand_key() to expand the key.
- * &crypto_aes_ctx _must_ be the private data embedded in @tfm which is
- * retrieved with crypto_tfm_ctx().
+ * This function uses aes_expand_key() to expand the key. &crypto_aes_ctx
+ * _must_ be the private data embedded in @tfm which is retrieved with
+ * crypto_tfm_ctx().
+ *
+ * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
*/
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int ret;
-
- ret = aes_expandkey(ctx, in_key, key_len);
- if (!ret)
- return 0;
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ return aes_expandkey(ctx, in_key, key_len);
}
EXPORT_SYMBOL_GPL(crypto_aes_set_key);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 0dceaabc6321..3d8e53010cda 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -134,11 +134,13 @@ void af_alg_release_parent(struct sock *sk)
sk = ask->parent;
ask = alg_sk(sk);
- lock_sock(sk);
+ local_bh_disable();
+ bh_lock_sock(sk);
ask->nokey_refcnt -= nokey;
if (!last)
last = !--ask->refcnt;
- release_sock(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
if (last)
sock_put(sk);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3815b363a693..68a0f0cb75c4 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -23,6 +23,8 @@
#include "internal.h"
+static const struct crypto_type crypto_ahash_type;
+
struct ahash_request_priv {
crypto_completion_t complete;
void *data;
@@ -509,6 +511,13 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
return crypto_alg_extsize(alg);
}
+static void crypto_ahash_free_instance(struct crypto_instance *inst)
+{
+ struct ahash_instance *ahash = ahash_instance(inst);
+
+ ahash->free(ahash);
+}
+
#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -542,9 +551,10 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__crypto_hash_alg_common(alg)->digestsize);
}
-const struct crypto_type crypto_ahash_type = {
+static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
+ .free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
@@ -554,7 +564,15 @@ const struct crypto_type crypto_ahash_type = {
.type = CRYPTO_ALG_TYPE_AHASH,
.tfmsize = offsetof(struct crypto_ahash, base),
};
-EXPORT_SYMBOL_GPL(crypto_ahash_type);
+
+int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_ahash_type;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_ahash);
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask)
@@ -598,9 +616,9 @@ int crypto_register_ahash(struct ahash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
-int crypto_unregister_ahash(struct ahash_alg *alg)
+void crypto_unregister_ahash(struct ahash_alg *alg)
{
- return crypto_unregister_alg(&alg->halg.base);
+ crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
@@ -638,6 +656,9 @@ int ahash_register_instance(struct crypto_template *tmpl,
{
int err;
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
err = ahash_prepare_alg(&inst->alg);
if (err)
return err;
@@ -646,31 +667,6 @@ int ahash_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
-void ahash_free_instance(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(ahash_instance(inst));
-}
-EXPORT_SYMBOL_GPL(ahash_free_instance);
-
-int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
- struct hash_alg_common *alg,
- struct crypto_instance *inst)
-{
- return crypto_init_spawn2(&spawn->base, &alg->base, inst,
- &crypto_ahash_type);
-}
-EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
-
-struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
-{
- struct crypto_alg *alg;
-
- alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
- return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
-}
-EXPORT_SYMBOL_GPL(ahash_attr_alg);
-
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
struct crypto_alg *alg = &halg->base;
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 7d5cf4939423..f866085c8a4a 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -90,11 +90,12 @@ static const struct crypto_type crypto_akcipher_type = {
.tfmsize = offsetof(struct crypto_akcipher, base),
};
-int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
- u32 type, u32 mask)
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_akcipher_type;
- return crypto_grab_spawn(&spawn->base, name, type, mask);
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
@@ -146,6 +147,8 @@ EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
int akcipher_register_instance(struct crypto_template *tmpl,
struct akcipher_instance *inst)
{
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
akcipher_prepare_alg(&inst->alg);
return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
}
diff --git a/crypto/algapi.c b/crypto/algapi.c
index b052f38edba6..69605e21af92 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -65,11 +65,6 @@ static int crypto_check_alg(struct crypto_alg *alg)
static void crypto_free_instance(struct crypto_instance *inst)
{
- if (!inst->alg.cra_type->free) {
- inst->tmpl->free(inst);
- return;
- }
-
inst->alg.cra_type->free(inst);
}
@@ -82,6 +77,15 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
crypto_tmpl_put(tmpl);
}
+/*
+ * This function adds a spawn to the list secondary_spawns which
+ * will be used at the end of crypto_remove_spawns to unregister
+ * instances, unless the spawn happens to be one that is depended
+ * on by the new algorithm (nalg in crypto_remove_spawns).
+ *
+ * This function is also responsible for resurrecting any algorithms
+ * in the dependency chain of nalg by unsetting n->dead.
+ */
static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
struct list_head *stack,
struct list_head *top,
@@ -93,15 +97,17 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
if (!spawn)
return NULL;
- n = list_next_entry(spawn, list);
+ n = list_prev_entry(spawn, list);
+ list_move(&spawn->list, secondary_spawns);
- if (spawn->alg && &n->list != stack && !n->alg)
- n->alg = (n->list.next == stack) ? alg :
- &list_next_entry(n, list)->inst->alg;
+ if (list_is_last(&n->list, stack))
+ return top;
- list_move(&spawn->list, secondary_spawns);
+ n = list_next_entry(n, list);
+ if (!spawn->dead)
+ n->dead = false;
- return &n->list == stack ? top : &n->inst->alg.cra_users;
+ return &n->inst->alg.cra_users;
}
static void crypto_remove_instance(struct crypto_instance *inst,
@@ -113,8 +119,6 @@ static void crypto_remove_instance(struct crypto_instance *inst,
return;
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
- if (hlist_unhashed(&inst->list))
- return;
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
@@ -126,6 +130,12 @@ static void crypto_remove_instance(struct crypto_instance *inst,
BUG_ON(!list_empty(&inst->alg.cra_users));
}
+/*
+ * Given an algorithm alg, remove all algorithms that depend on it
+ * through spawns. If nalg is not null, then exempt any algorithms
+ * that are depended on by nalg. This is useful when nalg itself
+ * depends on alg.
+ */
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg)
{
@@ -144,6 +154,11 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
list_move(&spawn->list, &top);
}
+ /*
+ * Perform a depth-first walk starting from alg through
+ * the cra_users tree. The list stack records the path
+ * from alg to the current spawn.
+ */
spawns = &top;
do {
while (!list_empty(spawns)) {
@@ -153,17 +168,26 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
list);
inst = spawn->inst;
- BUG_ON(&inst->alg == alg);
-
list_move(&spawn->list, &stack);
+ spawn->dead = !spawn->registered || &inst->alg != nalg;
+
+ if (!spawn->registered)
+ break;
+
+ BUG_ON(&inst->alg == alg);
if (&inst->alg == nalg)
break;
- spawn->alg = NULL;
spawns = &inst->alg.cra_users;
/*
+ * Even if spawn->registered is true, the
+ * instance itself may still be unregistered.
+ * This is because it may have failed during
+ * registration. Therefore we still need to
+ * make the following test.
+ *
* We may encounter an unregistered instance here, since
* an instance's spawns are set up prior to the instance
* being registered. An unregistered instance will have
@@ -178,10 +202,15 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
} while ((spawns = crypto_more_spawns(alg, &stack, &top,
&secondary_spawns)));
+ /*
+ * Remove all instances that are marked as dead. Also
+ * complete the resurrection of the others by moving them
+ * back to the cra_users list.
+ */
list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
- if (spawn->alg)
+ if (!spawn->dead)
list_move(&spawn->list, &spawn->alg->cra_users);
- else
+ else if (spawn->registered)
crypto_remove_instance(spawn->inst, list);
}
}
@@ -257,6 +286,7 @@ void crypto_alg_tested(const char *name, int err)
struct crypto_alg *alg;
struct crypto_alg *q;
LIST_HEAD(list);
+ bool best;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
@@ -280,6 +310,21 @@ found:
alg->cra_flags |= CRYPTO_ALG_TESTED;
+ /* Only satisfy larval waiters if we are the best. */
+ best = true;
+ list_for_each_entry(q, &crypto_alg_list, cra_list) {
+ if (crypto_is_moribund(q) || !crypto_is_larval(q))
+ continue;
+
+ if (strcmp(alg->cra_name, q->cra_name))
+ continue;
+
+ if (q->cra_priority > alg->cra_priority) {
+ best = false;
+ break;
+ }
+ }
+
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg)
continue;
@@ -303,10 +348,12 @@ found:
continue;
if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
continue;
- if (!crypto_mod_get(alg))
- continue;
- larval->adult = alg;
+ if (best && crypto_mod_get(alg))
+ larval->adult = alg;
+ else
+ larval->adult = ERR_PTR(-EAGAIN);
+
continue;
}
@@ -397,7 +444,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
return 0;
}
-int crypto_unregister_alg(struct crypto_alg *alg)
+void crypto_unregister_alg(struct crypto_alg *alg)
{
int ret;
LIST_HEAD(list);
@@ -406,15 +453,14 @@ int crypto_unregister_alg(struct crypto_alg *alg)
ret = crypto_remove_alg(alg, &list);
up_write(&crypto_alg_sem);
- if (ret)
- return ret;
+ if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
+ return;
BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
crypto_remove_final(&list);
- return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
@@ -438,18 +484,12 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_register_algs);
-int crypto_unregister_algs(struct crypto_alg *algs, int count)
+void crypto_unregister_algs(struct crypto_alg *algs, int count)
{
- int i, ret;
-
- for (i = 0; i < count; i++) {
- ret = crypto_unregister_alg(&algs[i]);
- if (ret)
- pr_err("Failed to unregister %s %s: %d\n",
- algs[i].cra_driver_name, algs[i].cra_name, ret);
- }
+ int i;
- return 0;
+ for (i = 0; i < count; i++)
+ crypto_unregister_alg(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_algs);
@@ -561,6 +601,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst)
{
struct crypto_larval *larval;
+ struct crypto_spawn *spawn;
int err;
err = crypto_check_alg(&inst->alg);
@@ -572,6 +613,22 @@ int crypto_register_instance(struct crypto_template *tmpl,
down_write(&crypto_alg_sem);
+ larval = ERR_PTR(-EAGAIN);
+ for (spawn = inst->spawns; spawn;) {
+ struct crypto_spawn *next;
+
+ if (spawn->dead)
+ goto unlock;
+
+ next = spawn->next;
+ spawn->inst = inst;
+ spawn->registered = true;
+
+ crypto_mod_put(spawn->alg);
+
+ spawn = next;
+ }
+
larval = __crypto_register_alg(&inst->alg);
if (IS_ERR(larval))
goto unlock;
@@ -594,7 +651,7 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
-int crypto_unregister_instance(struct crypto_instance *inst)
+void crypto_unregister_instance(struct crypto_instance *inst)
{
LIST_HEAD(list);
@@ -606,97 +663,70 @@ int crypto_unregister_instance(struct crypto_instance *inst)
up_write(&crypto_alg_sem);
crypto_remove_final(&list);
-
- return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_instance);
-int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst, u32 mask)
+int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
{
+ struct crypto_alg *alg;
int err = -EAGAIN;
if (WARN_ON_ONCE(inst == NULL))
return -EINVAL;
- spawn->inst = inst;
- spawn->mask = mask;
+ /* Allow the result of crypto_attr_alg_name() to be passed directly */
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ alg = crypto_find_alg(name, spawn->frontend, type, mask);
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
down_write(&crypto_alg_sem);
if (!crypto_is_moribund(alg)) {
list_add(&spawn->list, &alg->cra_users);
spawn->alg = alg;
+ spawn->mask = mask;
+ spawn->next = inst->spawns;
+ inst->spawns = spawn;
err = 0;
}
up_write(&crypto_alg_sem);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(crypto_init_spawn);
-
-int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst,
- const struct crypto_type *frontend)
-{
- int err = -EINVAL;
-
- if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
- goto out;
-
- spawn->frontend = frontend;
- err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(crypto_init_spawn2);
-
-int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
- u32 type, u32 mask)
-{
- struct crypto_alg *alg;
- int err;
-
- alg = crypto_find_alg(name, spawn->frontend, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
-
- err = crypto_init_spawn(spawn, alg, spawn->inst, mask);
- crypto_mod_put(alg);
+ if (err)
+ crypto_mod_put(alg);
return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_spawn);
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
- if (!spawn->alg)
+ if (!spawn->alg) /* not yet initialized? */
return;
down_write(&crypto_alg_sem);
- list_del(&spawn->list);
+ if (!spawn->dead)
+ list_del(&spawn->list);
up_write(&crypto_alg_sem);
+
+ if (!spawn->registered)
+ crypto_mod_put(spawn->alg);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
- struct crypto_alg *alg2;
down_read(&crypto_alg_sem);
alg = spawn->alg;
- alg2 = alg;
- if (alg2)
- alg2 = crypto_mod_get(alg2);
- up_read(&crypto_alg_sem);
-
- if (!alg2) {
- if (alg)
- crypto_shoot_alg(alg);
- return ERR_PTR(-EAGAIN);
+ if (!spawn->dead && !crypto_mod_get(alg)) {
+ alg->cra_flags |= CRYPTO_ALG_DYING;
+ alg = NULL;
}
+ up_read(&crypto_alg_sem);
- return alg;
+ return alg ?: ERR_PTR(-EAGAIN);
}
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
@@ -809,20 +839,6 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
- const struct crypto_type *frontend,
- u32 type, u32 mask)
-{
- const char *name;
-
- name = crypto_attr_alg_name(rta);
- if (IS_ERR(name))
- return ERR_CAST(name);
-
- return crypto_find_alg(name, frontend, type, mask);
-}
-EXPORT_SYMBOL_GPL(crypto_attr_alg2);
-
int crypto_attr_u32(struct rtattr *rta, u32 *num)
{
struct crypto_attr_u32 *nu32;
@@ -856,32 +872,6 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name,
}
EXPORT_SYMBOL_GPL(crypto_inst_setname);
-void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
- unsigned int head)
-{
- struct crypto_instance *inst;
- char *p;
- int err;
-
- p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
- GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- inst = (void *)(p + head);
-
- err = crypto_inst_setname(inst, name, alg);
- if (err)
- goto err_free_inst;
-
- return p;
-
-err_free_inst:
- kfree(p);
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(crypto_alloc_instance);
-
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
INIT_LIST_HEAD(&queue->list);
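
Two properties of the reworked algapi code are relied on by every converted template later in this patch: crypto_grab_spawn() now performs the algorithm lookup itself and starts by checking IS_ERR(name), so the result of crypto_attr_alg_name() can be passed straight in; and crypto_drop_spawn() returns early for a spawn that was never grabbed, so a half-initialised instance can be torn down from a single error label. A hedged before/after fragment, with the instance and mask setup elided:

        /* Before: the attribute string had to be checked first. */
        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);
        err = crypto_grab_skcipher(spawn, cipher_name, 0, mask);

        /* After: an ERR_PTR "name" is simply propagated as the error. */
        err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
                                   crypto_attr_alg_name(tb[1]), 0, mask);
        if (err)
                goto err_free_inst;     /* safe even if nothing was grabbed */
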
diff --git a/crypto/algboss.c b/crypto/algboss.c
index a62149d6c839..535f1f87e6c1 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -58,7 +58,6 @@ static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
- struct crypto_instance *inst;
int err;
tmpl = crypto_lookup_template(param->template);
@@ -66,16 +65,7 @@ static int cryptomgr_probe(void *data)
goto out;
do {
- if (tmpl->create) {
- err = tmpl->create(tmpl, param->tb);
- continue;
- }
-
- inst = tmpl->alloc(param->tb);
- if (IS_ERR(inst))
- err = PTR_ERR(inst);
- else if ((err = crypto_register_instance(tmpl, inst)))
- tmpl->free(inst);
+ err = tmpl->create(tmpl, param->tb);
} while (err == -EAGAIN && !signal_pending(current));
crypto_tmpl_put(tmpl);
diff --git a/crypto/anubis.c b/crypto/anubis.c
index f9ce78fde6ee..5da0241ef453 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -464,7 +464,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
- u32 *flags = &tfm->crt_flags;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
@@ -474,7 +473,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
case 32: case 36: case 40:
break;
default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
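
With the CRYPTO_TFM_RES_* flags gone, a setkey routine neither sets CRYPTO_TFM_RES_BAD_KEY_LEN on the plain-cipher side (as anubis used to, above) nor copies result flags back from a child transform on the wrapper side; it just returns the error. A minimal sketch of the wrapper shape the later hunks converge on, where example_ctx and its child field are hypothetical:

#include <crypto/internal/skcipher.h>

struct example_ctx {
        struct crypto_skcipher *child;
};

static int example_setkey(struct crypto_skcipher *parent, const u8 *key,
                          unsigned int keylen)
{
        struct example_ctx *ctx = crypto_skcipher_ctx(parent);

        crypto_skcipher_clear_flags(ctx->child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ctx->child,
                                  crypto_skcipher_get_flags(parent) &
                                  CRYPTO_TFM_REQ_MASK);

        /* No more mirroring of CRYPTO_TFM_RES_MASK back to the parent. */
        return crypto_skcipher_setkey(ctx->child, key, keylen);
}
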
diff --git a/crypto/api.c b/crypto/api.c
index 55bca28df92d..7d71a9b10e5f 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -97,7 +97,7 @@ static void crypto_larval_destroy(struct crypto_alg *alg)
struct crypto_larval *larval = (void *)alg;
BUG_ON(!crypto_is_larval(alg));
- if (larval->adult)
+ if (!IS_ERR_OR_NULL(larval->adult))
crypto_mod_put(larval->adult);
kfree(larval);
}
@@ -178,6 +178,8 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
alg = ERR_PTR(-ETIMEDOUT);
else if (!alg)
alg = ERR_PTR(-ENOENT);
+ else if (IS_ERR(alg))
+ ;
else if (crypto_is_test_larval(larval) &&
!(alg->cra_flags & CRYPTO_ALG_TESTED))
alg = ERR_PTR(-EAGAIN);
@@ -295,20 +297,7 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
if (type_obj)
return type_obj->init(tfm, type, mask);
-
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_ops(tfm);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_ops(tfm);
-
- default:
- break;
- }
-
- BUG();
- return -EINVAL;
+ return 0;
}
static void crypto_exit_ops(struct crypto_tfm *tfm)
@@ -344,13 +333,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
return len;
}
-void crypto_shoot_alg(struct crypto_alg *alg)
+static void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
-EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)
@@ -516,7 +504,7 @@ EXPORT_SYMBOL_GPL(crypto_find_alg);
*
* The returned transform is of a non-determinate type. Most people
* should use one of the more specific allocation functions such as
- * crypto_alloc_blkcipher.
+ * crypto_alloc_skcipher().
*
* In case of error the return value is an error pointer.
*/
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 3f0ed9402582..775e7138fd10 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -91,15 +91,12 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
int err = -EINVAL;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- goto badkey;
+ goto out;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
- crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
- CRYPTO_TFM_RES_MASK);
-
if (err)
goto out;
@@ -107,16 +104,9 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
- crypto_aead_set_flags(authenc, crypto_skcipher_get_flags(enc) &
- CRYPTO_TFM_RES_MASK);
-
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
@@ -383,12 +373,12 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
+ struct authenc_instance_ctx *ctx;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
struct skcipher_alg *enc;
- struct authenc_instance_ctx *ctx;
- const char *enc_name;
int err;
algt = crypto_get_attr_type(tb);
@@ -398,38 +388,24 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
- auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK |
- crypto_requires_sync(algt->type, algt->mask));
- if (IS_ERR(auth))
- return PTR_ERR(auth);
-
- auth_base = &auth->base;
-
- enc_name = crypto_attr_alg_name(tb[2]);
- err = PTR_ERR(enc_name);
- if (IS_ERR(enc_name))
- goto out_put_auth;
+ mask = crypto_requires_sync(algt->type, algt->mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- err = -ENOMEM;
if (!inst)
- goto out_put_auth;
-
+ return -ENOMEM;
ctx = aead_instance_ctx(inst);
- err = crypto_init_ahash_spawn(&ctx->auth, auth,
- aead_crypto_instance(inst));
+ err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
+ auth = crypto_spawn_ahash_alg(&ctx->auth);
+ auth_base = &auth->base;
- crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
- goto err_drop_auth;
-
+ goto err_free_inst;
enc = crypto_spawn_skcipher_alg(&ctx->enc);
ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
@@ -440,12 +416,12 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
"authenc(%s,%s)", auth_base->cra_name,
enc->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
- goto err_drop_enc;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authenc(%s,%s)", auth_base->cra_driver_name,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_drop_enc;
+ goto err_free_inst;
inst->alg.base.cra_flags = (auth_base->cra_flags |
enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
@@ -470,21 +446,11 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
inst->free = crypto_authenc_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto err_drop_enc;
-
-out:
- crypto_mod_put(auth_base);
- return err;
-
-err_drop_enc:
- crypto_drop_skcipher(&ctx->enc);
-err_drop_auth:
- crypto_drop_ahash(&ctx->auth);
+ if (err) {
err_free_inst:
- kfree(inst);
-out_put_auth:
- goto out;
+ crypto_authenc_free(inst);
+ }
+ return err;
}
static struct crypto_template crypto_authenc_tmpl = {
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index adb7554fca29..589008146fce 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -65,15 +65,12 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
int err = -EINVAL;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- goto badkey;
+ goto out;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
- crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
- CRYPTO_TFM_RES_MASK);
-
if (err)
goto out;
@@ -81,16 +78,9 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
- crypto_aead_set_flags(authenc_esn, crypto_skcipher_get_flags(enc) &
- CRYPTO_TFM_RES_MASK);
-
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
@@ -401,12 +391,12 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
+ struct authenc_esn_instance_ctx *ctx;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
struct skcipher_alg *enc;
- struct authenc_esn_instance_ctx *ctx;
- const char *enc_name;
int err;
algt = crypto_get_attr_type(tb);
@@ -416,50 +406,36 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
- auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK |
- crypto_requires_sync(algt->type, algt->mask));
- if (IS_ERR(auth))
- return PTR_ERR(auth);
-
- auth_base = &auth->base;
-
- enc_name = crypto_attr_alg_name(tb[2]);
- err = PTR_ERR(enc_name);
- if (IS_ERR(enc_name))
- goto out_put_auth;
+ mask = crypto_requires_sync(algt->type, algt->mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- err = -ENOMEM;
if (!inst)
- goto out_put_auth;
-
+ return -ENOMEM;
ctx = aead_instance_ctx(inst);
- err = crypto_init_ahash_spawn(&ctx->auth, auth,
- aead_crypto_instance(inst));
+ err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
+ auth = crypto_spawn_ahash_alg(&ctx->auth);
+ auth_base = &auth->base;
- crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
- goto err_drop_auth;
-
+ goto err_free_inst;
enc = crypto_spawn_skcipher_alg(&ctx->enc);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_name,
enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_drop_enc;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_driver_name,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_drop_enc;
+ goto err_free_inst;
inst->alg.base.cra_flags = (auth_base->cra_flags |
enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
@@ -485,21 +461,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
inst->free = crypto_authenc_esn_free,
err = aead_register_instance(tmpl, inst);
- if (err)
- goto err_drop_enc;
-
-out:
- crypto_mod_put(auth_base);
- return err;
-
-err_drop_enc:
- crypto_drop_skcipher(&ctx->enc);
-err_drop_auth:
- crypto_drop_ahash(&ctx->auth);
+ if (err) {
err_free_inst:
- kfree(inst);
-out_put_auth:
- goto out;
+ crypto_authenc_esn_free(inst);
+ }
+ return err;
}
static struct crypto_template crypto_authenc_esn_tmpl = {
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index d04b1788dc42..1d262374fa4e 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -147,10 +147,8 @@ static int blake2b_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen == 0 || keylen > BLAKE2B_KEYBYTES) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen == 0 || keylen > BLAKE2B_KEYBYTES)
return -EINVAL;
- }
memcpy(tctx->key, key, keylen);
tctx->keylen = keylen;
diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
index ed0c74640470..005783ff45ad 100644
--- a/crypto/blake2s_generic.c
+++ b/crypto/blake2s_generic.c
@@ -17,10 +17,8 @@ static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
return -EINVAL;
- }
memcpy(tctx->key, key, keylen);
tctx->keylen = keylen;
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index b6a1121e2478..9a5783e5196a 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -970,12 +970,9 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
const unsigned char *key = (const unsigned char *)in_key;
- u32 *flags = &tfm->crt_flags;
- if (key_len != 16 && key_len != 24 && key_len != 32) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
- }
cctx->key_length = key_len;
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index a8248f8e2777..c77ff6c8a2b2 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -103,17 +103,14 @@ static inline void W(u32 *key, unsigned int i)
key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
-int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key,
- unsigned key_len, u32 *flags)
+int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key, unsigned int key_len)
{
int i;
u32 key[8];
__be32 p_key[8]; /* padded key */
- if (key_len % 4 != 0) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len % 4 != 0)
return -EINVAL;
- }
memset(p_key, 0, 32);
memcpy(p_key, in_key, key_len);
@@ -148,13 +145,12 @@ EXPORT_SYMBOL_GPL(__cast6_setkey);
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
- return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen,
- &tfm->crt_flags);
+ return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen);
}
EXPORT_SYMBOL_GPL(cast6_setkey);
/*forward quad round*/
-static inline void Q(u32 *block, u8 *Kr, u32 *Km)
+static inline void Q(u32 *block, const u8 *Kr, const u32 *Km)
{
u32 I;
block[2] ^= F1(block[3], Kr[0], Km[0]);
@@ -164,7 +160,7 @@ static inline void Q(u32 *block, u8 *Kr, u32 *Km)
}
/*reverse quad round*/
-static inline void QBAR(u32 *block, u8 *Kr, u32 *Km)
+static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km)
{
u32 I;
block[3] ^= F1(block[0], Kr[3], Km[3]);
@@ -173,13 +169,14 @@ static inline void QBAR(u32 *block, u8 *Kr, u32 *Km)
block[2] ^= F1(block[3], Kr[0], Km[0]);
}
-void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
+void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
{
+ const struct cast6_ctx *c = ctx;
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
- u32 *Km;
- u8 *Kr;
+ const u32 *Km;
+ const u8 *Kr;
block[0] = be32_to_cpu(src[0]);
block[1] = be32_to_cpu(src[1]);
@@ -211,13 +208,14 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
__cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
-void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
+void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
{
+ const struct cast6_ctx *c = ctx;
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
- u32 *Km;
- u8 *Kr;
+ const u32 *Km;
+ const u8 *Kr;
block[0] = be32_to_cpu(src[0]);
block[1] = be32_to_cpu(src[1]);
diff --git a/crypto/cbc.c b/crypto/cbc.c
index dd96bcf4d4b6..e6f6273a7d39 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -54,10 +54,12 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_alg *alg;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
+ alg = skcipher_ialg_simple(inst);
+
err = -EINVAL;
if (!is_power_of_2(alg->cra_blocksize))
goto out_free_inst;
@@ -66,14 +68,11 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.decrypt = crypto_cbc_decrypt;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto out_free_inst;
- goto out_put_alg;
-
+ if (err) {
out_free_inst:
- inst->free(inst);
-out_put_alg:
- crypto_mod_put(alg);
+ inst->free(inst);
+ }
+
return err;
}
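
skcipher_alloc_instance_simple() no longer hands back a referenced crypto_alg, so the single-spawn mode templates drop their crypto_mod_put() calls and read the underlying cipher through skcipher_ialg_simple() instead. A hedged sketch of a mode ->create() in the new style (the handler assignments are left as a placeholder comment):

#include <crypto/internal/skcipher.h>
#include <linux/log2.h>

static int example_mode_create(struct crypto_template *tmpl,
                               struct rtattr **tb)
{
        struct skcipher_instance *inst;
        struct crypto_alg *alg;
        int err;

        inst = skcipher_alloc_instance_simple(tmpl, tb);
        if (IS_ERR(inst))
                return PTR_ERR(inst);

        /* No reference is held on alg; it lives as long as the spawn does. */
        alg = skcipher_ialg_simple(inst);

        err = -EINVAL;
        if (!is_power_of_2(alg->cra_blocksize))  /* same sanity check cbc uses */
                goto out_free_inst;

        /* ... set inst->alg.encrypt/.decrypt to this mode's handlers ... */

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
out_free_inst:
                inst->free(inst);
        }
        return err;
}
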
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 380eb619f657..241ecdc5c4e0 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -15,8 +15,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include "internal.h"
-
struct ccm_instance_ctx {
struct crypto_skcipher_spawn ctr;
struct crypto_ahash_spawn mac;
@@ -91,26 +89,19 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_skcipher *ctr = ctx->ctr;
struct crypto_ahash *mac = ctx->mac;
- int err = 0;
+ int err;
crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
if (err)
- goto out;
+ return err;
crypto_ahash_clear_flags(mac, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(mac, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(mac, key, keylen);
- crypto_aead_set_flags(aead, crypto_ahash_get_flags(mac) &
- CRYPTO_TFM_RES_MASK);
-
-out:
- return err;
+ return crypto_ahash_setkey(mac, key, keylen);
}
static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
@@ -457,11 +448,11 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
const char *mac_name)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
+ struct ccm_instance_ctx *ictx;
struct skcipher_alg *ctr;
- struct crypto_alg *mac_alg;
struct hash_alg_common *mac;
- struct ccm_instance_ctx *ictx;
int err;
algt = crypto_get_attr_type(tb);
@@ -471,37 +462,28 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
- mac_alg = crypto_find_alg(mac_name, &crypto_ahash_type,
- CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK |
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(mac_alg))
- return PTR_ERR(mac_alg);
-
- mac = __crypto_hash_alg_common(mac_alg);
- err = -EINVAL;
- if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
- mac->digestsize != 16)
- goto out_put_mac;
+ mask = crypto_requires_sync(algt->type, algt->mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
- err = -ENOMEM;
if (!inst)
- goto out_put_mac;
-
+ return -ENOMEM;
ictx = aead_instance_ctx(inst);
- err = crypto_init_ahash_spawn(&ictx->mac, mac,
- aead_crypto_instance(inst));
+
+ err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst),
+ mac_name, 0, CRYPTO_ALG_ASYNC);
if (err)
goto err_free_inst;
+ mac = crypto_spawn_ahash_alg(&ictx->mac);
- crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
- if (err)
- goto err_drop_mac;
+ err = -EINVAL;
+ if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
+ mac->digestsize != 16)
+ goto err_free_inst;
+ err = crypto_grab_skcipher(&ictx->ctr, aead_crypto_instance(inst),
+ ctr_name, 0, mask);
+ if (err)
+ goto err_free_inst;
ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
@@ -509,21 +491,21 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
crypto_skcipher_alg_ivsize(ctr) != 16 ||
ctr->base.cra_blocksize != 1)
- goto err_drop_ctr;
+ goto err_free_inst;
/* ctr and cbcmac must use the same underlying block cipher. */
if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
- goto err_drop_ctr;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
- goto err_drop_ctr;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"ccm_base(%s,%s)", ctr->base.cra_driver_name,
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_drop_ctr;
+ goto err_free_inst;
inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (mac->base.cra_priority +
@@ -545,20 +527,11 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
inst->free = crypto_ccm_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto err_drop_ctr;
-
-out_put_mac:
- crypto_mod_put(mac_alg);
- return err;
-
-err_drop_ctr:
- crypto_drop_skcipher(&ictx->ctr);
-err_drop_mac:
- crypto_drop_ahash(&ictx->mac);
+ if (err) {
err_free_inst:
- kfree(inst);
- goto out_put_mac;
+ crypto_ccm_free(inst);
+ }
+ return err;
}
static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -604,7 +577,6 @@ static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
{
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
- int err;
if (keylen < 3)
return -EINVAL;
@@ -615,11 +587,7 @@ static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(child, key, keylen);
- crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_aead_setkey(child, key, keylen);
}
static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
@@ -745,6 +713,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
@@ -758,6 +727,8 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
+ mask = crypto_requires_sync(algt->type, algt->mask);
+
ccm_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ccm_name))
return PTR_ERR(ccm_name);
@@ -767,9 +738,8 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
return -ENOMEM;
spawn = aead_instance_ctx(inst);
- crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
- err = crypto_grab_aead(spawn, ccm_name, 0,
- crypto_requires_sync(algt->type, algt->mask));
+ err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
+ ccm_name, 0, mask);
if (err)
goto out_free_inst;
@@ -896,7 +866,7 @@ static int cbcmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
@@ -917,6 +887,7 @@ static void cbcmac_exit_tfm(struct crypto_tfm *tfm)
static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
+ struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
int err;
@@ -924,21 +895,20 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
return err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = shash_instance_ctx(inst);
- inst = shash_alloc_instance("cbcmac", alg);
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, 0);
+ if (err)
+ goto err_free_inst;
+ alg = crypto_spawn_cipher_alg(spawn);
- err = crypto_init_spawn(shash_instance_ctx(inst), alg,
- shash_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
+ err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = 1;
@@ -957,14 +927,13 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.final = crypto_cbcmac_digest_final;
inst->alg.setkey = crypto_cbcmac_digest_setkey;
- err = shash_register_instance(tmpl, inst);
+ inst->free = shash_free_singlespawn_instance;
-out_free_inst:
- if (err)
- shash_free_instance(shash_crypto_instance(inst));
-
-out_put_alg:
- crypto_mod_put(alg);
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ shash_free_singlespawn_instance(inst);
+ }
return err;
}
@@ -972,7 +941,6 @@ static struct crypto_template crypto_ccm_tmpls[] = {
{
.name = "cbcmac",
.create = cbcmac_create,
- .free = shash_free_instance,
.module = THIS_MODULE,
}, {
.name = "ccm_base",
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 7b68fbb61732..4e5219bbcd19 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -203,10 +203,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_alg *alg;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
+ alg = skcipher_ialg_simple(inst);
+
/* CFB mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
@@ -223,7 +225,6 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
inst->free(inst);
- crypto_mod_put(alg);
return err;
}
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 74e824e537e6..ccaea5cb66d1 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -16,8 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include "internal.h"
-
struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
struct crypto_ahash_spawn poly;
@@ -477,7 +475,6 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(aead);
- int err;
if (keylen != ctx->saltlen + CHACHA_KEY_SIZE)
return -EINVAL;
@@ -488,11 +485,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
-
- err = crypto_skcipher_setkey(ctx->chacha, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return crypto_skcipher_setkey(ctx->chacha, key, keylen);
}
static int chachapoly_setauthsize(struct crypto_aead *tfm,
@@ -563,12 +556,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
const char *name, unsigned int ivsize)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
- struct skcipher_alg *chacha;
- struct crypto_alg *poly;
- struct hash_alg_common *poly_hash;
struct chachapoly_instance_ctx *ctx;
- const char *chacha_name, *poly_name;
+ struct skcipher_alg *chacha;
+ struct hash_alg_common *poly;
int err;
if (ivsize > CHACHAPOLY_IV_SIZE)
@@ -581,72 +573,53 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
- chacha_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(chacha_name))
- return PTR_ERR(chacha_name);
- poly_name = crypto_attr_alg_name(tb[2]);
- if (IS_ERR(poly_name))
- return PTR_ERR(poly_name);
-
- poly = crypto_find_alg(poly_name, &crypto_ahash_type,
- CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK |
- crypto_requires_sync(algt->type,
- algt->mask));
- if (IS_ERR(poly))
- return PTR_ERR(poly);
- poly_hash = __crypto_hash_alg_common(poly);
+ mask = crypto_requires_sync(algt->type, algt->mask);
- err = -EINVAL;
- if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
- goto out_put_poly;
-
- err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
- goto out_put_poly;
-
+ return -ENOMEM;
ctx = aead_instance_ctx(inst);
ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
- aead_crypto_instance(inst));
+
+ err = crypto_grab_skcipher(&ctx->chacha, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
+ chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
- crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
- goto err_drop_poly;
-
- chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
+ goto err_free_inst;
+ poly = crypto_spawn_ahash_alg(&ctx->poly);
err = -EINVAL;
+ if (poly->digestsize != POLY1305_DIGEST_SIZE)
+ goto err_free_inst;
/* Need 16-byte IV size, including Initial Block Counter value */
if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE)
- goto out_drop_chacha;
+ goto err_free_inst;
/* Not a stream cipher? */
if (chacha->base.cra_blocksize != 1)
- goto out_drop_chacha;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_name,
- poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_drop_chacha;
+ poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_driver_name,
- poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_drop_chacha;
+ poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
- inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) &
- CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_flags = (chacha->base.cra_flags |
+ poly->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (chacha->base.cra_priority +
- poly->cra_priority) / 2;
+ poly->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
- poly->cra_alignmask;
+ poly->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
ctx->saltlen;
inst->alg.ivsize = ivsize;
@@ -662,20 +635,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
inst->free = chachapoly_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto out_drop_chacha;
-
-out_put_poly:
- crypto_mod_put(poly);
- return err;
-
-out_drop_chacha:
- crypto_drop_skcipher(&ctx->chacha);
-err_drop_poly:
- crypto_drop_ahash(&ctx->poly);
+ if (err) {
err_free_inst:
- kfree(inst);
- goto out_put_poly;
+ chachapoly_free(inst);
+ }
+ return err;
}
static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 108427026e7c..fd78150deb1c 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -2,7 +2,7 @@
/*
* Cryptographic API.
*
- * Cipher operations.
+ * Single-block cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
@@ -16,11 +16,11 @@
#include <linux/string.h>
#include "internal.h"
-static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
+static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+ struct cipher_alg *cia = crypto_cipher_alg(tfm);
+ unsigned long alignmask = crypto_cipher_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
@@ -32,83 +32,60 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
- ret = cia->cia_setkey(tfm, alignbuffer, keylen);
+ ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
-static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+ struct cipher_alg *cia = crypto_cipher_alg(tfm);
+ unsigned long alignmask = crypto_cipher_alignmask(tfm);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize)
return -EINVAL;
- }
if ((unsigned long)key & alignmask)
return setkey_unaligned(tfm, key, keylen);
- return cia->cia_setkey(tfm, key, keylen);
-}
-
-static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
- const u8 *),
- struct crypto_tfm *tfm,
- u8 *dst, const u8 *src)
-{
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- unsigned int size = crypto_tfm_alg_blocksize(tfm);
- u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
- u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
-
- memcpy(tmp, src, size);
- fn(tfm, tmp, tmp);
- memcpy(dst, tmp, size);
+ return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
}
+EXPORT_SYMBOL_GPL(crypto_cipher_setkey);
-static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
- u8 *dst, const u8 *src)
+static inline void cipher_crypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src, bool enc)
{
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
+ unsigned long alignmask = crypto_cipher_alignmask(tfm);
+ struct cipher_alg *cia = crypto_cipher_alg(tfm);
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ enc ? cia->cia_encrypt : cia->cia_decrypt;
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
- cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
- return;
+ unsigned int bs = crypto_cipher_blocksize(tfm);
+ u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
+ u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+
+ memcpy(tmp, src, bs);
+ fn(crypto_cipher_tfm(tfm), tmp, tmp);
+ memcpy(dst, tmp, bs);
+ } else {
+ fn(crypto_cipher_tfm(tfm), dst, src);
}
-
- cipher->cia_encrypt(tfm, dst, src);
}
-static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
- u8 *dst, const u8 *src)
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src)
{
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
-
- if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
- cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
- return;
- }
-
- cipher->cia_decrypt(tfm, dst, src);
+ cipher_crypt_one(tfm, dst, src, true);
}
+EXPORT_SYMBOL_GPL(crypto_cipher_encrypt_one);
-int crypto_init_cipher_ops(struct crypto_tfm *tfm)
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src)
{
- struct cipher_tfm *ops = &tfm->crt_cipher;
- struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
-
- ops->cit_setkey = setkey;
- ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
- cipher_encrypt_unaligned : cipher->cia_encrypt;
- ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
- cipher_decrypt_unaligned : cipher->cia_decrypt;
-
- return 0;
+ cipher_crypt_one(tfm, dst, src, false);
}
+EXPORT_SYMBOL_GPL(crypto_cipher_decrypt_one);
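
crypto_cipher_setkey() and crypto_cipher_{encrypt,decrypt}_one() are now ordinary exported functions in cipher.c rather than ops installed by crypto_init_cipher_ops(); from a caller's point of view nothing changes. A minimal usage sketch, assuming the "aes" cipher is available:

#include <linux/crypto.h>

static int example_encrypt_block(const u8 *key, u8 block[16])
{
        struct crypto_cipher *tfm;
        int err;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* A bad key length now just returns -EINVAL; no RES flags are set. */
        err = crypto_cipher_setkey(tfm, key, 16);
        if (!err)
                /* Misaligned buffers are bounced through a stack buffer. */
                crypto_cipher_encrypt_one(tfm, block, block);

        crypto_free_cipher(tfm);
        return err;
}
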
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 0928aebc6205..143a6544c873 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -201,7 +201,7 @@ static int cmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
@@ -222,6 +222,7 @@ static void cmac_exit_tfm(struct crypto_tfm *tfm)
static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
+ struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
int err;
@@ -230,10 +231,16 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
return err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = shash_instance_ctx(inst);
+
+ err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, 0);
+ if (err)
+ goto err_free_inst;
+ alg = crypto_spawn_cipher_alg(spawn);
switch (alg->cra_blocksize) {
case 16:
@@ -241,19 +248,12 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
break;
default:
err = -EINVAL;
- goto out_put_alg;
+ goto err_free_inst;
}
- inst = shash_alloc_instance("cmac", alg);
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- err = crypto_init_spawn(shash_instance_ctx(inst), alg,
- shash_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
+ err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
alignmask = alg->cra_alignmask;
inst->alg.base.cra_alignmask = alignmask;
@@ -280,21 +280,19 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.final = crypto_cmac_digest_final;
inst->alg.setkey = crypto_cmac_digest_setkey;
+ inst->free = shash_free_singlespawn_instance;
+
err = shash_register_instance(tmpl, inst);
if (err) {
-out_free_inst:
- shash_free_instance(shash_crypto_instance(inst));
+err_free_inst:
+ shash_free_singlespawn_instance(inst);
}
-
-out_put_alg:
- crypto_mod_put(alg);
return err;
}
static struct crypto_template crypto_cmac_tmpl = {
.name = "cmac",
.create = cmac_create,
- .free = shash_free_instance,
.module = THIS_MODULE,
};
diff --git a/crypto/compress.c b/crypto/compress.c
index e9edf8524787..9048fe390c46 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -6,34 +6,27 @@
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*/
-#include <linux/types.h>
#include <linux/crypto.h>
-#include <linux/errno.h>
-#include <linux/string.h>
#include "internal.h"
-static int crypto_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+int crypto_comp_compress(struct crypto_comp *comp,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
+ struct crypto_tfm *tfm = crypto_comp_tfm(comp);
+
return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
dlen);
}
+EXPORT_SYMBOL_GPL(crypto_comp_compress);
-static int crypto_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+int crypto_comp_decompress(struct crypto_comp *comp,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
+ struct crypto_tfm *tfm = crypto_comp_tfm(comp);
+
return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
dlen);
}
-
-int crypto_init_compress_ops(struct crypto_tfm *tfm)
-{
- struct compress_tfm *ops = &tfm->crt_compress;
-
- ops->cot_compress = crypto_compress;
- ops->cot_decompress = crypto_decompress;
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(crypto_comp_decompress);
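
crypto_comp_compress() and crypto_comp_decompress() likewise become exported functions instead of cot_* ops installed at init time. A short usage sketch, assuming the "deflate" compressor is built into the kernel in question:

#include <linux/crypto.h>

static int example_compress(const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen)
{
        struct crypto_comp *comp;
        int err;

        comp = crypto_alloc_comp("deflate", 0, 0);
        if (IS_ERR(comp))
                return PTR_ERR(comp);

        /* *dlen is the destination size on entry, bytes produced on return. */
        err = crypto_comp_compress(comp, src, slen, dst, dlen);

        crypto_free_comp(comp);
        return err;
}
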
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index 9e97912280bd..0e103fb5dd77 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -60,10 +60,8 @@ static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
{
u32 *mctx = crypto_shash_ctx(hash);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
*mctx = get_unaligned_le32(key);
return 0;
}
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 7b25fe82072c..7fa9b0788685 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -74,10 +74,8 @@ static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(mctx->key)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(mctx->key))
return -EINVAL;
- }
mctx->key = get_unaligned_le32(key);
return 0;
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 2c6649b10923..d94c75c840a5 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -221,48 +221,17 @@ static int cryptd_init_instance(struct crypto_instance *inst,
return 0;
}
-static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail)
-{
- char *p;
- struct crypto_instance *inst;
- int err;
-
- p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- inst = (void *)(p + head);
-
- err = cryptd_init_instance(inst, alg);
- if (err)
- goto out_free_inst;
-
-out:
- return p;
-
-out_free_inst:
- kfree(p);
- p = ERR_PTR(err);
- goto out;
-}
-
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_sync_skcipher *child = ctx->child;
- int err;
crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(child,
crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent,
- crypto_sync_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return crypto_sync_skcipher_setkey(child, key, keylen);
}
static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
@@ -421,8 +390,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
ctx = skcipher_instance_ctx(inst);
ctx->queue = queue;
- crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
+ err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
+ name, type, mask);
if (err)
goto out_free_inst;
@@ -491,15 +460,11 @@ static int cryptd_hash_setkey(struct crypto_ahash *parent,
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
struct crypto_shash *child = ctx->child;
- int err;
crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_shash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return crypto_shash_setkey(child, key, keylen);
}
static int cryptd_hash_enqueue(struct ahash_request *req,
@@ -666,44 +631,49 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
return crypto_shash_import(desc, in);
}
+static void cryptd_hash_free(struct ahash_instance *inst)
+{
+ struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
+
+ crypto_drop_shash(&ctx->spawn);
+ kfree(inst);
+}
+
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
struct cryptd_queue *queue)
{
struct hashd_instance_ctx *ctx;
struct ahash_instance *inst;
- struct shash_alg *salg;
- struct crypto_alg *alg;
+ struct shash_alg *alg;
u32 type = 0;
u32 mask = 0;
int err;
cryptd_check_internal(tb, &type, &mask);
- salg = shash_attr_alg(tb[1], type, mask);
- if (IS_ERR(salg))
- return PTR_ERR(salg);
-
- alg = &salg->base;
- inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
- sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
ctx = ahash_instance_ctx(inst);
ctx->queue = queue;
- err = crypto_init_shash_spawn(&ctx->spawn, salg,
- ahash_crypto_instance(inst));
+ err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), type, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
+ alg = crypto_spawn_shash_alg(&ctx->spawn);
+
+ err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
+ if (err)
+ goto err_free_inst;
inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_OPTIONAL_KEY));
+ (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
- inst->alg.halg.digestsize = salg->digestsize;
- inst->alg.halg.statesize = salg->statesize;
+ inst->alg.halg.digestsize = alg->digestsize;
+ inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ -715,19 +685,18 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
- if (crypto_shash_alg_has_setkey(salg))
+ if (crypto_shash_alg_has_setkey(alg))
inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;
+ inst->free = cryptd_hash_free;
+
err = ahash_register_instance(tmpl, inst);
if (err) {
+err_free_inst:
crypto_drop_shash(&ctx->spawn);
-out_free_inst:
kfree(inst);
}
-
-out_put_alg:
- crypto_mod_put(alg);
return err;
}
@@ -849,6 +818,14 @@ static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
crypto_free_aead(ctx->child);
}
+static void cryptd_aead_free(struct aead_instance *inst)
+{
+ struct aead_instance_ctx *ctx = aead_instance_ctx(inst);
+
+ crypto_drop_aead(&ctx->aead_spawn);
+ kfree(inst);
+}
+
static int cryptd_create_aead(struct crypto_template *tmpl,
struct rtattr **tb,
struct cryptd_queue *queue)
@@ -874,8 +851,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
ctx = aead_instance_ctx(inst);
ctx->queue = queue;
- crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
- err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
+ err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
+ name, type, mask);
if (err)
goto out_free_inst;
@@ -898,6 +875,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
+ inst->free = cryptd_aead_free;
+
err = aead_register_instance(tmpl, inst);
if (err) {
out_drop_aead:
@@ -930,31 +909,9 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
return -EINVAL;
}
-static void cryptd_free(struct crypto_instance *inst)
-{
- struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
- struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
- struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
-
- switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_drop_shash(&hctx->spawn);
- kfree(ahash_instance(inst));
- return;
- case CRYPTO_ALG_TYPE_AEAD:
- crypto_drop_aead(&aead_ctx->aead_spawn);
- kfree(aead_instance(inst));
- return;
- default:
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
- }
-}
-
static struct crypto_template cryptd_tmpl = {
.name = "cryptd",
.create = cryptd_create,
- .free = cryptd_free,
.module = THIS_MODULE,
};
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index b785c476de67..3fa20f12989f 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -323,7 +323,8 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (refcount_read(&alg->cra_refcnt) > 2)
goto drop_alg;
- err = crypto_unregister_instance((struct crypto_instance *)alg);
+ crypto_unregister_instance((struct crypto_instance *)alg);
+ err = 0;
drop_alg:
crypto_mod_put(alg);
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 70a3fccb82f3..a8feab621c6c 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -129,10 +129,12 @@ static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_alg *alg;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
+ alg = skcipher_ialg_simple(inst);
+
/* Block size must be >= 4 bytes. */
err = -EINVAL;
if (alg->cra_blocksize < 4)
@@ -155,14 +157,11 @@ static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.decrypt = crypto_ctr_crypt;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto out_free_inst;
- goto out_put_alg;
-
+ if (err) {
out_free_inst:
- inst->free(inst);
-out_put_alg:
- crypto_mod_put(alg);
+ inst->free(inst);
+ }
+
return err;
}
@@ -171,7 +170,6 @@ static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
{
struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
- int err;
/* the nonce is stored in bytes at end of key */
if (keylen < CTR_RFC3686_NONCE_SIZE)
@@ -185,11 +183,7 @@ static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_skcipher_setkey(child, key, keylen);
}
static int crypto_rfc3686_crypt(struct skcipher_request *req)
@@ -292,8 +286,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
spawn = skcipher_instance_ctx(inst);
- crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher(spawn, cipher_name, 0, mask);
+ err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
+ cipher_name, 0, mask);
if (err)
goto err_free_inst;
diff --git a/crypto/cts.c b/crypto/cts.c
index 6b6087dbb62a..48188adc8e91 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -78,15 +78,11 @@ static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
{
struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
- int err;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return crypto_skcipher_setkey(child, key, keylen);
}
static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err)
@@ -332,6 +328,7 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_attr_type *algt;
struct skcipher_alg *alg;
const char *cipher_name;
+ u32 mask;
int err;
algt = crypto_get_attr_type(tb);
@@ -341,6 +338,8 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
return -EINVAL;
+ mask = crypto_requires_sync(algt->type, algt->mask);
+
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
@@ -351,10 +350,8 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
- crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher(spawn, cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
+ cipher_name, 0, mask);
if (err)
goto err_free_inst;
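
The ctr and cts setkey changes above follow one recipe: forward only the CRYPTO_TFM_REQ_* flags to the child and return its setkey result directly, since the CRYPTO_TFM_RES_* flags no longer exist. A condensed sketch, with example_ctx and example_setkey as illustrative names:

struct example_ctx {
	struct crypto_skcipher *child;
};

static int example_setkey(struct crypto_skcipher *parent, const u8 *key,
			  unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	/* forward request flags only; no result flags to copy back */
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}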
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 6e13a4a29ecb..c85354a5e94c 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -29,11 +29,8 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
else
err = 0;
}
-
- if (err) {
+ if (err)
memset(dctx, 0, sizeof(*dctx));
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
- }
return err;
}
@@ -64,11 +61,8 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
else
err = 0;
}
-
- if (err) {
+ if (err)
memset(dctx, 0, sizeof(*dctx));
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
- }
return err;
}
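
After the CRYPTO_TFM_RES_WEAK_KEY removal, des_setkey() reduces to the shape sketched below. The lines outside the hunk are reconstructed from context for readability and may not match the file exactly.

static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen)
{
	struct des_ctx *dctx = crypto_tfm_ctx(tfm);
	int err;

	err = des_expand_key(dctx, key, keylen);
	if (err == -ENOKEY) {
		/* weak key: reject only if the caller forbids weak keys */
		if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
			err = -EINVAL;
		else
			err = 0;
	}
	if (err)
		memset(dctx, 0, sizeof(*dctx));
	return err;
}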
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 9d6981ca7d5d..69a687cbdf21 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -61,10 +61,9 @@ static int crypto_ecb_decrypt(struct skcipher_request *req)
static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_alg *alg;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
@@ -76,7 +75,7 @@ static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
- crypto_mod_put(alg);
+
return err;
}
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index a49cbf7b0929..4a2f02baba14 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -133,29 +133,17 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
- inst->free = aead_geniv_free;
-
err = aead_register_instance(tmpl, inst);
- if (err)
- goto free_inst;
-
-out:
- return err;
-
+ if (err) {
free_inst:
- aead_geniv_free(inst);
- goto out;
-}
-
-static void echainiv_free(struct crypto_instance *inst)
-{
- aead_geniv_free(aead_instance(inst));
+ inst->free(inst);
+ }
+ return err;
}
static struct crypto_template echainiv_tmpl = {
.name = "echainiv",
.create = echainiv_aead_create,
- .free = echainiv_free,
.module = THIS_MODULE,
};
diff --git a/crypto/essiv.c b/crypto/essiv.c
index 495a2d1e1460..465a89c9d1ef 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -75,9 +75,6 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(tctx->u.skcipher, key, keylen);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(tctx->u.skcipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -90,13 +87,8 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm,
crypto_cipher_set_flags(tctx->essiv_cipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(tctx->essiv_cipher, salt,
- crypto_shash_digestsize(tctx->hash));
- crypto_skcipher_set_flags(tfm,
- crypto_cipher_get_flags(tctx->essiv_cipher) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_cipher_setkey(tctx->essiv_cipher, salt,
+ crypto_shash_digestsize(tctx->hash));
}
static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -112,15 +104,11 @@ static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key,
crypto_aead_set_flags(tctx->u.aead, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(tctx->u.aead, key, keylen);
- crypto_aead_set_flags(tfm, crypto_aead_get_flags(tctx->u.aead) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
return -EINVAL;
- }
desc->tfm = tctx->hash;
err = crypto_shash_init(desc) ?:
@@ -132,12 +120,8 @@ static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key,
crypto_cipher_clear_flags(tctx->essiv_cipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tctx->essiv_cipher, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(tctx->essiv_cipher, salt,
- crypto_shash_digestsize(tctx->hash));
- crypto_aead_set_flags(tfm, crypto_cipher_get_flags(tctx->essiv_cipher) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_cipher_setkey(tctx->essiv_cipher, salt,
+ crypto_shash_digestsize(tctx->hash));
}
static int essiv_aead_setauthsize(struct crypto_aead *tfm,
@@ -442,7 +426,7 @@ static bool essiv_supported_algorithms(const char *essiv_cipher_name,
if (ivsize != alg->cra_blocksize)
goto out;
- if (crypto_shash_alg_has_setkey(hash_alg))
+ if (crypto_shash_alg_needs_key(hash_alg))
goto out;
ret = true;
@@ -468,6 +452,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_alg *hash_alg;
int ivsize;
u32 type;
+ u32 mask;
int err;
algt = crypto_get_attr_type(tb);
@@ -483,6 +468,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(shash_name);
type = algt->type & algt->mask;
+ mask = crypto_requires_sync(algt->type, algt->mask);
switch (type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
@@ -495,11 +481,8 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
ictx = crypto_instance_ctx(inst);
/* Symmetric cipher, e.g., "cbc(aes)" */
- crypto_set_skcipher_spawn(&ictx->u.skcipher_spawn, inst);
- err = crypto_grab_skcipher(&ictx->u.skcipher_spawn,
- inner_cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ictx->u.skcipher_spawn, inst,
+ inner_cipher_name, 0, mask);
if (err)
goto out_free_inst;
skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn);
@@ -517,11 +500,8 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
ictx = crypto_instance_ctx(inst);
/* AEAD cipher, e.g., "authenc(hmac(sha256),cbc(aes))" */
- crypto_set_aead_spawn(&ictx->u.aead_spawn, inst);
- err = crypto_grab_aead(&ictx->u.aead_spawn,
- inner_cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_aead(&ictx->u.aead_spawn, inst,
+ inner_cipher_name, 0, mask);
if (err)
goto out_free_inst;
aead_alg = crypto_spawn_aead_alg(&ictx->u.aead_spawn);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 73884208f075..8e5c0ac65661 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -13,7 +13,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
-#include "internal.h"
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -111,8 +110,6 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -141,9 +138,6 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
- crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
- CRYPTO_TFM_RES_MASK);
-
out:
kzfree(data);
return err;
@@ -585,11 +579,11 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
const char *ghash_name)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
+ struct gcm_instance_ctx *ctx;
struct skcipher_alg *ctr;
- struct crypto_alg *ghash_alg;
struct hash_alg_common *ghash;
- struct gcm_instance_ctx *ctx;
int err;
algt = crypto_get_attr_type(tb);
@@ -599,39 +593,28 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
- ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
- CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK |
- crypto_requires_sync(algt->type,
- algt->mask));
- if (IS_ERR(ghash_alg))
- return PTR_ERR(ghash_alg);
-
- ghash = __crypto_hash_alg_common(ghash_alg);
+ mask = crypto_requires_sync(algt->type, algt->mask);
- err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
- goto out_put_ghash;
-
+ return -ENOMEM;
ctx = aead_instance_ctx(inst);
- err = crypto_init_ahash_spawn(&ctx->ghash, ghash,
- aead_crypto_instance(inst));
+
+ err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst),
+ ghash_name, 0, mask);
if (err)
goto err_free_inst;
+ ghash = crypto_spawn_ahash_alg(&ctx->ghash);
err = -EINVAL;
if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
ghash->digestsize != 16)
- goto err_drop_ghash;
+ goto err_free_inst;
- crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
- err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst),
+ ctr_name, 0, mask);
if (err)
- goto err_drop_ghash;
-
+ goto err_free_inst;
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
@@ -639,18 +622,18 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
crypto_skcipher_alg_ivsize(ctr) != 16 ||
ctr->base.cra_blocksize != 1)
- goto out_put_ctr;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
- goto out_put_ctr;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->base.cra_driver_name,
- ghash_alg->cra_driver_name) >=
+ ghash->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto out_put_ctr;
+ goto err_free_inst;
inst->alg.base.cra_flags = (ghash->base.cra_flags |
ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
@@ -673,20 +656,11 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
inst->free = crypto_gcm_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto out_put_ctr;
-
-out_put_ghash:
- crypto_mod_put(ghash_alg);
- return err;
-
-out_put_ctr:
- crypto_drop_skcipher(&ctx->ctr);
-err_drop_ghash:
- crypto_drop_ahash(&ctx->ghash);
+ if (err) {
err_free_inst:
- kfree(inst);
- goto out_put_ghash;
+ crypto_gcm_free(inst);
+ }
+ return err;
}
static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -727,7 +701,6 @@ static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
{
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
- int err;
if (keylen < 4)
return -EINVAL;
@@ -738,11 +711,7 @@ static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(child, key, keylen);
- crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_aead_setkey(child, key, keylen);
}
static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
@@ -867,6 +836,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
@@ -880,6 +850,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
+ mask = crypto_requires_sync(algt->type, algt->mask);
+
ccm_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ccm_name))
return PTR_ERR(ccm_name);
@@ -889,9 +861,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
return -ENOMEM;
spawn = aead_instance_ctx(inst);
- crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
- err = crypto_grab_aead(spawn, ccm_name, 0,
- crypto_requires_sync(algt->type, algt->mask));
+ err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
+ ccm_name, 0, mask);
if (err)
goto out_free_inst;
@@ -956,7 +927,6 @@ static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
{
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
- int err;
if (keylen < 4)
return -EINVAL;
@@ -967,11 +937,7 @@ static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(child, key, keylen);
- crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_aead_setkey(child, key, keylen);
}
static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
@@ -1103,6 +1069,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct crypto_attr_type *algt;
+ u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
@@ -1117,6 +1084,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return -EINVAL;
+ mask = crypto_requires_sync(algt->type, algt->mask);
+
ccm_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ccm_name))
return PTR_ERR(ccm_name);
@@ -1127,9 +1096,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
ctx = aead_instance_ctx(inst);
spawn = &ctx->aead;
- crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
- err = crypto_grab_aead(spawn, ccm_name, 0,
- crypto_requires_sync(algt->type, algt->mask));
+ err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
+ ccm_name, 0, mask);
if (err)
goto out_free_inst;
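
Condensed from the gcm_base hunks above: both underlying algorithms are now looked up through spawns held by the instance, so the spawn owns the only reference and the crypto_mod_put()/out_put_* unwinding disappears.

	ctx = aead_instance_ctx(inst);

	err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst),
				ghash_name, 0, mask);
	if (err)
		goto err_free_inst;
	ghash = crypto_spawn_ahash_alg(&ctx->ghash);

	err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst),
				   ctr_name, 0, mask);
	if (err)
		goto err_free_inst;
	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);

	/* on any later failure, crypto_gcm_free(inst) drops both spawns */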
diff --git a/crypto/geniv.c b/crypto/geniv.c
index b9e45a2a98b5..dbcc640274cd 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -32,6 +32,12 @@ static int aead_geniv_setauthsize(struct crypto_aead *tfm,
return crypto_aead_setauthsize(ctx->child, authsize);
}
+static void aead_geniv_free(struct aead_instance *inst)
+{
+ crypto_drop_aead(aead_instance_ctx(inst));
+ kfree(inst);
+}
+
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type, u32 mask)
{
@@ -64,8 +70,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
/* Ignore async algorithms if necessary. */
mask |= crypto_requires_sync(algt->type, algt->mask);
- crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
- err = crypto_grab_aead(spawn, name, type, mask);
+ err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
+ name, type, mask);
if (err)
goto err_free_inst;
@@ -100,6 +106,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
inst->alg.ivsize = ivsize;
inst->alg.maxauthsize = maxauthsize;
+ inst->free = aead_geniv_free;
+
out:
return inst;
@@ -112,13 +120,6 @@ err_free_inst:
}
EXPORT_SYMBOL_GPL(aead_geniv_alloc);
-void aead_geniv_free(struct aead_instance *inst)
-{
- crypto_drop_aead(aead_instance_ctx(inst));
- kfree(inst);
-}
-EXPORT_SYMBOL_GPL(aead_geniv_free);
-
int aead_init_geniv(struct crypto_aead *aead)
{
struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 5027b3461c92..c70d163c1ac9 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -58,10 +58,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
be128 k;
- if (keylen != GHASH_BLOCK_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
- }
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 8b2a212eb0ad..e38bfb948278 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -138,12 +138,11 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
crypto_shash_finup(desc, out, ds, out);
}
-static int hmac_init_tfm(struct crypto_tfm *tfm)
+static int hmac_init_tfm(struct crypto_shash *parent)
{
- struct crypto_shash *parent = __crypto_shash_cast(tfm);
struct crypto_shash *hash;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
+ struct shash_instance *inst = shash_alg_instance(parent);
+ struct crypto_shash_spawn *spawn = shash_instance_ctx(inst);
struct hmac_ctx *ctx = hmac_ctx(parent);
hash = crypto_spawn_shash(spawn);
@@ -152,24 +151,21 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
parent->descsize = sizeof(struct shash_desc) +
crypto_shash_descsize(hash);
- if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE)) {
- crypto_free_shash(hash);
- return -EINVAL;
- }
ctx->hash = hash;
return 0;
}
-static void hmac_exit_tfm(struct crypto_tfm *tfm)
+static void hmac_exit_tfm(struct crypto_shash *parent)
{
- struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm));
+ struct hmac_ctx *ctx = hmac_ctx(parent);
crypto_free_shash(ctx->hash);
}
static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
+ struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
int err;
@@ -180,31 +176,32 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
return err;
- salg = shash_attr_alg(tb[1], 0, 0);
- if (IS_ERR(salg))
- return PTR_ERR(salg);
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = shash_instance_ctx(inst);
+
+ err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, 0);
+ if (err)
+ goto err_free_inst;
+ salg = crypto_spawn_shash_alg(spawn);
alg = &salg->base;
- /* The underlying hash algorithm must be unkeyed */
+ /* The underlying hash algorithm must not require a key */
err = -EINVAL;
- if (crypto_shash_alg_has_setkey(salg))
- goto out_put_alg;
+ if (crypto_shash_alg_needs_key(salg))
+ goto err_free_inst;
ds = salg->digestsize;
ss = salg->statesize;
if (ds > alg->cra_blocksize ||
ss < alg->cra_blocksize)
- goto out_put_alg;
+ goto err_free_inst;
- inst = shash_alloc_instance("hmac", alg);
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
- shash_crypto_instance(inst));
+ err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
@@ -217,9 +214,6 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
ALIGN(ss * 2, crypto_tfm_ctx_alignment());
- inst->alg.base.cra_init = hmac_init_tfm;
- inst->alg.base.cra_exit = hmac_exit_tfm;
-
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
inst->alg.final = hmac_final;
@@ -227,22 +221,22 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
inst->alg.setkey = hmac_setkey;
+ inst->alg.init_tfm = hmac_init_tfm;
+ inst->alg.exit_tfm = hmac_exit_tfm;
+
+ inst->free = shash_free_singlespawn_instance;
err = shash_register_instance(tmpl, inst);
if (err) {
-out_free_inst:
- shash_free_instance(shash_crypto_instance(inst));
+err_free_inst:
+ shash_free_singlespawn_instance(inst);
}
-
-out_put_alg:
- crypto_mod_put(alg);
return err;
}
static struct crypto_template hmac_tmpl = {
.name = "hmac",
.create = hmac_create,
- .free = shash_free_instance,
.module = THIS_MODULE,
};
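
Taken together, the hmac conversion above is the template-side recipe for the new shash API: grab the underlying hash through a spawn, move per-transform setup from cra_init/cra_exit to init_tfm/exit_tfm on the shash_alg, and let the generic shash_free_singlespawn_instance() clean up. A condensed sketch, where example_init_tfm and example_exit_tfm stand in for the template's own handlers:

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);

	err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), 0, 0);
	if (err)
		goto err_free_inst;

	inst->alg.init_tfm = example_init_tfm;	/* replaces base.cra_init */
	inst->alg.exit_tfm = example_exit_tfm;	/* replaces base.cra_exit */
	inst->free = shash_free_singlespawn_instance;

	err = shash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;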
diff --git a/crypto/internal.h b/crypto/internal.h
index 93df7bec844a..d5ebc60c5143 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -58,9 +58,6 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
-int crypto_init_cipher_ops(struct crypto_tfm *tfm);
-int crypto_init_compress_ops(struct crypto_tfm *tfm);
-
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
void crypto_alg_tested(const char *name, int err);
@@ -68,7 +65,6 @@ void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
-void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index a155c88105ea..0355cce21b1e 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -266,10 +266,12 @@ static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_alg *alg;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
+ alg = skcipher_ialg_simple(inst);
+
err = -EINVAL;
/* Section 5.1 requirement for KW */
if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
@@ -283,14 +285,11 @@ static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.decrypt = crypto_kw_decrypt;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto out_free_inst;
- goto out_put_alg;
-
+ if (err) {
out_free_inst:
- inst->free(inst);
-out_put_alg:
- crypto_mod_put(alg);
+ inst->free(inst);
+ }
+
return err;
}
diff --git a/crypto/lrw.c b/crypto/lrw.c
index be829f6afc8e..63c485c0d8a6 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -79,8 +79,6 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(child, key, keylen - bsize);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -303,6 +301,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
struct skcipher_alg *alg;
const char *cipher_name;
char ecb_name[CRYPTO_MAX_ALG_NAME];
+ u32 mask;
int err;
algt = crypto_get_attr_type(tb);
@@ -312,6 +311,8 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
return -EINVAL;
+ mask = crypto_requires_sync(algt->type, algt->mask);
+
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
@@ -322,19 +323,17 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
- crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher(spawn, cipher_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
+ cipher_name, 0, mask);
if (err == -ENOENT) {
err = -ENAMETOOLONG;
if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- err = crypto_grab_skcipher(spawn, ecb_name, 0,
- crypto_requires_sync(algt->type,
- algt->mask));
+ err = crypto_grab_skcipher(spawn,
+ skcipher_crypto_instance(inst),
+ ecb_name, 0, mask);
}
if (err)
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 20e6220f46f6..63350c4ad461 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -137,10 +137,8 @@ static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
const __le32 *data = (const __le32 *)key;
- if (keylen != 8) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != 8)
return -EINVAL;
- }
mctx->l = le32_to_cpu(data[0]);
mctx->r = le32_to_cpu(data[1]);
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index f6b6a52092b4..8a3006c3b51b 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -210,7 +210,7 @@ int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, nh_t nh_fn)
if (state->nh_remaining)
process_nh_hash_value(state, key);
- poly1305_core_emit(&state->poly_state, dst);
+ poly1305_core_emit(&state->poly_state, NULL, dst);
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_final_helper);
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 133ff4c7f2c6..2ec68e3f2c55 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -55,10 +55,12 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_alg *alg;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
+ alg = skcipher_ialg_simple(inst);
+
/* OFB mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
@@ -75,7 +77,6 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
inst->free(inst);
- crypto_mod_put(alg);
return err;
}
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 862cdb8d8b6c..ae921fb74dc9 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -153,10 +153,9 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req)
static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_alg *alg;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
@@ -166,7 +165,7 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
- crypto_mod_put(alg);
+
return err;
}
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 543792e0ebf0..1b632139a8c1 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>
@@ -24,6 +23,8 @@ static struct kset *pcrypt_kset;
struct pcrypt_instance_ctx {
struct crypto_aead_spawn spawn;
+ struct padata_shell *psenc;
+ struct padata_shell *psdec;
atomic_t tfm_count;
};
@@ -32,6 +33,12 @@ struct pcrypt_aead_ctx {
unsigned int cb_cpu;
};
+static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx(
+ struct crypto_aead *tfm)
+{
+ return aead_instance_ctx(aead_alg_instance(tfm));
+}
+
static int pcrypt_aead_setkey(struct crypto_aead *parent,
const u8 *key, unsigned int keylen)
{
@@ -63,7 +70,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
struct padata_priv *padata = pcrypt_request_padata(preq);
padata->info = err;
- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
padata_do_serial(padata);
}
@@ -90,6 +96,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(req);
+ struct pcrypt_instance_ctx *ictx;
+
+ ictx = pcrypt_tfm_ictx(aead);
memset(padata, 0, sizeof(struct padata_priv));
@@ -103,7 +112,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
req->cryptlen, req->iv);
aead_request_set_ad(creq, req->assoclen);
- err = padata_do_parallel(pencrypt, padata, &ctx->cb_cpu);
+ err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
@@ -132,6 +141,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(req);
+ struct pcrypt_instance_ctx *ictx;
+
+ ictx = pcrypt_tfm_ictx(aead);
memset(padata, 0, sizeof(struct padata_priv));
@@ -145,7 +157,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
req->cryptlen, req->iv);
aead_request_set_ad(creq, req->assoclen);
- err = padata_do_parallel(pdecrypt, padata, &ctx->cb_cpu);
+ err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
@@ -192,6 +204,8 @@ static void pcrypt_free(struct aead_instance *inst)
struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_aead(&ctx->spawn);
+ padata_free_shell(ctx->psdec);
+ padata_free_shell(ctx->psenc);
kfree(inst);
}
@@ -233,12 +247,21 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
if (!inst)
return -ENOMEM;
+ err = -ENOMEM;
+
ctx = aead_instance_ctx(inst);
- crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));
+ ctx->psenc = padata_alloc_shell(pencrypt);
+ if (!ctx->psenc)
+ goto out_free_inst;
+
+ ctx->psdec = padata_alloc_shell(pdecrypt);
+ if (!ctx->psdec)
+ goto out_free_psenc;
- err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
+ err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
+ name, 0, 0);
if (err)
- goto out_free_inst;
+ goto out_free_psdec;
alg = crypto_spawn_aead_alg(&ctx->spawn);
err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
@@ -271,6 +294,10 @@ out:
out_drop_aead:
crypto_drop_aead(&ctx->spawn);
+out_free_psdec:
+ padata_free_shell(ctx->psdec);
+out_free_psenc:
+ padata_free_shell(ctx->psenc);
out_free_inst:
kfree(inst);
goto out;
@@ -362,11 +389,12 @@ err:
static void __exit pcrypt_exit(void)
{
+ crypto_unregister_template(&pcrypt_tmpl);
+
pcrypt_fini_padata(pencrypt);
pcrypt_fini_padata(pdecrypt);
kset_unregister(pcrypt_kset);
- crypto_unregister_template(&pcrypt_tmpl);
}
subsys_initcall(pcrypt_init);
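
The pcrypt conversion above is also the first user of padata shells: each instance allocates its own submission handles in ->create(), submits requests through them, and releases them in ->free(). Condensed from the hunks:

	/* ->create(): one shell per direction */
	ctx->psenc = padata_alloc_shell(pencrypt);
	if (!ctx->psenc)
		goto out_free_inst;
	ctx->psdec = padata_alloc_shell(pdecrypt);
	if (!ctx->psdec)
		goto out_free_psenc;

	/* request path: submit through the instance's shell */
	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);

	/* ->free(): release the shells before the instance */
	padata_free_shell(ctx->psdec);
	padata_free_shell(ctx->psenc);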
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 21edbd8c99fb..94af47eb6fa6 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -31,6 +31,29 @@ static int crypto_poly1305_init(struct shash_desc *desc)
return 0;
}
+static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
+{
+ if (!dctx->sset) {
+ if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
+ poly1305_core_setkey(&dctx->core_r, src);
+ src += POLY1305_BLOCK_SIZE;
+ srclen -= POLY1305_BLOCK_SIZE;
+ dctx->rset = 2;
+ }
+ if (srclen >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(src + 0);
+ dctx->s[1] = get_unaligned_le32(src + 4);
+ dctx->s[2] = get_unaligned_le32(src + 8);
+ dctx->s[3] = get_unaligned_le32(src + 12);
+ src += POLY1305_BLOCK_SIZE;
+ srclen -= POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ }
+ return srclen;
+}
+
static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
unsigned int srclen)
{
@@ -42,7 +65,7 @@ static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
srclen = datalen;
}
- poly1305_core_blocks(&dctx->h, dctx->r, src,
+ poly1305_core_blocks(&dctx->h, &dctx->core_r, src,
srclen / POLY1305_BLOCK_SIZE, 1);
}
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 0aa489711ec4..176b63afec8d 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -598,6 +598,7 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const struct rsa_asn1_template *digest_info;
struct crypto_attr_type *algt;
+ u32 mask;
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
struct crypto_akcipher_spawn *spawn;
@@ -613,6 +614,8 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
return -EINVAL;
+ mask = crypto_requires_sync(algt->type, algt->mask);
+
rsa_alg_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(rsa_alg_name))
return PTR_ERR(rsa_alg_name);
@@ -636,9 +639,8 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = &ctx->spawn;
ctx->digest_info = digest_info;
- crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
- err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
- crypto_requires_sync(algt->type, algt->mask));
+ err = crypto_grab_akcipher(spawn, akcipher_crypto_instance(inst),
+ rsa_alg_name, 0, mask);
if (err)
goto out_free_inst;
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 4d50750d01c6..738f4f8f0f41 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -266,9 +266,9 @@ int crypto_register_scomp(struct scomp_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
-int crypto_unregister_scomp(struct scomp_alg *alg)
+void crypto_unregister_scomp(struct scomp_alg *alg)
{
- return crypto_unregister_alg(&alg->base);
+ crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 96d222c32acc..f124b9b54e15 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -18,8 +18,6 @@
#include <linux/slab.h>
#include <linux/string.h>
-static void seqiv_free(struct crypto_instance *inst);
-
static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
struct aead_request *subreq = aead_request_ctx(req);
@@ -159,15 +157,11 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto free_inst;
-
-out:
- return err;
-
+ if (err) {
free_inst:
- aead_geniv_free(inst);
- goto out;
+ inst->free(inst);
+ }
+ return err;
}
static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -184,15 +178,9 @@ static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
return seqiv_aead_create(tmpl, tb);
}
-static void seqiv_free(struct crypto_instance *inst)
-{
- aead_geniv_free(aead_instance(inst));
-}
-
static struct crypto_template seqiv_tmpl = {
.name = "seqiv",
.create = seqiv_create,
- .free = seqiv_free,
.module = THIS_MODULE,
};
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 56fa665a4f01..492c1d0bfe06 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -449,8 +449,9 @@ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
}
EXPORT_SYMBOL_GPL(serpent_setkey);
-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
+void __serpent_encrypt(const void *c, u8 *dst, const u8 *src)
{
+ const struct serpent_ctx *ctx = c;
const u32 *k = ctx->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
@@ -514,8 +515,9 @@ static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
__serpent_encrypt(ctx, dst, src);
}
-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
+void __serpent_decrypt(const void *c, u8 *dst, const u8 *src)
{
+ const struct serpent_ctx *ctx = c;
const u32 *k = ctx->expkey;
const __le32 *s = (const __le32 *)src;
__le32 *d = (__le32 *)dst;
diff --git a/crypto/shash.c b/crypto/shash.c
index e83c5124f6eb..c075b26c2a1d 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -50,8 +50,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
{
- if (crypto_shash_alg_has_setkey(alg) &&
- !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ if (crypto_shash_alg_needs_key(alg))
crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
@@ -386,18 +385,51 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
return 0;
}
+static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_shash *hash = __crypto_shash_cast(tfm);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+
+ alg->exit_tfm(hash);
+}
+
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
struct shash_alg *alg = crypto_shash_alg(hash);
+ int err;
hash->descsize = alg->descsize;
shash_set_needkey(hash, alg);
+ if (alg->exit_tfm)
+ tfm->exit = crypto_shash_exit_tfm;
+
+ if (!alg->init_tfm)
+ return 0;
+
+ err = alg->init_tfm(hash);
+ if (err)
+ return err;
+
+ /* ->init_tfm() may have increased the descsize. */
+ if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) {
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ return -EINVAL;
+ }
+
return 0;
}
+static void crypto_shash_free_instance(struct crypto_instance *inst)
+{
+ struct shash_instance *shash = shash_instance(inst);
+
+ shash->free(shash);
+}
+
#ifdef CONFIG_NET
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -434,6 +466,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
static const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
+ .free = crypto_shash_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
@@ -444,6 +477,15 @@ static const struct crypto_type crypto_shash_type = {
.tfmsize = offsetof(struct crypto_shash, base),
};
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_shash_type;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_shash);
+
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask)
{
@@ -495,9 +537,9 @@ int crypto_register_shash(struct shash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_register_shash);
-int crypto_unregister_shash(struct shash_alg *alg)
+void crypto_unregister_shash(struct shash_alg *alg)
{
- return crypto_unregister_alg(&alg->base);
+ crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
@@ -521,19 +563,12 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_register_shashes);
-int crypto_unregister_shashes(struct shash_alg *algs, int count)
+void crypto_unregister_shashes(struct shash_alg *algs, int count)
{
- int i, ret;
+ int i;
- for (i = count - 1; i >= 0; --i) {
- ret = crypto_unregister_shash(&algs[i]);
- if (ret)
- pr_err("Failed to unregister %s %s: %d\n",
- algs[i].base.cra_driver_name,
- algs[i].base.cra_name, ret);
- }
-
- return 0;
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_shash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_shashes);
@@ -542,6 +577,9 @@ int shash_register_instance(struct crypto_template *tmpl,
{
int err;
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
err = shash_prepare_alg(&inst->alg);
if (err)
return err;
@@ -550,31 +588,12 @@ int shash_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(shash_register_instance);
-void shash_free_instance(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(shash_instance(inst));
-}
-EXPORT_SYMBOL_GPL(shash_free_instance);
-
-int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
- struct shash_alg *alg,
- struct crypto_instance *inst)
+void shash_free_singlespawn_instance(struct shash_instance *inst)
{
- return crypto_init_spawn2(&spawn->base, &alg->base, inst,
- &crypto_shash_type);
-}
-EXPORT_SYMBOL_GPL(crypto_init_shash_spawn);
-
-struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
-{
- struct crypto_alg *alg;
-
- alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
- return IS_ERR(alg) ? ERR_CAST(alg) :
- container_of(alg, struct shash_alg, base);
+ crypto_drop_spawn(shash_instance_ctx(inst));
+ kfree(inst);
}
-EXPORT_SYMBOL_GPL(shash_attr_alg);
+EXPORT_SYMBOL_GPL(shash_free_singlespawn_instance);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type");
diff --git a/crypto/simd.c b/crypto/simd.c
index 48876266cf2d..56885af49c24 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -52,15 +52,11 @@ static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
- int err;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, key_len);
- crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return crypto_skcipher_setkey(child, key, key_len);
}
static int simd_skcipher_encrypt(struct skcipher_request *req)
@@ -295,15 +291,11 @@ static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *child = &ctx->cryptd_tfm->base;
- int err;
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(child, key, key_len);
- crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return crypto_aead_setkey(child, key, key_len);
}
static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 13da43c84b64..7221def7b9a7 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -549,15 +549,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
return err;
}
-int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
- bool atomic)
-{
- walk->total = req->cryptlen;
-
- return skcipher_walk_aead_common(walk, req, atomic);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_aead);
-
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
@@ -578,14 +569,9 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
-static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
-{
- return crypto_alg_extsize(alg);
-}
-
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
- if (tfm->keysize)
+ if (crypto_skcipher_max_keysize(tfm) != 0)
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
@@ -610,17 +596,15 @@ static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
return ret;
}
-static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
int err;
- if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
return -EINVAL;
- }
if ((unsigned long)key & alignmask)
err = skcipher_setkey_unaligned(tfm, key, keylen);
@@ -635,6 +619,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
+EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
@@ -647,7 +632,7 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = tfm->encrypt(req);
+ ret = crypto_skcipher_alg(tfm)->encrypt(req);
crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
return ret;
}
@@ -664,7 +649,7 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = tfm->decrypt(req);
+ ret = crypto_skcipher_alg(tfm)->decrypt(req);
crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
return ret;
}
@@ -683,12 +668,6 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
- skcipher->setkey = skcipher_setkey;
- skcipher->encrypt = alg->encrypt;
- skcipher->decrypt = alg->decrypt;
- skcipher->ivsize = alg->ivsize;
- skcipher->keysize = alg->max_keysize;
-
skcipher_set_needkey(skcipher);
if (alg->exit)
@@ -754,7 +733,7 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
#endif
static const struct crypto_type crypto_skcipher_type = {
- .extsize = crypto_skcipher_extsize,
+ .extsize = crypto_alg_extsize,
.init_tfm = crypto_skcipher_init_tfm,
.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
@@ -768,10 +747,11 @@ static const struct crypto_type crypto_skcipher_type = {
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
- const char *name, u32 type, u32 mask)
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_skcipher_type;
- return crypto_grab_spawn(&spawn->base, name, type, mask);
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
@@ -885,6 +865,9 @@ int skcipher_register_instance(struct crypto_template *tmpl,
{
int err;
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
err = skcipher_prepare_alg(&inst->alg);
if (err)
return err;
@@ -897,21 +880,17 @@ static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- int err;
crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(cipher, key, keylen);
- crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
- CRYPTO_TFM_RES_MASK);
- return err;
+ return crypto_cipher_setkey(cipher, key, keylen);
}
static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
- struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
@@ -932,7 +911,7 @@ static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
- crypto_drop_spawn(skcipher_instance_ctx(inst));
+ crypto_drop_cipher(skcipher_instance_ctx(inst));
kfree(inst);
}
@@ -948,21 +927,18 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst)
*
* @tmpl: the template being instantiated
* @tb: the template parameters
- * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
- * returned here. It must be dropped with crypto_mod_put().
*
* Return: a pointer to the new instance, or an ERR_PTR(). The caller still
* needs to register the instance.
*/
-struct skcipher_instance *
-skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
- struct crypto_alg **cipher_alg_ret)
+struct skcipher_instance *skcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
- struct crypto_alg *cipher_alg;
- struct skcipher_instance *inst;
- struct crypto_spawn *spawn;
u32 mask;
+ struct skcipher_instance *inst;
+ struct crypto_cipher_spawn *spawn;
+ struct crypto_alg *cipher_alg;
int err;
algt = crypto_get_attr_type(tb);
@@ -972,31 +948,25 @@ skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
return ERR_PTR(-EINVAL);
- mask = CRYPTO_ALG_TYPE_MASK |
- crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
-
- cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
- if (IS_ERR(cipher_alg))
- return ERR_CAST(cipher_alg);
+ mask = crypto_requires_off(algt->type, algt->mask,
+ CRYPTO_ALG_NEED_FALLBACK);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst) {
- err = -ENOMEM;
- goto err_put_cipher_alg;
- }
+ if (!inst)
+ return ERR_PTR(-ENOMEM);
spawn = skcipher_instance_ctx(inst);
- err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
- cipher_alg);
+ err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
+ cipher_alg = crypto_spawn_cipher_alg(spawn);
- err = crypto_init_spawn(spawn, cipher_alg,
- skcipher_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
+ cipher_alg);
if (err)
goto err_free_inst;
+
inst->free = skcipher_free_instance_simple;
/* Default algorithm properties, can be overridden */
@@ -1013,13 +983,10 @@ skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.init = skcipher_init_tfm_simple;
inst->alg.exit = skcipher_exit_tfm_simple;
- *cipher_alg_ret = cipher_alg;
return inst;
err_free_inst:
- kfree(inst);
-err_put_cipher_alg:
- crypto_mod_put(cipher_alg);
+ skcipher_free_instance_simple(inst);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
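
For callers, the simplified helper above boils down to the pattern already visible in the ctr, ofb and keywrap hunks: no cipher_alg_ret out-parameter and no crypto_mod_put() on the way out.

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* the underlying cipher algorithm is reachable via the instance */
	alg = skcipher_ialg_simple(inst);

	/* ... validate alg and fill in the remaining inst->alg fields ... */

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);
	return err;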
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index 71ffb343709a..016dbc595705 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -143,29 +143,23 @@ int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
/**
- * crypto_sm4_set_key - Set the AES key.
+ * crypto_sm4_set_key - Set the SM4 key.
* @tfm: The %crypto_tfm that is used in the context.
* @in_key: The input key.
* @key_len: The size of the key.
*
- * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm
- * is set. The function uses crypto_sm4_expand_key() to expand the key.
+ * This function uses crypto_sm4_expand_key() to expand the key.
* &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
* retrieved with crypto_tfm_ctx().
+ *
+ * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
*/
int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int ret;
-
- ret = crypto_sm4_expand_key(ctx, in_key, key_len);
- if (!ret)
- return 0;
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ return crypto_sm4_expand_key(ctx, in_key, key_len);
}
EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 82513b6b0abd..88f33c0efb23 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -82,6 +82,19 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
struct aead_test_suite {
const struct aead_testvec *vecs;
unsigned int count;
+
+ /*
+ * Set if trying to decrypt an inauthentic ciphertext with this
+ * algorithm might result in EINVAL rather than EBADMSG, due to other
+ * validation the algorithm does on the inputs such as length checks.
+ */
+ unsigned int einval_allowed : 1;
+
+ /*
+ * Set if the algorithm intentionally ignores the last 8 bytes of the
+ * AAD buffer during decryption.
+ */
+ unsigned int esp_aad : 1;
};
struct cipher_test_suite {
@@ -259,6 +272,9 @@ struct test_sg_division {
* where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary
* @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
* the @iv_offset
+ * @key_offset: misalignment of the key, where 0 is default alignment
+ * @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
+ * the @key_offset
* @finalization_type: what finalization function to use for hashes
* @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
*/
@@ -269,7 +285,9 @@ struct testvec_config {
struct test_sg_division src_divs[XBUFSIZE];
struct test_sg_division dst_divs[XBUFSIZE];
unsigned int iv_offset;
+ unsigned int key_offset;
bool iv_offset_relative_to_alignmask;
+ bool key_offset_relative_to_alignmask;
enum finalization_type finalization_type;
bool nosimd;
};
@@ -297,6 +315,7 @@ static const struct testvec_config default_cipher_testvec_configs[] = {
.name = "unaligned buffer, offset=1",
.src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
.iv_offset = 1,
+ .key_offset = 1,
}, {
.name = "buffer aligned only to alignmask",
.src_divs = {
@@ -308,6 +327,8 @@ static const struct testvec_config default_cipher_testvec_configs[] = {
},
.iv_offset = 1,
.iv_offset_relative_to_alignmask = true,
+ .key_offset = 1,
+ .key_offset_relative_to_alignmask = true,
}, {
.name = "two even aligned splits",
.src_divs = {
@@ -323,6 +344,7 @@ static const struct testvec_config default_cipher_testvec_configs[] = {
{ .proportion_of_total = 4800, .offset = 18 },
},
.iv_offset = 3,
+ .key_offset = 3,
}, {
.name = "misaligned splits crossing pages, inplace",
.inplace = true,
@@ -355,6 +377,7 @@ static const struct testvec_config default_hash_testvec_configs[] = {
.name = "init+update+final misaligned buffer",
.src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
.finalization_type = FINALIZATION_TYPE_FINAL,
+ .key_offset = 1,
}, {
.name = "digest buffer aligned only to alignmask",
.src_divs = {
@@ -365,6 +388,8 @@ static const struct testvec_config default_hash_testvec_configs[] = {
},
},
.finalization_type = FINALIZATION_TYPE_DIGEST,
+ .key_offset = 1,
+ .key_offset_relative_to_alignmask = true,
}, {
.name = "init+update+update+final two even splits",
.src_divs = {
@@ -740,6 +765,49 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
alignmask, dst_total_len, NULL, NULL);
}
+/*
+ * Support for testing passing a misaligned key to setkey():
+ *
+ * If cfg->key_offset is set, copy the key into a new buffer at that offset,
+ * optionally adding alignmask. Else, just use the key directly.
+ */
+static int prepare_keybuf(const u8 *key, unsigned int ksize,
+ const struct testvec_config *cfg,
+ unsigned int alignmask,
+ const u8 **keybuf_ret, const u8 **keyptr_ret)
+{
+ unsigned int key_offset = cfg->key_offset;
+ u8 *keybuf = NULL, *keyptr = (u8 *)key;
+
+ if (key_offset != 0) {
+ if (cfg->key_offset_relative_to_alignmask)
+ key_offset += alignmask;
+ keybuf = kmalloc(key_offset + ksize, GFP_KERNEL);
+ if (!keybuf)
+ return -ENOMEM;
+ keyptr = keybuf + key_offset;
+ memcpy(keyptr, key, ksize);
+ }
+ *keybuf_ret = keybuf;
+ *keyptr_ret = keyptr;
+ return 0;
+}
+
+/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */
+#define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask) \
+({ \
+ const u8 *keybuf, *keyptr; \
+ int err; \
+ \
+ err = prepare_keybuf((key), (ksize), (cfg), (alignmask), \
+ &keybuf, &keyptr); \
+ if (err == 0) { \
+ err = setkey_f((tfm), keyptr, (ksize)); \
+ kfree(keybuf); \
+ } \
+ err; \
+})
+
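For readers less familiar with statement-expression macros, the call sites added later in this patch (e.g. do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize, cfg, alignmask)) expand to roughly the following; this is an illustrative sketch, not code from the patch:

	/* Rough expansion of do_setkey() at an shash call site (sketch only).
	 * prepare_keybuf() leaves keybuf == NULL and keyptr == key when no
	 * misalignment was requested, and kfree(NULL) is a no-op, so the
	 * aligned path needs no special casing. */
	const u8 *keybuf, *keyptr;
	int err;

	err = prepare_keybuf(vec->key, vec->ksize, cfg, alignmask,
			     &keybuf, &keyptr);
	if (err == 0) {
		err = crypto_shash_setkey(tfm, keyptr, vec->ksize);
		kfree(keybuf);
	}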
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/* Generate a random length in range [0, max_len], but prefer smaller values */
@@ -759,27 +827,39 @@ static unsigned int generate_random_length(unsigned int max_len)
}
}
-/* Sometimes make some random changes to the given data buffer */
-static void mutate_buffer(u8 *buf, size_t count)
+/* Flip a random bit in the given nonempty data buffer */
+static void flip_random_bit(u8 *buf, size_t size)
+{
+ size_t bitpos;
+
+ bitpos = prandom_u32() % (size * 8);
+ buf[bitpos / 8] ^= 1 << (bitpos % 8);
+}
+
+/* Flip a random byte in the given nonempty data buffer */
+static void flip_random_byte(u8 *buf, size_t size)
+{
+ buf[prandom_u32() % size] ^= 0xff;
+}
+
+/* Sometimes make some random changes to the given nonempty data buffer */
+static void mutate_buffer(u8 *buf, size_t size)
{
size_t num_flips;
size_t i;
- size_t pos;
/* Sometimes flip some bits */
if (prandom_u32() % 4 == 0) {
- num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count * 8);
- for (i = 0; i < num_flips; i++) {
- pos = prandom_u32() % (count * 8);
- buf[pos / 8] ^= 1 << (pos % 8);
- }
+ num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8);
+ for (i = 0; i < num_flips; i++)
+ flip_random_bit(buf, size);
}
/* Sometimes flip some bytes */
if (prandom_u32() % 4 == 0) {
- num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count);
+ num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size);
for (i = 0; i < num_flips; i++)
- buf[prandom_u32() % count] ^= 0xff;
+ flip_random_byte(buf, size);
}
}
@@ -966,6 +1046,11 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
+ if (prandom_u32() % 2 == 0) {
+ cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
+ p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
+ }
+
WARN_ON_ONCE(!valid_testvec_config(cfg));
}
@@ -1103,7 +1188,8 @@ static int test_shash_vec_cfg(const char *driver,
/* Set the key, if specified */
if (vec->ksize) {
- err = crypto_shash_setkey(tfm, vec->key, vec->ksize);
+ err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize,
+ cfg, alignmask);
if (err) {
if (err == vec->setkey_error)
return 0;
@@ -1290,7 +1376,8 @@ static int test_ahash_vec_cfg(const char *driver,
/* Set the key, if specified */
if (vec->ksize) {
- err = crypto_ahash_setkey(tfm, vec->key, vec->ksize);
+ err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize,
+ cfg, alignmask);
if (err) {
if (err == vec->setkey_error)
return 0;
@@ -1853,7 +1940,6 @@ static int test_aead_vec_cfg(const char *driver, int enc,
cfg->iv_offset +
(cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
struct kvec input[2];
- int expected_error;
int err;
/* Set the key */
@@ -1861,7 +1947,9 @@ static int test_aead_vec_cfg(const char *driver, int enc,
crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
else
crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- err = crypto_aead_setkey(tfm, vec->key, vec->klen);
+
+ err = do_setkey(crypto_aead_setkey, tfm, vec->key, vec->klen,
+ cfg, alignmask);
if (err && err != vec->setkey_error) {
pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
driver, vec_name, vec->setkey_error, err,
@@ -1972,20 +2060,31 @@ static int test_aead_vec_cfg(const char *driver, int enc,
return -EINVAL;
}
- /* Check for success or failure */
- expected_error = vec->novrfy ? -EBADMSG : vec->crypt_error;
- if (err) {
- if (err == expected_error)
- return 0;
- pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
- driver, op, vec_name, expected_error, err, cfg->name);
- return err;
- }
- if (expected_error) {
- pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+ /* Check for unexpected success or failure, or wrong error code */
+ if ((err == 0 && vec->novrfy) ||
+ (err != vec->crypt_error && !(err == -EBADMSG && vec->novrfy))) {
+ char expected_error[32];
+
+ if (vec->novrfy &&
+ vec->crypt_error != 0 && vec->crypt_error != -EBADMSG)
+ sprintf(expected_error, "-EBADMSG or %d",
+ vec->crypt_error);
+ else if (vec->novrfy)
+ sprintf(expected_error, "-EBADMSG");
+ else
+ sprintf(expected_error, "%d", vec->crypt_error);
+ if (err) {
+ pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%s, actual_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, expected_error, err,
+ cfg->name);
+ return err;
+ }
+ pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%s, cfg=\"%s\"\n",
driver, op, vec_name, expected_error, cfg->name);
return -EINVAL;
}
+ if (err) /* Expectedly failed. */
+ return 0;
/* Check for the correct output (ciphertext or plaintext) */
err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
@@ -2047,25 +2146,129 @@ static int test_aead_vec(const char *driver, int enc,
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+
+struct aead_extra_tests_ctx {
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ const char *driver;
+ const struct alg_test_desc *test_desc;
+ struct cipher_test_sglists *tsgls;
+ unsigned int maxdatasize;
+ unsigned int maxkeysize;
+
+ struct aead_testvec vec;
+ char vec_name[64];
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
+ struct testvec_config cfg;
+};
+
/*
- * Generate an AEAD test vector from the given implementation.
- * Assumes the buffers in 'vec' were already allocated.
+ * Make at least one random change to a (ciphertext, AAD) pair. "Ciphertext"
+ * here means the full ciphertext including the authentication tag. The
+ * authentication tag (and hence also the ciphertext) is assumed to be nonempty.
+ */
+static void mutate_aead_message(struct aead_testvec *vec, bool esp_aad)
+{
+ const unsigned int aad_tail_size = esp_aad ? 8 : 0;
+ const unsigned int authsize = vec->clen - vec->plen;
+
+ if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) {
+ /* Mutate the AAD */
+ flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
+ if (prandom_u32() % 2 == 0)
+ return;
+ }
+ if (prandom_u32() % 2 == 0) {
+ /* Mutate auth tag (assuming it's at the end of ciphertext) */
+ flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
+ } else {
+ /* Mutate any part of the ciphertext */
+ flip_random_bit((u8 *)vec->ctext, vec->clen);
+ }
+}
+
+/*
+ * Minimum authentication tag size in bytes at which we assume that we can
+ * reliably generate inauthentic messages, i.e. not generate an authentic
+ * message by chance.
+ */
+#define MIN_COLLISION_FREE_AUTHSIZE 8
+
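A quick back-of-the-envelope justification for that constant (reasoning added here, not part of the patch):

	/*
	 * With an authsize of 8 bytes, a randomly generated or randomly
	 * mutated message passes verification with probability about
	 *   2^-(8 * 8) = 2^-64 ~= 5.4e-20,
	 * so treating such generated messages as inauthentic is safe.
	 */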
+static void generate_aead_message(struct aead_request *req,
+ const struct aead_test_suite *suite,
+ struct aead_testvec *vec,
+ bool prefer_inauthentic)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ const unsigned int authsize = vec->clen - vec->plen;
+ const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
+ (prefer_inauthentic || prandom_u32() % 4 == 0);
+
+ /* Generate the AAD. */
+ generate_random_bytes((u8 *)vec->assoc, vec->alen);
+
+ if (inauthentic && prandom_u32() % 2 == 0) {
+ /* Generate a random ciphertext. */
+ generate_random_bytes((u8 *)vec->ctext, vec->clen);
+ } else {
+ int i = 0;
+ struct scatterlist src[2], dst;
+ u8 iv[MAX_IVLEN];
+ DECLARE_CRYPTO_WAIT(wait);
+
+ /* Generate a random plaintext and encrypt it. */
+ sg_init_table(src, 2);
+ if (vec->alen)
+ sg_set_buf(&src[i++], vec->assoc, vec->alen);
+ if (vec->plen) {
+ generate_random_bytes((u8 *)vec->ptext, vec->plen);
+ sg_set_buf(&src[i++], vec->ptext, vec->plen);
+ }
+ sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
+ memcpy(iv, vec->iv, ivsize);
+ aead_request_set_callback(req, 0, crypto_req_done, &wait);
+ aead_request_set_crypt(req, src, &dst, vec->plen, iv);
+ aead_request_set_ad(req, vec->alen);
+ vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req),
+ &wait);
+ /* If encryption failed, we're done. */
+ if (vec->crypt_error != 0)
+ return;
+ memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
+ if (!inauthentic)
+ return;
+ /*
+ * Mutate the authentic (ciphertext, AAD) pair to get an
+ * inauthentic one.
+ */
+ mutate_aead_message(vec, suite->esp_aad);
+ }
+ vec->novrfy = 1;
+ if (suite->einval_allowed)
+ vec->crypt_error = -EINVAL;
+}
+
+/*
+ * Generate an AEAD test vector 'vec' using the implementation specified by
+ * 'req'. The buffers in 'vec' must already be allocated.
+ *
+ * If 'prefer_inauthentic' is true, then this function will generate inauthentic
+ * test vectors (i.e. vectors with 'vec->novrfy=1') more often.
*/
static void generate_random_aead_testvec(struct aead_request *req,
struct aead_testvec *vec,
+ const struct aead_test_suite *suite,
unsigned int maxkeysize,
unsigned int maxdatasize,
- char *name, size_t max_namelen)
+ char *name, size_t max_namelen,
+ bool prefer_inauthentic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
const unsigned int ivsize = crypto_aead_ivsize(tfm);
- unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
+ const unsigned int maxauthsize = crypto_aead_maxauthsize(tfm);
unsigned int authsize;
unsigned int total_len;
- int i;
- struct scatterlist src[2], dst;
- u8 iv[MAX_IVLEN];
- DECLARE_CRYPTO_WAIT(wait);
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
@@ -2081,81 +2284,101 @@ static void generate_random_aead_testvec(struct aead_request *req,
authsize = maxauthsize;
if (prandom_u32() % 4 == 0)
authsize = prandom_u32() % (maxauthsize + 1);
+ if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
+ authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
authsize = maxdatasize;
maxdatasize -= authsize;
vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
- /* Plaintext and associated data */
+ /* AAD, plaintext, and ciphertext lengths */
total_len = generate_random_length(maxdatasize);
if (prandom_u32() % 4 == 0)
vec->alen = 0;
else
vec->alen = generate_random_length(total_len);
vec->plen = total_len - vec->alen;
- generate_random_bytes((u8 *)vec->assoc, vec->alen);
- generate_random_bytes((u8 *)vec->ptext, vec->plen);
-
vec->clen = vec->plen + authsize;
/*
- * If the key or authentication tag size couldn't be set, no need to
- * continue to encrypt.
+ * Generate the AAD, plaintext, and ciphertext. Not applicable if the
+ * key or the authentication tag size couldn't be set.
*/
- if (vec->setkey_error || vec->setauthsize_error)
- goto done;
-
- /* Ciphertext */
- sg_init_table(src, 2);
- i = 0;
- if (vec->alen)
- sg_set_buf(&src[i++], vec->assoc, vec->alen);
- if (vec->plen)
- sg_set_buf(&src[i++], vec->ptext, vec->plen);
- sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
- memcpy(iv, vec->iv, ivsize);
- aead_request_set_callback(req, 0, crypto_req_done, &wait);
- aead_request_set_crypt(req, src, &dst, vec->plen, iv);
- aead_request_set_ad(req, vec->alen);
- vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), &wait);
- if (vec->crypt_error == 0)
- memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
-done:
+ vec->novrfy = 0;
+ vec->crypt_error = 0;
+ if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
+ generate_aead_message(req, suite, vec, prefer_inauthentic);
snprintf(name, max_namelen,
- "\"random: alen=%u plen=%u authsize=%u klen=%u\"",
- vec->alen, vec->plen, authsize, vec->klen);
+ "\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
+ vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
+}
+
+static void try_to_generate_inauthentic_testvec(
+ struct aead_extra_tests_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ generate_random_aead_testvec(ctx->req, &ctx->vec,
+ &ctx->test_desc->suite.aead,
+ ctx->maxkeysize, ctx->maxdatasize,
+ ctx->vec_name,
+ sizeof(ctx->vec_name), true);
+ if (ctx->vec.novrfy)
+ return;
+ }
}
/*
- * Test the AEAD algorithm represented by @req against the corresponding generic
- * implementation, if one is available.
+ * Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the
+ * result of an encryption with the key) and verify that decryption fails.
*/
-static int test_aead_vs_generic_impl(const char *driver,
- const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
+static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- const unsigned int ivsize = crypto_aead_ivsize(tfm);
- const unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
- const unsigned int blocksize = crypto_aead_blocksize(tfm);
- const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ /*
+ * Since this part of the tests isn't comparing the
+ * implementation to another, there's no point in testing any
+ * test vectors other than inauthentic ones (vec.novrfy=1) here.
+ *
+ * If we're having trouble generating such a test vector, e.g.
+ * if the algorithm keeps rejecting the generated keys, don't
+ * retry forever; just continue on.
+ */
+ try_to_generate_inauthentic_testvec(ctx);
+ if (ctx->vec.novrfy) {
+ generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ sizeof(ctx->cfgname));
+ err = test_aead_vec_cfg(ctx->driver, DECRYPT, &ctx->vec,
+ ctx->vec_name, &ctx->cfg,
+ ctx->req, ctx->tsgls);
+ if (err)
+ return err;
+ }
+ cond_resched();
+ }
+ return 0;
+}
+
+/*
+ * Test the AEAD algorithm against the corresponding generic implementation, if
+ * one is available.
+ */
+static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
+{
+ struct crypto_aead *tfm = ctx->tfm;
const char *algname = crypto_aead_alg(tfm)->base.cra_name;
- const char *generic_driver = test_desc->generic_driver;
+ const char *driver = ctx->driver;
+ const char *generic_driver = ctx->test_desc->generic_driver;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *generic_tfm = NULL;
struct aead_request *generic_req = NULL;
- unsigned int maxkeysize;
unsigned int i;
- struct aead_testvec vec = { 0 };
- char vec_name[64];
- struct testvec_config *cfg;
- char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
- return 0;
-
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
@@ -2179,12 +2402,6 @@ static int test_aead_vs_generic_impl(const char *driver,
return err;
}
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- err = -ENOMEM;
- goto out;
- }
-
generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
if (!generic_req) {
err = -ENOMEM;
@@ -2193,24 +2410,27 @@ static int test_aead_vs_generic_impl(const char *driver,
/* Check the algorithm properties for consistency. */
- if (maxauthsize != crypto_aead_alg(generic_tfm)->maxauthsize) {
+ if (crypto_aead_maxauthsize(tfm) !=
+ crypto_aead_maxauthsize(generic_tfm)) {
pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n",
- driver, maxauthsize,
- crypto_aead_alg(generic_tfm)->maxauthsize);
+ driver, crypto_aead_maxauthsize(tfm),
+ crypto_aead_maxauthsize(generic_tfm));
err = -EINVAL;
goto out;
}
- if (ivsize != crypto_aead_ivsize(generic_tfm)) {
+ if (crypto_aead_ivsize(tfm) != crypto_aead_ivsize(generic_tfm)) {
pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n",
- driver, ivsize, crypto_aead_ivsize(generic_tfm));
+ driver, crypto_aead_ivsize(tfm),
+ crypto_aead_ivsize(generic_tfm));
err = -EINVAL;
goto out;
}
- if (blocksize != crypto_aead_blocksize(generic_tfm)) {
+ if (crypto_aead_blocksize(tfm) != crypto_aead_blocksize(generic_tfm)) {
pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n",
- driver, blocksize, crypto_aead_blocksize(generic_tfm));
+ driver, crypto_aead_blocksize(tfm),
+ crypto_aead_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
@@ -2219,55 +2439,93 @@ static int test_aead_vs_generic_impl(const char *driver,
* Now generate test vectors using the generic implementation, and test
* the other implementation against them.
*/
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ generate_random_aead_testvec(generic_req, &ctx->vec,
+ &ctx->test_desc->suite.aead,
+ ctx->maxkeysize, ctx->maxdatasize,
+ ctx->vec_name,
+ sizeof(ctx->vec_name), false);
+ generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+ sizeof(ctx->cfgname));
+ if (!ctx->vec.novrfy) {
+ err = test_aead_vec_cfg(driver, ENCRYPT, &ctx->vec,
+ ctx->vec_name, &ctx->cfg,
+ ctx->req, ctx->tsgls);
+ if (err)
+ goto out;
+ }
+ if (ctx->vec.crypt_error == 0 || ctx->vec.novrfy) {
+ err = test_aead_vec_cfg(driver, DECRYPT, &ctx->vec,
+ ctx->vec_name, &ctx->cfg,
+ ctx->req, ctx->tsgls);
+ if (err)
+ goto out;
+ }
+ cond_resched();
+ }
+ err = 0;
+out:
+ crypto_free_aead(generic_tfm);
+ aead_request_free(generic_req);
+ return err;
+}
- maxkeysize = 0;
- for (i = 0; i < test_desc->suite.aead.count; i++)
- maxkeysize = max_t(unsigned int, maxkeysize,
- test_desc->suite.aead.vecs[i].klen);
+static int test_aead_extra(const char *driver,
+ const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ struct aead_extra_tests_ctx *ctx;
+ unsigned int i;
+ int err;
- vec.key = kmalloc(maxkeysize, GFP_KERNEL);
- vec.iv = kmalloc(ivsize, GFP_KERNEL);
- vec.assoc = kmalloc(maxdatasize, GFP_KERNEL);
- vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
- vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
- if (!vec.key || !vec.iv || !vec.assoc || !vec.ptext || !vec.ctext) {
+ if (noextratests)
+ return 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->req = req;
+ ctx->tfm = crypto_aead_reqtfm(req);
+ ctx->driver = driver;
+ ctx->test_desc = test_desc;
+ ctx->tsgls = tsgls;
+ ctx->maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ ctx->maxkeysize = 0;
+ for (i = 0; i < test_desc->suite.aead.count; i++)
+ ctx->maxkeysize = max_t(unsigned int, ctx->maxkeysize,
+ test_desc->suite.aead.vecs[i].klen);
+
+ ctx->vec.key = kmalloc(ctx->maxkeysize, GFP_KERNEL);
+ ctx->vec.iv = kmalloc(crypto_aead_ivsize(ctx->tfm), GFP_KERNEL);
+ ctx->vec.assoc = kmalloc(ctx->maxdatasize, GFP_KERNEL);
+ ctx->vec.ptext = kmalloc(ctx->maxdatasize, GFP_KERNEL);
+ ctx->vec.ctext = kmalloc(ctx->maxdatasize, GFP_KERNEL);
+ if (!ctx->vec.key || !ctx->vec.iv || !ctx->vec.assoc ||
+ !ctx->vec.ptext || !ctx->vec.ctext) {
err = -ENOMEM;
goto out;
}
- for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_aead_testvec(generic_req, &vec,
- maxkeysize, maxdatasize,
- vec_name, sizeof(vec_name));
- generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+ err = test_aead_inauthentic_inputs(ctx);
+ if (err)
+ goto out;
- err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, cfg,
- req, tsgls);
- if (err)
- goto out;
- err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
- req, tsgls);
- if (err)
- goto out;
- cond_resched();
- }
- err = 0;
+ err = test_aead_vs_generic_impl(ctx);
out:
- kfree(cfg);
- kfree(vec.key);
- kfree(vec.iv);
- kfree(vec.assoc);
- kfree(vec.ptext);
- kfree(vec.ctext);
- crypto_free_aead(generic_tfm);
- aead_request_free(generic_req);
+ kfree(ctx->vec.key);
+ kfree(ctx->vec.iv);
+ kfree(ctx->vec.assoc);
+ kfree(ctx->vec.ptext);
+ kfree(ctx->vec.ctext);
+ kfree(ctx);
return err;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_aead_vs_generic_impl(const char *driver,
- const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
+static int test_aead_extra(const char *driver,
+ const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
{
return 0;
}
@@ -2336,7 +2594,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
if (err)
goto out;
- err = test_aead_vs_generic_impl(driver, desc, req, tsgls);
+ err = test_aead_extra(driver, desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@@ -2457,7 +2715,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
else
crypto_skcipher_clear_flags(tfm,
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+ err = do_setkey(crypto_skcipher_setkey, tfm, vec->key, vec->klen,
+ cfg, alignmask);
if (err) {
if (err == vec->setkey_error)
return 0;
@@ -2647,7 +2906,7 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
char *name, size_t max_namelen)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const unsigned int maxkeysize = tfm->keysize;
+ const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm);
const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct scatterlist src, dst;
u8 iv[MAX_IVLEN];
@@ -2678,6 +2937,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ if (vec->crypt_error != 0) {
+ /*
+ * The only acceptable error here is for an invalid length, so
+ * skcipher decryption should fail with the same error too.
+ * We'll test for this. But to keep the API usage well-defined,
+ * explicitly initialize the ciphertext buffer too.
+ */
+ memset((u8 *)vec->ctext, 0, vec->len);
+ }
done:
snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
vec->len, vec->klen);
@@ -2693,6 +2961,7 @@ static int test_skcipher_vs_generic_impl(const char *driver,
struct cipher_test_sglists *tsgls)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm);
const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
@@ -2751,9 +3020,19 @@ static int test_skcipher_vs_generic_impl(const char *driver,
/* Check the algorithm properties for consistency. */
- if (tfm->keysize != generic_tfm->keysize) {
+ if (crypto_skcipher_min_keysize(tfm) !=
+ crypto_skcipher_min_keysize(generic_tfm)) {
+ pr_err("alg: skcipher: min keysize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, crypto_skcipher_min_keysize(tfm),
+ crypto_skcipher_min_keysize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (maxkeysize != crypto_skcipher_max_keysize(generic_tfm)) {
pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n",
- driver, tfm->keysize, generic_tfm->keysize);
+ driver, maxkeysize,
+ crypto_skcipher_max_keysize(generic_tfm));
err = -EINVAL;
goto out;
}
@@ -2778,7 +3057,7 @@ static int test_skcipher_vs_generic_impl(const char *driver,
* the other implementation against them.
*/
- vec.key = kmalloc(tfm->keysize, GFP_KERNEL);
+ vec.key = kmalloc(maxkeysize, GFP_KERNEL);
vec.iv = kmalloc(ivsize, GFP_KERNEL);
vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
@@ -3862,7 +4141,8 @@ static int alg_test_null(const struct alg_test_desc *desc,
return 0;
}
-#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
+#define ____VECS(tv) .vecs = tv, .count = ARRAY_SIZE(tv)
+#define __VECS(tv) { ____VECS(tv) }
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
@@ -4168,7 +4448,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = __VECS(aes_ccm_tv_template)
+ .aead = {
+ ____VECS(aes_ccm_tv_template),
+ .einval_allowed = 1,
+ }
}
}, {
.alg = "cfb(aes)",
@@ -4916,7 +5199,11 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = __VECS(aes_gcm_rfc4106_tv_template)
+ .aead = {
+ ____VECS(aes_gcm_rfc4106_tv_template),
+ .einval_allowed = 1,
+ .esp_aad = 1,
+ }
}
}, {
.alg = "rfc4309(ccm(aes))",
@@ -4924,14 +5211,21 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = __VECS(aes_ccm_rfc4309_tv_template)
+ .aead = {
+ ____VECS(aes_ccm_rfc4309_tv_template),
+ .einval_allowed = 1,
+ .esp_aad = 1,
+ }
}
}, {
.alg = "rfc4543(gcm(aes))",
.generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
.suite = {
- .aead = __VECS(aes_gcm_rfc4543_tv_template)
+ .aead = {
+ ____VECS(aes_gcm_rfc4543_tv_template),
+ .einval_allowed = 1,
+ }
}
}, {
.alg = "rfc7539(chacha20,poly1305)",
@@ -4943,7 +5237,11 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "rfc7539esp(chacha20,poly1305)",
.test = alg_test_aead,
.suite = {
- .aead = __VECS(rfc7539esp_tv_template)
+ .aead = {
+ ____VECS(rfc7539esp_tv_template),
+ .einval_allowed = 1,
+ .esp_aad = 1,
+ }
}
}, {
.alg = "rmd128",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 48da646651cb..d29983908c38 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -85,16 +85,22 @@ struct cipher_testvec {
* @ctext: Pointer to the full authenticated ciphertext. For AEADs that
* produce a separate "ciphertext" and "authentication tag", these
* two parts are concatenated: ciphertext || tag.
- * @novrfy: Decryption verification failure expected?
+ * @novrfy: If set, this is an inauthentic input test: only decryption is
+ * tested, and it is expected to fail with either -EBADMSG or
+ * @crypt_error if it is nonzero.
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* (e.g. setkey() needs to fail due to a weak key)
* @klen: Length of @key in bytes
* @plen: Length of @ptext in bytes
* @alen: Length of @assoc in bytes
* @clen: Length of @ctext in bytes
- * @setkey_error: Expected error from setkey()
- * @setauthsize_error: Expected error from setauthsize()
- * @crypt_error: Expected error from encrypt() and decrypt()
+ * @setkey_error: Expected error from setkey(). If set, neither encryption nor
+ * decryption is tested.
+ * @setauthsize_error: Expected error from setauthsize(). If set, neither
+ * encryption nor decryption is tested.
+ * @crypt_error: When @novrfy=0, the expected error from encrypt(). When
+ * @novrfy=1, an optional alternate error code that is acceptable
+ * for decrypt() to return besides -EBADMSG.
*/
struct aead_testvec {
const char *key;
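To illustrate the new @novrfy/@crypt_error semantics, an inauthentic-input vector would be written along these lines (all values invented for illustration; this entry is not taken from testmgr.h):

	/* Hypothetical inauthentic-input vector: only decryption is
	 * exercised, and it must fail with -EBADMSG or, since crypt_error
	 * is set, alternatively with -EINVAL. */
	static const struct aead_testvec example_novrfy_vec = {
		.key	= "\x01\x02\x03\x04\x05\x06\x07\x08"
			  "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10",
		.klen	= 16,
		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
			  "\x00\x00\x00\x00\x00\x00\x00\x00",
		.assoc	= "\xaa\xbb\xcc\xdd\xee\xff\x00\x11",
		.alen	= 8,
		.ctext	= "\xde\xad\xbe\xef\xde\xad\xbe\xef"
			  "\x00\x11\x22\x33\x44\x55\x66\x77"
			  "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
		.clen	= 24,	/* 8 bytes of data plus a 16-byte (corrupted) tag */
		.plen	= 8,
		.novrfy	= 1,
		.crypt_error = -EINVAL,
	};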
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index 222fc765c57a..d23fa531b91f 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -567,7 +567,7 @@ static const u8 calc_sb_tbl[512] = {
/* Perform the key setup. */
int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
- unsigned int key_len, u32 *flags)
+ unsigned int key_len)
{
int i, j, k;
@@ -584,10 +584,7 @@ int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
/* Check key length. */
if (key_len % 8)
- {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL; /* unsupported key length */
- }
/* Compute the first two words of the S vector. The magic numbers are
* the entries of the RS matrix, preprocessed through poly_to_exp. The
@@ -688,8 +685,7 @@ EXPORT_SYMBOL_GPL(__twofish_setkey);
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
{
- return __twofish_setkey(crypto_tfm_ctx(tfm), key, key_len,
- &tfm->crt_flags);
+ return __twofish_setkey(crypto_tfm_ctx(tfm), key, key_len);
}
EXPORT_SYMBOL_GPL(twofish_setkey);
diff --git a/crypto/vmac.c b/crypto/vmac.c
index f50a85060b39..2d906830df96 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -435,10 +435,8 @@ static int vmac_setkey(struct crypto_shash *tfm,
unsigned int i;
int err;
- if (keylen != VMAC_KEY_LEN) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != VMAC_KEY_LEN)
return -EINVAL;
- }
err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
@@ -598,7 +596,7 @@ static int vmac_final(struct shash_desc *desc, u8 *out)
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
@@ -620,6 +618,7 @@ static void vmac_exit_tfm(struct crypto_tfm *tfm)
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
+ struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
int err;
@@ -627,25 +626,24 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
return err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = shash_instance_ctx(inst);
+
+ err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, 0);
+ if (err)
+ goto err_free_inst;
+ alg = crypto_spawn_cipher_alg(spawn);
err = -EINVAL;
if (alg->cra_blocksize != VMAC_NONCEBYTES)
- goto out_put_alg;
+ goto err_free_inst;
- inst = shash_alloc_instance(tmpl->name, alg);
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- err = crypto_init_spawn(shash_instance_ctx(inst), alg,
- shash_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
+ err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
@@ -662,21 +660,19 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.final = vmac_final;
inst->alg.setkey = vmac_setkey;
+ inst->free = shash_free_singlespawn_instance;
+
err = shash_register_instance(tmpl, inst);
if (err) {
-out_free_inst:
- shash_free_instance(shash_crypto_instance(inst));
+err_free_inst:
+ shash_free_singlespawn_instance(inst);
}
-
-out_put_alg:
- crypto_mod_put(alg);
return err;
}
static struct crypto_template vmac64_tmpl = {
.name = "vmac64",
.create = vmac_create,
- .free = shash_free_instance,
.module = THIS_MODULE,
};
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 0bb26e8f6f5a..598ec88abf0f 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -167,7 +167,7 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
@@ -188,6 +188,7 @@ static void xcbc_exit_tfm(struct crypto_tfm *tfm)
static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
+ struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
int err;
@@ -196,28 +197,24 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
return err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = shash_instance_ctx(inst);
- switch(alg->cra_blocksize) {
- case XCBC_BLOCKSIZE:
- break;
- default:
- goto out_put_alg;
- }
+ err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, 0);
+ if (err)
+ goto err_free_inst;
+ alg = crypto_spawn_cipher_alg(spawn);
- inst = shash_alloc_instance("xcbc", alg);
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
+ err = -EINVAL;
+ if (alg->cra_blocksize != XCBC_BLOCKSIZE)
+ goto err_free_inst;
- err = crypto_init_spawn(shash_instance_ctx(inst), alg,
- shash_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
+ err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
alignmask = alg->cra_alignmask | 3;
inst->alg.base.cra_alignmask = alignmask;
@@ -242,21 +239,19 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.final = crypto_xcbc_digest_final;
inst->alg.setkey = crypto_xcbc_digest_setkey;
+ inst->free = shash_free_singlespawn_instance;
+
err = shash_register_instance(tmpl, inst);
if (err) {
-out_free_inst:
- shash_free_instance(shash_crypto_instance(inst));
+err_free_inst:
+ shash_free_singlespawn_instance(inst);
}
-
-out_put_alg:
- crypto_mod_put(alg);
return err;
}
static struct crypto_template crypto_xcbc_tmpl = {
.name = "xcbc",
.create = xcbc_create,
- .free = shash_free_instance,
.module = THIS_MODULE,
};
diff --git a/crypto/xts.c b/crypto/xts.c
index ab117633d64e..29efa15f1495 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -61,8 +61,6 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(tweak, key + keylen, keylen);
- crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -71,11 +69,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
+ return crypto_skcipher_setkey(child, key, keylen);
}
/*
@@ -361,20 +355,21 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ctx = skcipher_instance_ctx(inst);
- crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
-
mask = crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC);
- err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
+ err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
+ cipher_name, 0, mask);
if (err == -ENOENT) {
err = -ENAMETOOLONG;
if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
+ err = crypto_grab_skcipher(&ctx->spawn,
+ skcipher_crypto_instance(inst),
+ ctx->name, 0, mask);
}
if (err)
diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c
index 4aad2c0f40a9..55d1c8a76127 100644
--- a/crypto/xxhash_generic.c
+++ b/crypto/xxhash_generic.c
@@ -22,10 +22,8 @@ static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(tctx->seed)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(tctx->seed))
return -EINVAL;
- }
tctx->seed = get_unaligned_le64(key);
return 0;
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8486c29d8324..914e293ba62b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
default HW_RANDOM
---help---
This driver provides kernel-side support for the RNG200
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 899ff25f4f28..32d9fe61a225 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -213,6 +213,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm2711-rng200", },
{ .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 91eb768d4221..c2767ed54dfe 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -248,15 +248,15 @@ config CRYPTO_DEV_MARVELL_CESA
This driver supports CPU offload through DMA transfers.
config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ depends on SPARC64
+ help
Each core of a Niagara2 processor contains a Stream
Processing Unit, which itself contains several cryptographic
sub-units. One set provides the Modular Arithmetic Unit,
@@ -357,7 +357,7 @@ config CRYPTO_DEV_OMAP
depends on ARCH_OMAP2PLUS
help
OMAP processors have various crypto HW accelerators. Select this if
- you want to use the OMAP modules for any of the crypto algorithms.
+ you want to use the OMAP modules for any of the crypto algorithms.
if CRYPTO_DEV_OMAP
@@ -430,7 +430,7 @@ config CRYPTO_DEV_SAHARA
found in some Freescale i.MX chips.
config CRYPTO_DEV_EXYNOS_RNG
- tristate "EXYNOS HW pseudo random number generator support"
+ tristate "Exynos HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_RNG
@@ -618,6 +618,14 @@ config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
+ help
+ This driver supports Qualcomm crypto engine accelerator
+ hardware. To compile this driver as a module, choose M here. The
+ module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+ bool
+ depends on CRYPTO_DEV_QCE
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_ECB
@@ -625,10 +633,57 @@ config CRYPTO_DEV_QCE
select CRYPTO_XTS
select CRYPTO_CTR
select CRYPTO_SKCIPHER
- help
- This driver supports Qualcomm crypto engine accelerator
- hardware. To compile this driver as a module, choose M here. The
- module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SHA
+ bool
+ depends on CRYPTO_DEV_QCE
+
+choice
+ prompt "Algorithms enabled for QCE acceleration"
+ default CRYPTO_DEV_QCE_ENABLE_ALL
+ depends on CRYPTO_DEV_QCE
+ help
+	  This option allows you to choose whether to build support for all algorithms
+ (default), hashes-only, or skciphers-only.
+
+ The QCE engine does not appear to scale as well as the CPU to handle
+ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
+ QCE handles only 2 requests in parallel.
+
+	  IPsec throughput seems to improve when disabling either family of
+ algorithms, sharing the load with the CPU. Enabling skciphers-only
+ appears to work best.
+
+ config CRYPTO_DEV_QCE_ENABLE_ALL
+ bool "All supported algorithms"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable all supported algorithms:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (CBC, ECB)
+ - DES (CBC, ECB)
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+ bool "Symmetric-key ciphers only"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ help
+ Enable symmetric-key ciphers only:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (ECB, CBC)
+ - DES (ECB, CBC)
+
+ config CRYPTO_DEV_QCE_ENABLE_SHA
+ bool "Hash/HMAC only"
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable hashes/HMAC algorithms only:
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+endchoice
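As a usage sketch of the new options (fragment invented here; the symbol names come from the hunk above), a .config that builds qcrypto as a module with only the skcipher path enabled would contain something like:

	# Hypothetical .config fragment, not part of the patch
	CONFIG_CRYPTO_DEV_QCE=m
	CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER=y
	CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y
	# CONFIG_CRYPTO_DEV_QCE_SHA is not set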
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
@@ -639,7 +694,7 @@ config CRYPTO_DEV_QCOM_RNG
Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
- module will be called qcom-rng. If unsure, say N.
+ module will be called qcom-rng. If unsure, say N.
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
@@ -716,7 +771,7 @@ source "drivers/crypto/stm32/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"
- depends on OF || PCI || COMPILE_TEST
+ depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index cb2b0874f68f..7f22d305178e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -541,7 +541,6 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
op->keylen = keylen;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 814cd12149a9..a2b67f7f8a81 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
@@ -22,6 +23,14 @@
#include "sun4i-ss.h"
+static const struct ss_variant ss_a10_variant = {
+ .sha1_in_be = false,
+};
+
+static const struct ss_variant ss_a33_variant = {
+ .sha1_in_be = true,
+};
+
static struct sun4i_ss_alg_template ss_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.mode = SS_OP_MD5,
@@ -273,7 +282,7 @@ err_enable:
return err;
}
-const struct dev_pm_ops sun4i_ss_pm_ops = {
+static const struct dev_pm_ops sun4i_ss_pm_ops = {
SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
};
@@ -323,6 +332,12 @@ static int sun4i_ss_probe(struct platform_device *pdev)
return PTR_ERR(ss->base);
}
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Security System variant\n");
+ return -EINVAL;
+ }
+
ss->ssclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(ss->ssclk)) {
err = PTR_ERR(ss->ssclk);
@@ -484,7 +499,12 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
static const struct of_device_id a20ss_crypto_of_match_table[] = {
- { .compatible = "allwinner,sun4i-a10-crypto" },
+ { .compatible = "allwinner,sun4i-a10-crypto",
+ .data = &ss_a10_variant
+ },
+ { .compatible = "allwinner,sun8i-a33-crypto",
+ .data = &ss_a33_variant
+ },
{}
};
MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fdc0e6cdbb85..dc35edd90034 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -479,7 +479,10 @@ hash_final:
/* Get the hash from the device */
if (op->mode == SS_OP_SHA1) {
for (i = 0; i < 5; i++) {
- v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+ if (ss->variant->sha1_in_be)
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ else
+ v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
} else {
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 60425ac75d90..2b4c6333eb67 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -131,7 +131,16 @@
#define SS_SEED_LEN 192
#define SS_DATA_LEN 160
+/*
+ * struct ss_variant - Describe SS hardware variant
+ * @sha1_in_be: The SHA1 digest is given by SS in BE, and so needs to be inverted.
+ */
+struct ss_variant {
+ bool sha1_in_be;
+};
+
struct sun4i_ss_ctx {
+ const struct ss_variant *variant;
void __iomem *base;
int irq;
struct clk *busclk;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 37d0b6c386a0..a5fd8975f3d3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -144,11 +144,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
- chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
- chan->op_dir = rctx->op_dir;
- chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
- chan->keylen = op->keylen;
-
addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
cet->t_key = cpu_to_le32(addr_key);
if (dma_mapping_error(ce->dev, addr_key)) {
@@ -394,7 +389,6 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 73a7649f915d..f72346a44e69 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -555,7 +555,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return -EINVAL;
}
- ce->base = devm_platform_ioremap_resource(pdev, 0);;
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ce->base))
return PTR_ERR(ce->base);
@@ -624,7 +624,7 @@ error_alg:
error_irq:
sun8i_ce_pm_exit(ce);
error_pm:
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
}
@@ -638,7 +638,7 @@ static int sun8i_ce_remove(struct platform_device *pdev)
debugfs_remove_recursive(ce->dbgfs_dir);
#endif
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
sun8i_ce_pm_exit(ce);
return 0;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 43db49ceafe4..8f8404c84a4d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -131,12 +131,8 @@ struct ce_task {
* @engine: ptr to the crypto_engine for this flow
* @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv
- * @keylen: keylen for this flow operation
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
- * @method: current method for flow
- * @op_dir: direction (encrypt vs decrypt) of this flow
- * @op_mode: op_mode for this flow
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
* @stat_req: number of request done by this flow
@@ -145,12 +141,8 @@ struct sun8i_ce_flow {
struct crypto_engine *engine;
void *bounce_iv;
unsigned int ivlen;
- unsigned int keylen;
struct completion complete;
int status;
- u32 method;
- u32 op_dir;
- u32 op_mode;
dma_addr_t t_phy;
int timeout;
struct ce_task *tl;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index f222979a5623..84d52fc3a2da 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -390,7 +390,6 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
@@ -416,7 +415,6 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 90997cc509b8..6b301afffd11 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -595,7 +595,7 @@ error_alg:
error_irq:
sun8i_ss_pm_exit(ss);
error_pm:
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
return err;
}
@@ -609,7 +609,7 @@ static int sun8i_ss_remove(struct platform_device *pdev)
debugfs_remove_recursive(ss->dbgfs_dir);
#endif
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
sun8i_ss_pm_exit(ss);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a42f8619589d..f7fc0c464125 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -128,12 +128,9 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
struct dynamic_sa_ctl *sa;
int rc;
- if (keylen != AES_KEYSIZE_256 &&
- keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
/* Create SA */
if (ctx->sa_in || ctx->sa_out)
@@ -292,19 +289,11 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
- crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_skcipher_set_flags(cipher,
- crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
}
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -379,18 +368,10 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(ctx->sw_cipher.aead,
crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
- crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(cipher,
- crypto_aead_get_flags(ctx->sw_cipher.aead) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
/**
@@ -551,10 +532,8 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
struct dynamic_sa_ctl *sa;
int rc = 0;
- if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0)
return -EINVAL;
- }
rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 7d6b695c4ab3..981de43ea5e2 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -169,7 +169,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
int i;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
- &dev->pdr_pa, GFP_ATOMIC);
+ &dev->pdr_pa, GFP_KERNEL);
if (!dev->pdr)
return -ENOMEM;
@@ -185,13 +185,13 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!dev->shadow_sa_pool)
return -ENOMEM;
dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
- &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+ &dev->shadow_sr_pool_pa, GFP_KERNEL);
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
@@ -277,7 +277,7 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
dev->gdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ &dev->gdr_pa, GFP_KERNEL);
if (!dev->gdr)
return -ENOMEM;
@@ -286,7 +286,8 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
- dma_free_coherent(dev->core_dev->device,
+ if (dev->gdr)
+ dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
dev->gdr, dev->gdr_pa);
}
@@ -354,20 +355,20 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- /* alloc memory for scatter descriptor ring */
- dev->sdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- &dev->sdr_pa, GFP_ATOMIC);
- if (!dev->sdr)
- return -ENOMEM;
-
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
- &dev->scatter_buffer_pa, GFP_ATOMIC);
+ &dev->scatter_buffer_pa, GFP_KERNEL);
if (!dev->scatter_buffer_va)
return -ENOMEM;
+ /* alloc memory for scatter descriptor ring */
+ dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ &dev->sdr_pa, GFP_KERNEL);
+ if (!dev->sdr)
+ return -ENOMEM;
+
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
PPC4XX_SD_BUFFER_SIZE * i;
@@ -1439,16 +1440,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+ rc = crypto4xx_build_sdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
+ goto err_build_sdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
-
- rc = crypto4xx_build_sdr(core_dev->dev);
- if (rc)
goto err_build_sdr;
/* Init tasklet for bottom half processing */
@@ -1493,7 +1493,6 @@ err_iomap:
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_pdr:
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
index b90850d18965..cf9547602670 100644
--- a/drivers/crypto/amlogic/Kconfig
+++ b/drivers/crypto/amlogic/Kconfig
@@ -1,5 +1,6 @@
config CRYPTO_DEV_AMLOGIC_GXL
tristate "Support for amlogic cryptographic offloader"
+ depends on HAS_IOMEM
default y if ARCH_MESON
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e589015aac1c..9819dd50fbad 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -366,7 +366,6 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index fa05fce1c0de..9d4ead2f7ebb 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -289,7 +289,7 @@ static int meson_crypto_probe(struct platform_device *pdev)
error_alg:
meson_unregister_algs(mc);
error_flow:
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return err;
}
@@ -304,7 +304,7 @@ static int meson_crypto_remove(struct platform_device *pdev)
meson_unregister_algs(mc);
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return 0;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 91092504bc96..a6e14491e080 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -37,8 +38,6 @@
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
-#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#include "atmel-authenc.h"
@@ -89,7 +88,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
- bool has_ctr32;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -122,6 +120,7 @@ struct atmel_aes_ctr_ctx {
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
+ u32 blocks;
};
struct atmel_aes_gcm_ctx {
@@ -514,8 +513,37 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
}
}
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+}
+
+static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ int i;
+
+ /*
+ * The CTR transfer is split into fragments of at most 1 MByte because
+ * of the 16-bit CTR counter embedded in the IP. By the time we get
+ * here, ctx->blocks holds the number of blocks of the last fragment
+ * processed, so there is no need to explicitly cast it to u16.
+ */
+ for (i = 0; i < ctx->blocks; i++)
+ crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+
+ memcpy(req->iv, ctx->iv, ivsize);
+}
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
@@ -524,8 +552,13 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead)
- atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && !dd->ctx->is_aead &&
+ (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
+ if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ else
+ atmel_aes_ctr_update_req_iv(dd);
+ }
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -790,7 +823,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
int err;
memset(&config, 0, sizeof(config));
- config.direction = dir;
config.src_addr_width = addr_width;
config.dst_addr_width = addr_width;
config.src_maxburst = maxburst;
@@ -830,27 +862,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
return 0;
}
-static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
- enum dma_transfer_direction dir)
-{
- struct atmel_aes_dma *dma;
-
- switch (dir) {
- case DMA_MEM_TO_DEV:
- dma = &dd->src;
- break;
-
- case DMA_DEV_TO_MEM:
- dma = &dd->dst;
- break;
-
- default:
- return;
- }
-
- dmaengine_terminate_all(dma->chan);
-}
-
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
struct scatterlist *src,
struct scatterlist *dst,
@@ -909,25 +920,18 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
return -EINPROGRESS;
output_transfer_stop:
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+ dmaengine_terminate_sync(dd->dst.chan);
unmap:
atmel_aes_unmap(dd);
exit:
return atmel_aes_complete(dd, err);
}
-static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
-{
- atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
- atmel_aes_unmap(dd);
-}
-
static void atmel_aes_dma_callback(void *data)
{
struct atmel_aes_dev *dd = data;
- atmel_aes_dma_stop(dd);
+ atmel_aes_unmap(dd);
dd->is_async = true;
(void)dd->resume(dd);
}
@@ -1004,19 +1008,14 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
atmel_aes_transfer_complete);
}
-static inline struct atmel_aes_ctr_ctx *
-atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct atmel_aes_ctr_ctx, base);
-}
-
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
- u32 ctr, blocks;
size_t datalen;
+ u32 ctr;
+ u16 start, end;
bool use_dma, fragmented = false;
/* Check for transfer completion. */
@@ -1026,29 +1025,19 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Compute data length. */
datalen = req->cryptlen - ctx->offset;
- blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+ ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
- if (dd->caps.has_ctr32) {
- /* Check 32bit counter overflow. */
- u32 start = ctr;
- u32 end = start + blocks - 1;
-
- if (end < start) {
- ctr |= 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
- } else {
- /* Check 16bit counter overflow. */
- u16 start = ctr & 0xffff;
- u16 end = start + (u16)blocks - 1;
-
- if (blocks >> 16 || end < start) {
- ctr |= 0xffff;
- datalen = AES_BLOCK_SIZE * (0x10000-start);
- fragmented = true;
- }
+
+ /* Check 16bit counter overflow. */
+ start = ctr & 0xffff;
+ end = start + ctx->blocks - 1;
+
+ if (ctx->blocks >> 16 || end < start) {
+ ctr |= 0xffff;
+ datalen = AES_BLOCK_SIZE * (0x10000 - start);
+ fragmented = true;
}
+
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
/* Jump to offset. */
@@ -1131,7 +1120,8 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx = skcipher_request_ctx(req);
rctx->mode = mode;
- if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+ !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1150,10 +1140,8 @@ static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1275,12 +1263,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "atmel-ecb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1292,12 +1276,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "atmel-cbc-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1310,12 +1290,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1328,12 +1304,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1346,12 +1318,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb32(aes)",
.base.cra_driver_name = "atmel-cfb32-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1364,12 +1332,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb16(aes)",
.base.cra_driver_name = "atmel-cfb16-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1382,12 +1346,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb8(aes)",
.base.cra_driver_name = "atmel-cfb8-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1400,12 +1360,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_ctr_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1420,12 +1376,8 @@ static struct skcipher_alg aes_algs[] = {
static struct skcipher_alg aes_cfb64_alg = {
.base.cra_name = "cfb64(aes)",
.base.cra_driver_name = "atmel-cfb64-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB64_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1762,10 +1714,8 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_128) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1776,21 +1726,7 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- /* Same as crypto_gcm_authsize() from crypto/gcm.c */
- switch (authsize) {
- case 4:
- case 8:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16:
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return crypto_gcm_check_authsize(authsize);
}
static int atmel_aes_gcm_encrypt(struct aead_request *req)
@@ -1825,12 +1761,8 @@ static struct aead_alg aes_gcm_alg = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "atmel-gcm-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
};
@@ -1947,12 +1879,8 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aes_xts_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "atmel-xts-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2113,7 +2041,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
- u32 flags;
int err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -2123,11 +2050,9 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
goto badkey;
/* Save auth key. */
- flags = crypto_aead_get_flags(tfm);
err = atmel_sha_authenc_setkey(ctx->auth,
keys.authkey, keys.authkeylen,
- &flags);
- crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
+ crypto_aead_get_flags(tfm));
if (err) {
memzero_explicit(&keys, sizeof(keys));
return err;
@@ -2141,7 +2066,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -2252,12 +2176,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2272,12 +2192,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2292,12 +2208,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2312,12 +2224,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2332,12 +2240,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
};
@@ -2364,47 +2268,30 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
free_page((unsigned long)dd->buf);
}
-static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- struct at_dma_slave *slave;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- slave = &pdata->dma_slave->rxdata;
- dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "tx");
- if (!dd->src.chan)
+ dd->src.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->src.chan)) {
+ ret = PTR_ERR(dd->src.chan);
goto err_dma_in;
+ }
- slave = &pdata->dma_slave->txdata;
- dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "rx");
- if (!dd->dst.chan)
+ dd->dst.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dst.chan)) {
+ ret = PTR_ERR(dd->dst.chan);
goto err_dma_out;
+ }
return 0;
err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2469,29 +2356,45 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
crypto_unregister_skcipher(&aes_algs[i]);
}
+static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
+{
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_alignmask = 0xf;
+ alg->cra_priority = ATMEL_AES_PRIORITY;
+ alg->cra_module = THIS_MODULE;
+}
+
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_algs[i].base);
+
err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
+ atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
+
err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
if (dd->caps.has_gcm) {
+ atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
+
err = crypto_register_aead(&aes_gcm_alg);
if (err)
goto err_aes_gcm_alg;
}
if (dd->caps.has_xts) {
+ atmel_aes_crypto_alg_init(&aes_xts_alg.base);
+
err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
@@ -2500,6 +2403,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
+
err = crypto_register_aead(&aes_authenc_algs[i]);
if (err)
goto err_aes_authenc_alg;
@@ -2533,7 +2438,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
- dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2544,7 +2448,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2553,7 +2456,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
@@ -2577,65 +2479,18 @@ static const struct of_device_id atmel_aes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-
-static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave) {
- devm_kfree(&pdev->dev, pdata);
- return ERR_PTR(-ENOMEM);
- }
-
- return pdata;
-}
-#else
-static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
int err;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_aes_of_init(pdev);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto aes_dd_err;
- }
- }
-
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto aes_dd_err;
- }
-
aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
- if (aes_dd == NULL) {
- err = -ENOMEM;
- goto aes_dd_err;
- }
+ if (!aes_dd)
+ return -ENOMEM;
aes_dd->dev = dev;
@@ -2656,7 +2511,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (!aes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@@ -2664,14 +2519,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
aes_dd->irq = platform_get_irq(pdev, 0);
if (aes_dd->irq < 0) {
err = aes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
IRQF_SHARED, "atmel-aes", aes_dd);
if (err) {
dev_err(dev, "unable to request aes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2679,40 +2534,40 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(aes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(aes_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
err = atmel_aes_hw_version_init(aes_dd);
if (err)
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
atmel_aes_get_cap(aes_dd);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
}
#endif
err = atmel_aes_buff_init(aes_dd);
if (err)
- goto err_aes_buff;
+ goto err_iclk_unprepare;
- err = atmel_aes_dma_init(aes_dd, pdata);
+ err = atmel_aes_dma_init(aes_dd);
if (err)
- goto err_aes_dma;
+ goto err_buff_cleanup;
spin_lock(&atmel_aes.lock);
list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
@@ -2733,17 +2588,13 @@ err_algs:
list_del(&aes_dd->list);
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
-err_aes_dma:
+err_buff_cleanup:
atmel_aes_buff_cleanup(aes_dd);
-err_aes_buff:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(aes_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
-aes_dd_err:
- if (err != -EPROBE_DEFER)
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index d6de810df44f..c6530a1c8c20 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -30,8 +30,7 @@ unsigned int atmel_sha_authenc_get_reqsize(void);
struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode);
void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags);
+ const u8 *key, unsigned int keylen, u32 flags);
int atmel_sha_authenc_schedule(struct ahash_request *req,
struct atmel_sha_authenc_ctx *auth,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8ea0e4bcde0d..e536e2a6bbd8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -36,10 +37,11 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"
+#define ATMEL_SHA_PRIORITY 300
+
/* SHA flags */
#define SHA_FLAGS_BUSY BIT(0)
#define SHA_FLAGS_FINAL BIT(1)
@@ -134,7 +136,6 @@ struct atmel_sha_dev {
void __iomem *io_base;
spinlock_t lock;
- int err;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
@@ -851,7 +852,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
0, final);
}
-static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -870,8 +871,6 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
ctx->block_size, DMA_TO_DEVICE);
}
-
- return 0;
}
static int atmel_sha_update_req(struct atmel_sha_dev *dd)
@@ -1025,7 +1024,6 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
dd->flags |= SHA_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -1036,9 +1034,13 @@ static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
-static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
- atmel_sha_hw_init(dd);
+ int err;
+
+ err = atmel_sha_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_sha_get_version(dd);
@@ -1046,6 +1048,8 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable(dd->iclk);
+
+ return 0;
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1248,130 +1252,66 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void atmel_sha_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_cra_init;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->finup = atmel_sha_finup;
+ alg->digest = atmel_sha_digest;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_1_256_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "atmel-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha1",
+ .halg.base.cra_driver_name = "atmel-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "atmel-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha256",
+ .halg.base.cra_driver_name = "atmel-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
};
static struct ahash_alg sha_224_alg = {
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "atmel-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha224",
+ .halg.base.cra_driver_name = "atmel-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
};
static struct ahash_alg sha_384_512_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha384",
- .cra_driver_name = "atmel-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha384",
+ .halg.base.cra_driver_name = "atmel-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "atmel-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha512",
+ .halg.base.cra_driver_name = "atmel-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -1395,10 +1335,6 @@ static int atmel_sha_done(struct atmel_sha_dev *dd)
if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
atmel_sha_update_dma_stop(dd);
- if (dd->err) {
- err = dd->err;
- goto finish;
- }
}
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
/* hash or semi-hash ready */
@@ -1493,7 +1429,6 @@ static void atmel_sha_dma_callback2(void *data)
struct scatterlist *sg;
int nents;
- dmaengine_terminate_all(dma->chan);
dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
sg = dma->sg;
@@ -1918,12 +1853,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return 0;
+ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
}
static int atmel_sha_hmac_init(struct ahash_request *req)
@@ -2084,131 +2014,61 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
atmel_sha_hmac_key_release(&hmac->hkey);
}
+static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
+ alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_hmac_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->digest = atmel_sha_hmac_digest;
+ alg->setkey = atmel_sha_hmac_setkey;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_hmac_algs[] = {
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "atmel-hmac-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha1)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "atmel-hmac-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha224)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "atmel-hmac-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha256)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "atmel-hmac-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha384)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "atmel-hmac-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha512)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -2347,18 +2207,13 @@ void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags)
+ const u8 *key, unsigned int keylen, u32 flags)
{
struct crypto_ahash *tfm = auth->tfm;
- int err;
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(tfm, key, keylen);
- *flags = crypto_ahash_get_flags(tfm);
-
- return err;
+ crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK);
+ return crypto_ahash_setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
@@ -2561,12 +2416,16 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ atmel_sha_alg_init(&sha_1_256_algs[i]);
+
err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
goto err_sha_1_256_algs;
}
if (dd->caps.has_sha224) {
+ atmel_sha_alg_init(&sha_224_alg);
+
err = crypto_register_ahash(&sha_224_alg);
if (err)
goto err_sha_224_algs;
@@ -2574,6 +2433,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_sha_384_512) {
for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ atmel_sha_alg_init(&sha_384_512_algs[i]);
+
err = crypto_register_ahash(&sha_384_512_algs[i]);
if (err)
goto err_sha_384_512_algs;
@@ -2582,6 +2443,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_hmac) {
for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
+ atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);
+
err = crypto_register_ahash(&sha_hmac_algs[i]);
if (err)
goto err_sha_hmac_algs;
@@ -2608,35 +2471,14 @@ err_sha_1_256_algs:
return err;
}
-static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ dev_err(dd->dev, "DMA channel is not available\n");
+ return PTR_ERR(dd->dma_lch_in.chan);
}
-}
-static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask_in;
-
- /* Try to grab DMA channel */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
- atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan) {
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
- }
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
SHA_REG_DIN(0);
dd->dma_lch_in.dma_conf.src_maxburst = 1;
@@ -2709,49 +2551,18 @@ static const struct of_device_id atmel_sha_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-
-static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
int err;
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
- if (sha_dd == NULL) {
- err = -ENOMEM;
- goto sha_dd_err;
- }
+ if (!sha_dd)
+ return -ENOMEM;
sha_dd->dev = dev;
@@ -2772,7 +2583,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (!sha_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@@ -2780,14 +2591,14 @@ static int atmel_sha_probe(struct platform_device *pdev)
sha_dd->irq = platform_get_irq(pdev, 0);
if (sha_dd->irq < 0) {
err = sha_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
IRQF_SHARED, "atmel-sha", sha_dd);
if (err) {
dev_err(dev, "unable to request sha irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2795,41 +2606,30 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(sha_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(sha_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
- atmel_sha_hw_version_init(sha_dd);
+ err = atmel_sha_hw_version_init(sha_dd);
+ if (err)
+ goto err_iclk_unprepare;
atmel_sha_get_cap(sha_dd);
if (sha_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_sha_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto iclk_unprepare;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto iclk_unprepare;
- }
- err = atmel_sha_dma_init(sha_dd, pdata);
+ err = atmel_sha_dma_init(sha_dd);
if (err)
- goto err_sha_dma;
+ goto err_iclk_unprepare;
dev_info(dev, "using %s for DMA transfers\n",
dma_chan_name(sha_dd->dma_lch_in.chan));
@@ -2855,14 +2655,11 @@ err_algs:
spin_unlock(&atmel_sha.lock);
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
-err_sha_dma:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(sha_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
-sha_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 0c1f79b30fc1..ed40dbb98c6b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -30,31 +31,32 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
+#define ATMEL_TDES_PRIORITY 300
+
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x00ff
-#define TDES_FLAGS_ENCRYPT BIT(0)
-#define TDES_FLAGS_CBC BIT(1)
-#define TDES_FLAGS_CFB BIT(2)
-#define TDES_FLAGS_CFB8 BIT(3)
-#define TDES_FLAGS_CFB16 BIT(4)
-#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_CFB64 BIT(6)
-#define TDES_FLAGS_OFB BIT(7)
-
-#define TDES_FLAGS_INIT BIT(16)
-#define TDES_FLAGS_FAST BIT(17)
-#define TDES_FLAGS_BUSY BIT(18)
-#define TDES_FLAGS_DMA BIT(19)
+/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
+#define TDES_FLAGS_ENCRYPT TDES_MR_CYPHER_ENC
+#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
+#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
+#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
+#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
+#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
+#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
+#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
+#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
+
+#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
+
+#define TDES_FLAGS_INIT BIT(3)
+#define TDES_FLAGS_FAST BIT(4)
+#define TDES_FLAGS_BUSY BIT(5)
+#define TDES_FLAGS_DMA BIT(6)
#define ATMEL_TDES_QUEUE_LENGTH 50
@@ -100,7 +102,6 @@ struct atmel_tdes_dev {
int irq;
unsigned long flags;
- int err;
spinlock_t lock;
struct crypto_queue queue;
@@ -189,7 +190,7 @@ static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
}
static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
- u32 *value, int count)
+ const u32 *value, int count)
{
for (; count--; value++, offset += 4)
atmel_tdes_write(dd, offset, *value);
@@ -226,7 +227,6 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
if (!(dd->flags & TDES_FLAGS_INIT)) {
atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
dd->flags |= TDES_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -237,9 +237,13 @@ static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
-static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
- atmel_tdes_hw_init(dd);
+ int err;
+
+ err = atmel_tdes_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_tdes_get_version(dd);
@@ -247,6 +251,8 @@ static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
+
+ return 0;
}
static void atmel_tdes_dma_callback(void *data)
@@ -260,7 +266,7 @@ static void atmel_tdes_dma_callback(void *data)
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
- u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+ u32 valmr = TDES_MR_SMOD_PDC;
err = atmel_tdes_hw_init(dd);
@@ -282,36 +288,15 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_TDESMOD_DES;
}
- if (dd->flags & TDES_FLAGS_CBC) {
- valmr |= TDES_MR_OPMOD_CBC;
- } else if (dd->flags & TDES_FLAGS_CFB) {
- valmr |= TDES_MR_OPMOD_CFB;
-
- if (dd->flags & TDES_FLAGS_CFB8)
- valmr |= TDES_MR_CFBS_8b;
- else if (dd->flags & TDES_FLAGS_CFB16)
- valmr |= TDES_MR_CFBS_16b;
- else if (dd->flags & TDES_FLAGS_CFB32)
- valmr |= TDES_MR_CFBS_32b;
- else if (dd->flags & TDES_FLAGS_CFB64)
- valmr |= TDES_MR_CFBS_64b;
- } else if (dd->flags & TDES_FLAGS_OFB) {
- valmr |= TDES_MR_OPMOD_OFB;
- }
-
- if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
- valmr |= TDES_MR_CYPHER_ENC;
+ valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
- atmel_tdes_write(dd, TDES_CR, valcr);
atmel_tdes_write(dd, TDES_MR, valmr);
atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
dd->ctx->keylen >> 2);
- if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
- }
return 0;
}
@@ -397,11 +382,11 @@ static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;
dd->dma_size = length;
@@ -411,12 +396,19 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
len32 = DIV_ROUND_UP(length, sizeof(u8));
- else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+ break;
+
+ case TDES_FLAGS_CFB16:
len32 = DIV_ROUND_UP(length, sizeof(u16));
- else
+ break;
+
+ default:
len32 = DIV_ROUND_UP(length, sizeof(u32));
+ break;
+ }
atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -433,13 +425,14 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
+ enum dma_slave_buswidth addr_width;
dd->dma_size = length;
@@ -448,23 +441,23 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if (dd->flags & TDES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & TDES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- } else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
+ addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+
+ case TDES_FLAGS_CFB16:
+ addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+
+ default:
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
}
+ dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
+ dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
+
dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
@@ -504,8 +497,6 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(
- crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -561,9 +552,9 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
dd->total -= count;
if (dd->caps.has_dma)
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
else
- err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
@@ -600,12 +591,14 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
- atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
req->base.complete(&req->base, err);
}
@@ -699,35 +692,44 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
- if (mode & TDES_FLAGS_CFB8) {
+ switch (mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB16) {
+ break;
+
+ case TDES_FLAGS_CFB16:
if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB32) {
+ break;
+
+ case TDES_FLAGS_CFB32:
if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
- } else {
+ break;
+
+ default:
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;
+ break;
}
rctx->mode = mode;
- if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
+ !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -739,33 +741,17 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
return atmel_tdes_handle_queue(ctx->dd, req);
}
-static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan)
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ ret = PTR_ERR(dd->dma_lch_in.chan);
goto err_dma_in;
+ }
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
TDES_IDATA1R;
dd->dma_lch_in.dma_conf.src_maxburst = 1;
@@ -776,12 +762,12 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.device_fc = false;
- dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
- if (!dd->dma_lch_out.chan)
+ dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_out.chan)) {
+ ret = PTR_ERR(dd->dma_lch_out.chan);
goto err_dma_out;
+ }
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
TDES_ODATA1R;
dd->dma_lch_out.dma_conf.src_maxburst = 1;
@@ -797,8 +783,8 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -841,17 +827,17 @@ static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, 0);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
@@ -860,50 +846,47 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
}
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}
static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}
static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}
static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}
static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
@@ -925,18 +908,23 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
+static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
+{
+ alg->base.cra_priority = ATMEL_TDES_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = atmel_tdes_init_tfm;
+}
+
static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "atmel-ecb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = atmel_des_setkey,
@@ -946,14 +934,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "atmel-cbc-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -964,14 +947,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb(des)",
.base.cra_driver_name = "atmel-cfb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -982,14 +960,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb8(des)",
.base.cra_driver_name = "atmel-cfb8-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1000,14 +973,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb16(des)",
.base.cra_driver_name = "atmel-cfb16-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x1,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1018,14 +986,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb32(des)",
.base.cra_driver_name = "atmel-cfb32-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x3,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1036,14 +999,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "atmel-ofb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1054,14 +1012,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1071,14 +1024,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "atmel-cbc-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1089,14 +1037,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des3_ede)",
.base.cra_driver_name = "atmel-ofb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1123,8 +1066,6 @@ static void atmel_tdes_done_task(unsigned long data)
else
err = atmel_tdes_crypt_dma_stop(dd);
- err = dd->err ? : err;
-
if (dd->total && !err) {
if (dd->flags & TDES_FLAGS_FAST) {
dd->in_sg = sg_next(dd->in_sg);
@@ -1173,6 +1114,8 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+ atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
+
err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
@@ -1214,49 +1157,18 @@ static const struct of_device_id atmel_tdes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-
-static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
int err;
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
- if (tdes_dd == NULL) {
- err = -ENOMEM;
- goto tdes_dd_err;
- }
+ if (!tdes_dd)
+ return -ENOMEM;
tdes_dd->dev = dev;
@@ -1277,7 +1189,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (!tdes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@@ -1285,14 +1197,14 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->irq = platform_get_irq(pdev, 0);
if (tdes_dd->irq < 0) {
err = tdes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
IRQF_SHARED, "atmel-tdes", tdes_dd);
if (err) {
dev_err(dev, "unable to request tdes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -1300,41 +1212,30 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
- atmel_tdes_hw_version_init(tdes_dd);
+ err = atmel_tdes_hw_version_init(tdes_dd);
+ if (err)
+ goto err_tasklet_kill;
atmel_tdes_get_cap(tdes_dd);
err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_buff;
+ goto err_tasklet_kill;
if (tdes_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_tdes_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto err_pdata;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto err_pdata;
- }
- err = atmel_tdes_dma_init(tdes_dd, pdata);
+ err = atmel_tdes_dma_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_buff_cleanup;
dev_info(dev, "using %s, %s for DMA transfers\n",
dma_chan_name(tdes_dd->dma_lch_in.chan),
@@ -1359,15 +1260,11 @@ err_algs:
spin_unlock(&atmel_tdes.lock);
if (tdes_dd->caps.has_dma)
atmel_tdes_dma_cleanup(tdes_dd);
-err_tdes_dma:
-err_pdata:
+err_buff_cleanup:
atmel_tdes_buff_cleanup(tdes_dd);
-err_tdes_buff:
-res_err:
+err_tasklet_kill:
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
-tdes_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
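
The atmel-tdes hunks above drop the crypto_platform_data/filter-based DMA lookup and the atmel_tdes_of_init() helper in favour of dma_request_chan() with named channels, propagating the real error code instead of a blanket -ENODEV. A minimal sketch of that request/error-unwind pattern, assuming the "tx" name for the input channel (only the "rx" half is visible in these hunks) and reusing the dd fields from the driver:

	/* Sketch only: mirrors the dma_request_chan() pattern from the hunks above.
	 * The "tx" channel name for dma_lch_in is an assumption; "rx" is from the diff. */
	static int example_tdes_dma_init(struct atmel_tdes_dev *dd)
	{
		int ret;

		dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
		if (IS_ERR(dd->dma_lch_in.chan))
			return PTR_ERR(dd->dma_lch_in.chan);

		dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
		if (IS_ERR(dd->dma_lch_out.chan)) {
			ret = PTR_ERR(dd->dma_lch_out.chan);
			dma_release_channel(dd->dma_lch_in.chan);
			dev_err(dd->dev, "no DMA channel available\n");
			return ret;
		}

		return 0;
	}

The probe error paths follow the same idea: each failure jumps to a label that only undoes what was already set up (err_buff_cleanup, err_tasklet_kill), so nothing is released twice.
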
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 4b20606983a4..fcf1effc7661 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1249,10 +1249,8 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
{
struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
- if (len != 16 && len != 24 && len != 32) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -1;
- }
+ if (len != 16 && len != 24 && len != 32)
+ return -EINVAL;
ctx->key_length = len;
@@ -1606,8 +1604,6 @@ artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 32:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1634,8 +1630,6 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 64:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1564a6f8c9cb..c8b9408541a9 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1846,7 +1846,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -2894,13 +2893,8 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2916,7 +2910,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2967,13 +2960,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key,
keylen + ctx->salt_len);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2992,7 +2980,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
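
The artpec6 and bcm hunks above show the tree-wide pattern behind the "Removed CRYPTO_TFM_RES flags" item in the merge summary: setkey handlers no longer set CRYPTO_TFM_RES_BAD_KEY_LEN (or copy CRYPTO_TFM_RES_MASK bits back from a fallback), they simply return the error code. A minimal sketch of a post-cleanup setkey, with example_ctx and its key fields as illustrative placeholders:

	/* Sketch of a post-cleanup skcipher setkey: bad key lengths are reported
	 * purely via the return value, no tfm flags.  example_ctx is a placeholder. */
	static int example_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
				      unsigned int keylen)
	{
		struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
		int err;

		err = aes_check_keylen(keylen);	/* -EINVAL for anything but 16/24/32 */
		if (err)
			return err;

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
		return 0;
	}

The same rewrite repeats in the caam, cavium, nitrox and ccp hunks that follow.
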
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 87053e46c788..fac5b2e26610 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -130,13 +130,13 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
scatterlist crypto API to the SEC4 via job ring.
config CRYPTO_DEV_FSL_CAAM_PKC_API
- bool "Register public key cryptography implementations with Crypto API"
- default y
- select CRYPTO_RSA
- help
- Selecting this will allow SEC Public key support for RSA.
- Supported cryptographic primitives: encryption, decryption,
- signature and verification.
+ bool "Register public key cryptography implementations with Crypto API"
+ default y
+ select CRYPTO_RSA
+ help
+ Selecting this will allow SEC Public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
config CRYPTO_DEV_FSL_CAAM_RNG_API
bool "Register caam device for hwrng API"
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2912006b946b..ef1a65f4fc92 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -548,10 +548,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -619,7 +617,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -649,10 +646,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -672,10 +667,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -700,10 +693,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -762,11 +753,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -786,11 +774,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -809,11 +794,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -846,7 +828,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 8e3449670d2f..4a29e0ef9d63 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -268,7 +268,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -356,10 +355,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -462,10 +459,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -570,10 +565,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -644,7 +637,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -653,14 +646,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
@@ -669,11 +659,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -693,11 +680,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -716,11 +700,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -748,7 +729,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(jrdev, "key size mismatch\n");
- goto badkey;
+ return -EINVAL;
}
ctx->cdata.keylen = keylen;
@@ -765,7 +746,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -774,14 +755,11 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/*
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 3443f6d6dd83..28669cbecf77 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -313,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -326,11 +325,11 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
- goto badkey;
+ goto out;
err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
aead_setkey(aead, key, keylen);
@@ -338,10 +337,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -634,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -725,10 +718,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -822,10 +813,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -923,10 +912,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -992,11 +979,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1016,11 +1000,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1039,11 +1020,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1051,11 +1029,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE)
return -EINVAL;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1084,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(dev, "key size mismatch\n");
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2481,7 +2455,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
@@ -2998,15 +2972,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
struct caam_export_state {
@@ -3018,42 +2990,17 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
struct dpaa2_sg_entry *qm_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(dev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, state->buf_dma)) {
dev_err(dev, "unable to map buf\n");
@@ -3304,7 +3251,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
return ret;
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -3321,7 +3267,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -3383,9 +3329,17 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3440,9 +3394,17 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3464,16 +3426,14 @@ static int ahash_update_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state), last_buflen;
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
@@ -3524,10 +3484,6 @@ static int ahash_update_ctx(struct ahash_request *req)
if (mapped_nents) {
sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
} else {
dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
true);
@@ -3566,14 +3522,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -3592,7 +3545,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -3663,7 +3616,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, qm_sg_src_index;
int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -3852,8 +3805,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = -ENOMEM;
@@ -3925,10 +3878,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int qm_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -3977,11 +3929,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
-
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -4029,14 +3976,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4055,7 +3999,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -4151,8 +4095,9 @@ static int ahash_update_first(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -4220,10 +4165,6 @@ static int ahash_update_first(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
@@ -4257,14 +4198,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4288,10 +4229,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -4321,16 +4261,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -4348,9 +4280,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 65399cb2a770..8d9143407fc5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,15 +107,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
struct caam_export_state {
@@ -127,31 +125,6 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
static inline bool is_cmac_aes(u32 algtype)
{
return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
@@ -183,12 +156,12 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map buf\n");
@@ -500,7 +473,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return ahash_set_sh_desc(ahash);
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -510,10 +482,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
- if (keylen != AES_KEYSIZE_128) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
@@ -533,10 +503,8 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
/* key is immediate data for all cmac shared descriptors */
ctx->adata.key_virt = key;
@@ -578,7 +546,7 @@ static inline void ahash_unmap(struct device *dev,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -643,9 +611,17 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -703,9 +679,17 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -786,18 +770,16 @@ static int ahash_update_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
@@ -868,10 +850,6 @@ static int ahash_update_ctx(struct ahash_request *req)
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -901,14 +879,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -925,7 +900,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -991,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
@@ -1148,8 +1123,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -1207,11 +1182,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -1278,12 +1252,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
- if (*next_buflen) {
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- }
-
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1317,14 +1285,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1342,7 +1307,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1428,8 +1393,9 @@ static int ahash_update_first(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
@@ -1491,10 +1457,6 @@ static int ahash_update_first(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
desc = edesc->hw_desc;
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
@@ -1517,14 +1479,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1548,10 +1510,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -1581,16 +1542,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -1608,9 +1561,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
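
The caamhash.c changes above (and the matching caamalg_qi2.c ones) collapse the ping-pong buffer pair buf_0/buf_1 with current_buf/switch_buf() into a single buf plus buflen/next_buflen, and move the copy of the unhashed tail of req->src into the completion callbacks. A compressed sketch of the new bookkeeping, keeping only the fields and the one call the hunks touch (example_ names are illustrative):

	/* Sketch of the single-buffer hash state introduced above; everything
	 * else in caam_hash_state is elided. */
	struct example_hash_state {
		u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
		int buflen;		/* bytes currently buffered */
		int next_buflen;	/* bytes left over once this request completes */
	};

	/* Done callback: pull the trailing, still-unhashed bytes of req->src
	 * into buf so the next update starts from them. */
	static void example_done(struct example_hash_state *state,
				 struct ahash_request *req)
	{
		scatterwalk_map_and_copy(state->buf, req->src,
					 req->nbytes - state->next_buflen,
					 state->next_buflen, 0);
		state->buflen = state->next_buflen;
	}

ahash_export/import and ahash_init() shrink accordingly, since there is no longer a current_buf selector to save or reset.
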
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d7c3c3805693..7139366da016 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -99,10 +99,13 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (ctrlpriv->virt_en == 1 ||
/*
- * Apparently on i.MX8MQ it doesn't matter if virt_en == 1
+ * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq")) {
+ of_machine_is_compatible("fsl,imx8mq") ||
+ of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp")) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -508,7 +511,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
- { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
};
@@ -671,11 +674,9 @@ static int caam_probe(struct platform_device *pdev)
of_node_put(np);
if (!ctrlpriv->mc_en)
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST);
handle_imx6_err005766(&ctrl->mcr);
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1ad66677d88e..1be1adffff1d 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -295,8 +295,6 @@ static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 6f80cc3b5c84..dce5423a5883 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -40,10 +40,8 @@ static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
union fc_ctx_flags flags;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
/* fill crypto context */
fctx = nctx->u.fctx;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 97af4d50d003..18088b0a2257 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -200,10 +200,8 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int aes_keylen;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
@@ -351,10 +349,8 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
keylen /= 2;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
fctx = nctx->u.fctx;
/* copy KEY2 */
@@ -382,10 +378,8 @@ static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 6b86f1e6d634..db362fe472ea 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -8,7 +8,9 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-dmaengine.o
ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
-ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
+ sev-dev.o \
+ tee-dev.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 32f19f402073..5eba7ee49e81 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -276,7 +276,6 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff50ee80d223..9e8f07c1afac 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -42,7 +42,6 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 33328a153225..51e12fbd1159 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -51,7 +51,6 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 453b9797f93f..474e6f1a6a84 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,10 +293,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
key_len = digest_size;
} else {
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 0186b3df4c87..0d5576f6ad21 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = {
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv3 = {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 7ca2d3408e7a..e95e7aa5dbf1 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -2,57 +2,20 @@
/*
* AMD Platform Security Processor (PSP) interface
*
- * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/spinlock_types.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/hw_random.h>
-#include <linux/ccp.h>
-#include <linux/firmware.h>
-
-#include <asm/smp.h>
+#include <linux/irqreturn.h>
#include "sp-dev.h"
#include "psp-dev.h"
+#include "sev-dev.h"
+#include "tee-dev.h"
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
-#define SEV_FW_NAME_SIZE 64
-
-static DEFINE_MUTEX(sev_cmd_mutex);
-static struct sev_misc_dev *misc_dev;
-static struct psp_device *psp_master;
-
-static int psp_cmd_timeout = 100;
-module_param(psp_cmd_timeout, int, 0644);
-MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
-
-static int psp_probe_timeout = 5;
-module_param(psp_probe_timeout, int, 0644);
-MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
-
-static bool psp_dead;
-static int psp_timeout;
-
-static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
-{
- if (psp_master->api_major > maj)
- return true;
- if (psp_master->api_major == maj && psp_master->api_minor >= min)
- return true;
- return false;
-}
+struct psp_device *psp_master;
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
@@ -75,902 +38,95 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
{
struct psp_device *psp = data;
unsigned int status;
- int reg;
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
- /* Check if it is command completion: */
- if (!(status & PSP_CMD_COMPLETE))
- goto done;
+ /* invoke subdevice interrupt handlers */
+ if (status) {
+ if (psp->sev_irq_handler)
+ psp->sev_irq_handler(irq, psp->sev_irq_data, status);
- /* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
- psp->sev_int_rcvd = 1;
- wake_up(&psp->sev_int_queue);
+ if (psp->tee_irq_handler)
+ psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
-done:
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
-static int sev_wait_cmd_ioc(struct psp_device *psp,
- unsigned int *reg, unsigned int timeout)
-{
- int ret;
-
- ret = wait_event_timeout(psp->sev_int_queue,
- psp->sev_int_rcvd, timeout * HZ);
- if (!ret)
- return -ETIMEDOUT;
-
- *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
-
- return 0;
-}
-
-static int sev_cmd_buffer_len(int cmd)
-{
- switch (cmd) {
- case SEV_CMD_INIT: return sizeof(struct sev_data_init);
- case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
- case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
- case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
- case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
- case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
- case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
- case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
- case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
- case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
- case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
- case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
- case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
- case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
- case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
- case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
- case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
- case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
- case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
- case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
- case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
- case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
- case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
- case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
- case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
- default: return 0;
- }
-
- return 0;
-}
-
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
-{
- struct psp_device *psp = psp_master;
- unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp_dead)
- return -EBUSY;
-
- /* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
-
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
- cmd, phys_msb, phys_lsb, psp_timeout);
-
- print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
- iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
-
- psp->sev_int_rcvd = 0;
-
- reg = cmd;
- reg <<= PSP_CMDRESP_CMD_SHIFT;
- reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
-
- /* wait for command completion */
- ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
- if (ret) {
- if (psp_ret)
- *psp_ret = 0;
-
- dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
- psp_dead = true;
-
- return ret;
- }
-
- psp_timeout = psp_cmd_timeout;
-
- if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
-
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
- ret = -EIO;
- }
-
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- return ret;
-}
-
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_do_cmd_locked(cmd, data, psp_ret);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int __sev_platform_init_locked(int *error)
-{
- struct psp_device *psp = psp_master;
- int rc = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp->sev_state == SEV_STATE_INIT)
- return 0;
-
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
- if (rc)
- return rc;
-
- psp->sev_state = SEV_STATE_INIT;
-
- /* Prepare for first SEV guest launch after INIT */
- wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
- return rc;
-
- dev_dbg(psp->dev, "SEV firmware initialized\n");
-
- return rc;
-}
-
-int sev_platform_init(int *error)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(sev_platform_init);
-
-static int __sev_platform_shutdown_locked(int *error)
-{
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
- return ret;
-
- psp_master->sev_state = SEV_STATE_UNINIT;
- dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
-
- return ret;
-}
-
-static int sev_platform_shutdown(int *error)
+static unsigned int psp_get_capability(struct psp_device *psp)
{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int sev_get_platform_state(int *state, int *error)
-{
- int rc;
-
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
- &psp_master->status_cmd_buf, error);
- if (rc)
- return rc;
-
- *state = psp_master->status_cmd_buf.state;
- return rc;
-}
-
-static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
-{
- int state, rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
/*
- * The SEV spec requires that FACTORY_RESET must be issued in
- * UNINIT state. Before we go further lets check if any guest is
- * active.
- *
- * If FW is in WORKING state then deny the request otherwise issue
- * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET.
- *
- */
- rc = sev_get_platform_state(&state, &argp->error);
- if (rc)
- return rc;
-
- if (state == SEV_STATE_WORKING)
- return -EBUSY;
-
- if (state == SEV_STATE_INIT) {
- rc = __sev_platform_shutdown_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_status *data = &psp_master->status_cmd_buf;
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
- if (ret)
- return ret;
-
- if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
- ret = -EFAULT;
-
- return ret;
-}
-
-static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
-{
- int rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_pek_csr input;
- struct sev_data_pek_csr *data;
- void *blob = NULL;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* userspace wants to query CSR length */
- if (!input.address || !input.length)
- goto cmd;
-
- /* allocate a physically contiguous buffer to store the CSR blob */
- if (!access_ok(input.address, input.length) ||
- input.length > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
-
- blob = kmalloc(input.length, GFP_KERNEL);
- if (!blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->address = __psp_pa(blob);
- data->len = input.length;
-
-cmd:
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_blob;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
-
- /* If we query the CSR length, FW responded with expected data. */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_blob;
- }
-
- if (blob) {
- if (copy_to_user((void __user *)input.address, blob, input.length))
- ret = -EFAULT;
- }
-
-e_free_blob:
- kfree(blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-void *psp_copy_user_blob(u64 __user uaddr, u32 len)
-{
- if (!uaddr || !len)
- return ERR_PTR(-EINVAL);
-
- /* verify that blob length does not exceed our limit */
- if (len > SEV_FW_BLOB_MAX_SIZE)
- return ERR_PTR(-EINVAL);
-
- return memdup_user((void __user *)(uintptr_t)uaddr, len);
-}
-EXPORT_SYMBOL_GPL(psp_copy_user_blob);
-
-static int sev_get_api_version(void)
-{
- struct sev_user_data_status *status;
- int error = 0, ret;
-
- status = &psp_master->status_cmd_buf;
- ret = sev_platform_status(status, &error);
- if (ret) {
- dev_err(psp_master->dev,
- "SEV: failed to get status. Error: %#x\n", error);
- return 1;
- }
-
- psp_master->api_major = status->api_major;
- psp_master->api_minor = status->api_minor;
- psp_master->build = status->build;
- psp_master->sev_state = status->state;
-
- return 0;
-}
-
-static int sev_get_firmware(struct device *dev,
- const struct firmware **firmware)
-{
- char fw_name_specific[SEV_FW_NAME_SIZE];
- char fw_name_subset[SEV_FW_NAME_SIZE];
-
- snprintf(fw_name_specific, sizeof(fw_name_specific),
- "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
-
- snprintf(fw_name_subset, sizeof(fw_name_subset),
- "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
- boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
-
- /* Check for SEV FW for a particular model.
- * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
- *
- * or
- *
- * Check for SEV FW common to a subset of models.
- * Ex. amd_sev_fam17h_model0xh.sbin for
- * Family 17h Model 00h -- Family 17h Model 0Fh
- *
- * or
- *
- * Fall-back to using generic name: sev.fw
+	 * Check for access to the registers.  If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
*/
- if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
- (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
- (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
return 0;
-
- return -ENOENT;
-}
-
-/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
-static int sev_update_firmware(struct device *dev)
-{
- struct sev_data_download_firmware *data;
- const struct firmware *firmware;
- int ret, error, order;
- struct page *p;
- u64 data_size;
-
- if (sev_get_firmware(dev, &firmware) == -ENOENT) {
- dev_dbg(dev, "No SEV firmware file present\n");
- return -1;
- }
-
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
- p = alloc_pages(GFP_KERNEL, order);
- if (!p) {
- ret = -1;
- goto fw_err;
}
- /*
- * Copy firmware data to a kernel allocated contiguous
- * memory region.
- */
- data = page_address(p);
- memcpy(page_address(p) + data_size, firmware->data, firmware->size);
-
- data->address = __psp_pa(page_address(p) + data_size);
- data->len = firmware->size;
-
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
- if (ret)
- dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
- else
- dev_info(dev, "SEV firmware update successful\n");
-
- __free_pages(p, order);
-
-fw_err:
- release_firmware(firmware);
-
- return ret;
+ return val;
}
-static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+static int psp_check_sev_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_pek_cert_import input;
- struct sev_data_pek_cert_import *data;
- void *pek_blob, *oca_blob;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* copy PEK certificate blobs from userspace */
- pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
- if (IS_ERR(pek_blob)) {
- ret = PTR_ERR(pek_blob);
- goto e_free;
- }
-
- data->pek_cert_address = __psp_pa(pek_blob);
- data->pek_cert_len = input.pek_cert_len;
-
- /* copy PEK certificate blobs from userspace */
- oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
- if (IS_ERR(oca_blob)) {
- ret = PTR_ERR(oca_blob);
- goto e_free_pek;
- }
-
- data->oca_cert_address = __psp_pa(oca_blob);
- data->oca_cert_len = input.oca_cert_len;
-
- /* If platform is not in INIT state then transition it to INIT */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_oca;
+ /* Check if device supports SEV feature */
+ if (!(capability & 1)) {
+ dev_dbg(psp->dev, "psp does not support SEV\n");
+ return -ENODEV;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
-
-e_free_oca:
- kfree(oca_blob);
-e_free_pek:
- kfree(pek_blob);
-e_free:
- kfree(data);
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+static int psp_check_tee_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_get_id2 input;
- struct sev_data_get_id *data;
- void *id_blob = NULL;
- int ret;
-
- /* SEV GET_ID is available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- /* Check if we have write access to the userspace buffer */
- if (input.address &&
- input.length &&
- !access_ok(input.address, input.length))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (input.address && input.length) {
- id_blob = kmalloc(input.length, GFP_KERNEL);
- if (!id_blob) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->address = __psp_pa(id_blob);
- data->len = input.length;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
-
- /*
- * Firmware will return the length of the ID value (either the minimum
- * required length or the actual length written), return it to the user.
- */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free;
- }
-
- if (id_blob) {
- if (copy_to_user((void __user *)input.address,
- id_blob, data->len)) {
- ret = -EFAULT;
- goto e_free;
- }
+ /* Check if device supports TEE feature */
+ if (!(capability & 2)) {
+ dev_dbg(psp->dev, "psp does not support TEE\n");
+ return -ENODEV;
}
-e_free:
- kfree(id_blob);
- kfree(data);
-
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+static int psp_check_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_data_get_id *data;
- u64 data_size, user_size;
- void *id_blob, *mem;
- int ret;
+ int sev_support = psp_check_sev_support(psp, capability);
+ int tee_support = psp_check_tee_support(psp, capability);
- /* SEV GET_ID available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- /* SEV FW expects the buffer it fills with the ID to be
- * 8-byte aligned. Memory allocated should be enough to
- * hold data structure + alignment padding + memory
- * where SEV FW writes the ID.
- */
- data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
- user_size = sizeof(struct sev_user_data_get_id);
-
- mem = kzalloc(data_size + user_size, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- data = mem;
- id_blob = mem + data_size;
-
- data->address = __psp_pa(id_blob);
- data->len = user_size;
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
- if (!ret) {
- if (copy_to_user((void __user *)argp->data, id_blob, data->len))
- ret = -EFAULT;
- }
-
- kfree(mem);
+	/* Return an error if the device supports neither SEV nor TEE */
+ if (sev_support && tee_support)
+ return -ENODEV;
- return ret;
+ return 0;
}
-static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+static int psp_init(struct psp_device *psp, unsigned int capability)
{
- struct sev_user_data_pdh_cert_export input;
- void *pdh_blob = NULL, *cert_blob = NULL;
- struct sev_data_pdh_cert_export *data;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
+ if (!psp_check_sev_support(psp, capability)) {
+ ret = sev_dev_init(psp);
if (ret)
return ret;
}
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* Userspace wants to query the certificate length. */
- if (!input.pdh_cert_address ||
- !input.pdh_cert_len ||
- !input.cert_chain_address)
- goto cmd;
-
- /* Allocate a physically contiguous buffer to store the PDH blob. */
- if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- /* Allocate a physically contiguous buffer to store the cert chain blob. */
- if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.cert_chain_address, input.cert_chain_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
- if (!pdh_blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->pdh_cert_address = __psp_pa(pdh_blob);
- data->pdh_cert_len = input.pdh_cert_len;
-
- cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
- if (!cert_blob) {
- ret = -ENOMEM;
- goto e_free_pdh;
- }
-
- data->cert_chain_address = __psp_pa(cert_blob);
- data->cert_chain_len = input.cert_chain_len;
-
-cmd:
- ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
-
- /* If we query the length, FW responded with expected data. */
- input.cert_chain_len = data->cert_chain_len;
- input.pdh_cert_len = data->pdh_cert_len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_cert;
- }
-
- if (pdh_blob) {
- if (copy_to_user((void __user *)input.pdh_cert_address,
- pdh_blob, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free_cert;
- }
- }
-
- if (cert_blob) {
- if (copy_to_user((void __user *)input.cert_chain_address,
- cert_blob, input.cert_chain_len))
- ret = -EFAULT;
- }
-
-e_free_cert:
- kfree(cert_blob);
-e_free_pdh:
- kfree(pdh_blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct sev_issue_cmd input;
- int ret = -EFAULT;
-
- if (!psp_master)
- return -ENODEV;
-
- if (ioctl != SEV_ISSUE_CMD)
- return -EINVAL;
-
- if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
- return -EFAULT;
-
- if (input.cmd > SEV_MAX)
- return -EINVAL;
-
- mutex_lock(&sev_cmd_mutex);
-
- switch (input.cmd) {
-
- case SEV_FACTORY_RESET:
- ret = sev_ioctl_do_reset(&input);
- break;
- case SEV_PLATFORM_STATUS:
- ret = sev_ioctl_do_platform_status(&input);
- break;
- case SEV_PEK_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
- break;
- case SEV_PDH_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
- break;
- case SEV_PEK_CSR:
- ret = sev_ioctl_do_pek_csr(&input);
- break;
- case SEV_PEK_CERT_IMPORT:
- ret = sev_ioctl_do_pek_import(&input);
- break;
- case SEV_PDH_CERT_EXPORT:
- ret = sev_ioctl_do_pdh_export(&input);
- break;
- case SEV_GET_ID:
- pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
- ret = sev_ioctl_do_get_id(&input);
- break;
- case SEV_GET_ID2:
- ret = sev_ioctl_do_get_id2(&input);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
- ret = -EFAULT;
-out:
- mutex_unlock(&sev_cmd_mutex);
-
- return ret;
-}
-
-static const struct file_operations sev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sev_ioctl,
-};
-
-int sev_platform_status(struct sev_user_data_status *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_platform_status);
-
-int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_deactivate);
-
-int sev_guest_activate(struct sev_data_activate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_activate);
-
-int sev_guest_decommission(struct sev_data_decommission *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_decommission);
-
-int sev_guest_df_flush(int *error)
-{
- return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_df_flush);
-
-static void sev_exit(struct kref *ref)
-{
- struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
-
- misc_deregister(&misc_dev->misc);
-}
-
-static int sev_misc_init(struct psp_device *psp)
-{
- struct device *dev = psp->dev;
- int ret;
-
- /*
- * SEV feature support can be detected on multiple devices but the SEV
- * FW commands must be issued on the master. During probe, we do not
- * know the master hence we create /dev/sev on the first device probe.
- * sev_do_cmd() finds the right master device to which to issue the
- * command to the firmware.
- */
- if (!misc_dev) {
- struct miscdevice *misc;
-
- misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
- if (!misc_dev)
- return -ENOMEM;
-
- misc = &misc_dev->misc;
- misc->minor = MISC_DYNAMIC_MINOR;
- misc->name = DEVICE_NAME;
- misc->fops = &sev_fops;
-
- ret = misc_register(misc);
+ if (!psp_check_tee_support(psp, capability)) {
+ ret = tee_dev_init(psp);
if (ret)
return ret;
-
- kref_init(&misc_dev->refcount);
- } else {
- kref_get(&misc_dev->refcount);
- }
-
- init_waitqueue_head(&psp->sev_int_queue);
- psp->sev_misc = misc_dev;
- dev_dbg(dev, "registered SEV device\n");
-
- return 0;
-}
-
-static int psp_check_sev_support(struct psp_device *psp)
-{
- unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
-
- /*
- * Check for a access to the registers. If this read returns
- * 0xffffffff, it's likely that the system is running a broken
- * BIOS which disallows access to the device. Stop here and
- * fail the PSP initialization (but not the load, as the CCP
- * could get properly initialized).
- */
- if (val == 0xffffffff) {
- dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
- return -ENODEV;
- }
-
- if (!(val & 1)) {
- /* Device does not support the SEV feature */
- dev_dbg(psp->dev, "psp does not support SEV\n");
- return -ENODEV;
}
return 0;
@@ -980,6 +136,7 @@ int psp_dev_init(struct sp_device *sp)
{
struct device *dev = sp->dev;
struct psp_device *psp;
+ unsigned int capability;
int ret;
ret = -ENOMEM;
@@ -998,7 +155,11 @@ int psp_dev_init(struct sp_device *sp)
psp->io_regs = sp->io_map;
- ret = psp_check_sev_support(psp);
+ capability = psp_get_capability(psp);
+ if (!capability)
+ goto e_disable;
+
+ ret = psp_check_support(psp, capability);
if (ret)
goto e_disable;
@@ -1013,7 +174,7 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- ret = sev_misc_init(psp);
+ ret = psp_init(psp, capability);
if (ret)
goto e_irq;
@@ -1049,83 +210,52 @@ void psp_dev_destroy(struct sp_device *sp)
if (!psp)
return;
- if (psp->sev_misc)
- kref_put(&misc_dev->refcount, sev_exit);
+ sev_dev_destroy(psp);
+
+ tee_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
}
-int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
- void *data, int *error)
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
{
- if (!filep || filep->f_op != &sev_fops)
- return -EBADF;
-
- return sev_do_cmd(cmd, data, error);
+ psp->sev_irq_data = data;
+ psp->sev_irq_handler = handler;
}
-EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
-void psp_pci_init(void)
+void psp_clear_sev_irq_handler(struct psp_device *psp)
{
- struct sp_device *sp;
- int error, rc;
-
- sp = sp_get_psp_master_device();
- if (!sp)
- return;
-
- psp_master = sp->psp_data;
-
- psp_timeout = psp_probe_timeout;
+ psp_set_sev_irq_handler(psp, NULL, NULL);
+}
- if (sev_get_api_version())
- goto err;
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
+{
+ psp->tee_irq_data = data;
+ psp->tee_irq_handler = handler;
+}
- /*
- * If platform is not in UNINIT state then firmware upgrade and/or
- * platform INIT command will fail. These command require UNINIT state.
- *
- * In a normal boot we should never run into case where the firmware
- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
- * may not go through a typical shutdown sequence and may leave the
- * firmware in INIT or WORKING state.
- */
+void psp_clear_tee_irq_handler(struct psp_device *psp)
+{
+ psp_set_tee_irq_handler(psp, NULL, NULL);
+}
- if (psp_master->sev_state != SEV_STATE_UNINIT) {
- sev_platform_shutdown(NULL);
- psp_master->sev_state = SEV_STATE_UNINIT;
- }
+struct psp_device *psp_get_master_device(void)
+{
+ struct sp_device *sp = sp_get_psp_master_device();
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(psp_master->dev) == 0)
- sev_get_api_version();
+ return sp ? sp->psp_data : NULL;
+}
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
- /*
- * INIT command returned an integrity check failure
- * status code, meaning that firmware load and
- * validation of SEV related persistent data has
- * failed and persistent state has been erased.
- * Retrying INIT command here should succeed.
- */
- dev_dbg(sp->dev, "SEV: retrying INIT command");
- rc = sev_platform_init(&error);
- }
+void psp_pci_init(void)
+{
+ psp_master = psp_get_master_device();
- if (rc) {
- dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+ if (!psp_master)
return;
- }
-
- dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
- psp_master->api_minor, psp_master->build);
-
- return;
-err:
- psp_master = NULL;
+ sev_pci_init();
}
void psp_pci_exit(void)
@@ -1133,5 +263,5 @@ void psp_pci_exit(void)
if (!psp_master)
return;
- sev_platform_shutdown(NULL);
+ sev_pci_exit();
}
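
Illustrative aside (not part of the patch): psp_get_capability() above returns the raw feature register, and psp_check_sev_support()/psp_check_tee_support() test bit 0 and bit 1 of that word ("capability & 1" and "capability & 2"). A hypothetical helper that names those bits could look like the sketch below; PSP_CAP_SEV and PSP_CAP_TEE are made-up names, not macros from this patch.

#include <stdbool.h>

/* Hypothetical capability-bit helpers, for illustration only.
 * Bit 0: SEV support (tested as "capability & 1" above).
 * Bit 1: TEE support (tested as "capability & 2" above).
 */
#define PSP_CAP_SEV	(1u << 0)
#define PSP_CAP_TEE	(1u << 1)

static bool psp_cap_has_sev(unsigned int capability)
{
	return capability & PSP_CAP_SEV;
}

static bool psp_cap_has_tee(unsigned int capability)
{
	return capability & PSP_CAP_TEE;
}
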
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index dd516b35ba86..ef38e4135d81 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Platform Security Processor (PSP) interface driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
@@ -11,35 +11,20 @@
#define __PSP_DEV_H__
#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/dmapool.h>
-#include <linux/hw_random.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/dmaengine.h>
-#include <linux/psp-sev.h>
-#include <linux/miscdevice.h>
-#include <linux/capability.h>
#include "sp-dev.h"
-#define PSP_CMD_COMPLETE BIT(1)
-
-#define PSP_CMDRESP_CMD_SHIFT 16
-#define PSP_CMDRESP_IOC BIT(0)
#define PSP_CMDRESP_RESP BIT(31)
#define PSP_CMDRESP_ERR_MASK 0xffff
#define MAX_PSP_NAME_LEN 16
-struct sev_misc_dev {
- struct kref refcount;
- struct miscdevice misc;
-};
+extern struct psp_device *psp_master;
+
+typedef void (*psp_irq_handler_t)(int, void *, unsigned int);
struct psp_device {
struct list_head entry;
@@ -52,16 +37,24 @@ struct psp_device {
void __iomem *io_regs;
- int sev_state;
- unsigned int sev_int_rcvd;
- wait_queue_head_t sev_int_queue;
- struct sev_misc_dev *sev_misc;
- struct sev_user_data_status status_cmd_buf;
- struct sev_data_init init_cmd_buf;
+ psp_irq_handler_t sev_irq_handler;
+ void *sev_irq_data;
+
+ psp_irq_handler_t tee_irq_handler;
+ void *tee_irq_data;
- u8 api_major;
- u8 api_minor;
- u8 build;
+ void *sev_data;
+ void *tee_data;
};
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_sev_irq_handler(struct psp_device *psp);
+
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_tee_irq_handler(struct psp_device *psp);
+
+struct psp_device *psp_get_master_device(void);
+
#endif /* __PSP_DEV_H */
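
Usage sketch (illustrative, not part of the patch): a subdevice hooks the shared PSP interrupt through the handler-registration API declared above; the common psp_irq_handler() passes the raw interrupt status word to every registered handler, and each handler filters for its own events. The struct my_sub_device and the function names below are hypothetical.

#include "psp-dev.h"	/* psp_irq_handler_t, psp_set_sev_irq_handler(), ... */

/* Hypothetical subdevice state, for illustration only. */
struct my_sub_device {
	unsigned int last_status;
};

/* Matches the psp_irq_handler_t signature declared above. */
static void my_sub_irq_handler(int irq, void *data, unsigned int status)
{
	struct my_sub_device *sub = data;

	sub->last_status = status;	/* real handlers test their completion bit here */
}

static void my_sub_attach(struct psp_device *psp, struct my_sub_device *sub)
{
	/* Route the SEV handler slot to this subdevice until teardown. */
	psp_set_sev_irq_handler(psp, my_sub_irq_handler, sub);
}

static void my_sub_detach(struct psp_device *psp)
{
	psp_clear_sev_irq_handler(psp);
}
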
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
new file mode 100644
index 000000000000..e467860f797d
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -0,0 +1,1077 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Encrypted Virtualization (SEV) interface
+ *
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+#include <linux/firmware.h>
+
+#include <asm/smp.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
+static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ if (sev->api_major > maj)
+ return true;
+
+ if (sev->api_major == maj && sev->api_minor >= min)
+ return true;
+
+ return false;
+}
+
+static void sev_irq_handler(int irq, void *data, unsigned int status)
+{
+ struct sev_device *sev = data;
+ int reg;
+
+ /* Check if it is command completion: */
+ if (!(status & SEV_CMD_COMPLETE))
+ return;
+
+ /* Check if it is SEV command completion: */
+ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (reg & PSP_CMDRESP_RESP) {
+ sev->int_rcvd = 1;
+ wake_up(&sev->int_queue);
+ }
+}
+
+static int sev_wait_cmd_ioc(struct sev_device *sev,
+ unsigned int *reg, unsigned int timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(sev->int_queue,
+ sev->int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+
+ return 0;
+}
+
+static int sev_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
+ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
+ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
+ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
+ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
+ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
+ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
+ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
+ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
+ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
+ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
+ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
+ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
+ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
+ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
+ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
+ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
+ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
+ case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ default: return 0;
+ }
+
+ return 0;
+}
+
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ unsigned int phys_lsb, phys_msb;
+ unsigned int reg, ret = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ sev = psp->sev_data;
+
+ /* Get the physical address of the command buffer */
+ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+ dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
+
+ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+
+ sev->int_rcvd = 0;
+
+ reg = cmd;
+ reg <<= SEV_CMDRESP_CMD_SHIFT;
+ reg |= SEV_CMDRESP_IOC;
+ iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
+
+ /* wait for command completion */
+ ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
+
+ if (psp_ret)
+ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
+ cmd, reg & PSP_CMDRESP_ERR_MASK);
+ ret = -EIO;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ return ret;
+}
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ int rc = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ sev->state = SEV_STATE_INIT;
+
+ /* Prepare for first SEV guest launch after INIT */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
+ dev_dbg(sev->dev, "SEV firmware initialized\n");
+
+ return rc;
+}
+
+int sev_platform_init(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_init_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+ sev->state = SEV_STATE_UNINIT;
+ dev_dbg(sev->dev, "SEV firmware shutdown\n");
+
+ return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_shutdown_locked(NULL);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+ &sev->status_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ *state = sev->status_cmd_buf.state;
+ return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+ int state, rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * The SEV spec requires that FACTORY_RESET must be issued in
+	 * UNINIT state. Before we go further, let's check if any guest is
+	 * active.
+	 *
+	 * If FW is in WORKING state, deny the request; otherwise issue the
+	 * SHUTDOWN command to transition from INIT to UNINIT before issuing the FACTORY_RESET.
+ *
+ */
+ rc = sev_get_platform_state(&state, &argp->error);
+ if (rc)
+ return rc;
+
+ if (state == SEV_STATE_WORKING)
+ return -EBUSY;
+
+ if (state == SEV_STATE_INIT) {
+ rc = __sev_platform_shutdown_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *data = &sev->status_cmd_buf;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (sev->state == SEV_STATE_UNINIT) {
+ rc = __sev_platform_init_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_csr input;
+ struct sev_data_pek_csr *data;
+ void *blob = NULL;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* userspace wants to query CSR length */
+ if (!input.address || !input.length)
+ goto cmd;
+
+ /* allocate a physically contiguous buffer to store the CSR blob */
+ if (!access_ok(input.address, input.length) ||
+ input.length > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ blob = kmalloc(input.length, GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->address = __psp_pa(blob);
+ data->len = input.length;
+
+cmd:
+ if (sev->state == SEV_STATE_UNINIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_blob;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+ /* If we query the CSR length, FW responded with expected data. */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_blob;
+ }
+
+ if (blob) {
+ if (copy_to_user((void __user *)input.address, blob, input.length))
+ ret = -EFAULT;
+ }
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+ /* verify that blob length does not exceed our limit */
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ return memdup_user((void __user *)(uintptr_t)uaddr, len);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
+static int sev_get_api_version(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *status;
+ int error = 0, ret;
+
+ status = &sev->status_cmd_buf;
+ ret = sev_platform_status(status, &error);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to get status. Error: %#x\n", error);
+ return 1;
+ }
+
+ sev->api_major = status->api_major;
+ sev->api_minor = status->api_minor;
+ sev->build = status->build;
+ sev->state = status->state;
+
+ return 0;
+}
+
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+ * Fall-back to using generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
+
+/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
+static int sev_update_firmware(struct device *dev)
+{
+ struct sev_data_download_firmware *data;
+ const struct firmware *firmware;
+ int ret, error, order;
+ struct page *p;
+ u64 data_size;
+
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
+ return -1;
+ }
+
+ /*
+ * SEV FW expects the physical address given to it to be 32
+ * byte aligned. Memory allocated has structure placed at the
+ * beginning followed by the firmware being passed to the SEV
+ * FW. Allocate enough memory for data structure + alignment
+ * padding + SEV FW.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
+
+ order = get_order(firmware->size + data_size);
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ ret = -1;
+ goto fw_err;
+ }
+
+ /*
+ * Copy firmware data to a kernel allocated contiguous
+ * memory region.
+ */
+ data = page_address(p);
+ memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+
+ data->address = __psp_pa(page_address(p) + data_size);
+ data->len = firmware->size;
+
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ if (ret)
+ dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+ else
+ dev_info(dev, "SEV firmware update successful\n");
+
+ __free_pages(p, order);
+
+fw_err:
+ release_firmware(firmware);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_cert_import input;
+ struct sev_data_pek_cert_import *data;
+ void *pek_blob, *oca_blob;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* copy PEK certificate blobs from userspace */
+ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+ if (IS_ERR(pek_blob)) {
+ ret = PTR_ERR(pek_blob);
+ goto e_free;
+ }
+
+ data->pek_cert_address = __psp_pa(pek_blob);
+ data->pek_cert_len = input.pek_cert_len;
+
+ /* copy PEK certificate blobs from userspace */
+ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+ if (IS_ERR(oca_blob)) {
+ ret = PTR_ERR(oca_blob);
+ goto e_free_pek;
+ }
+
+ data->oca_cert_address = __psp_pa(oca_blob);
+ data->oca_cert_len = input.oca_cert_len;
+
+ /* If platform is not in INIT state then transition it to INIT */
+ if (sev->state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_oca;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+ kfree(oca_blob);
+e_free_pek:
+ kfree(pek_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+	 * required length or the actual length written); return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
+
+static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+{
+ struct sev_data_get_id *data;
+ u64 data_size, user_size;
+ void *id_blob, *mem;
+ int ret;
+
+ /* SEV GET_ID available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ /* SEV FW expects the buffer it fills with the ID to be
+ * 8-byte aligned. Memory allocated should be enough to
+ * hold data structure + alignment padding + memory
+ * where SEV FW writes the ID.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
+ user_size = sizeof(struct sev_user_data_get_id);
+
+ mem = kzalloc(data_size + user_size, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ data = mem;
+ id_blob = mem + data_size;
+
+ data->address = __psp_pa(id_blob);
+ data->len = user_size;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ if (!ret) {
+ if (copy_to_user((void __user *)argp->data, id_blob, data->len))
+ ret = -EFAULT;
+ }
+
+ kfree(mem);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pdh_cert_export input;
+ void *pdh_blob = NULL, *cert_blob = NULL;
+ struct sev_data_pdh_cert_export *data;
+ int ret;
+
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Userspace wants to query the certificate length. */
+ if (!input.pdh_cert_address ||
+ !input.pdh_cert_len ||
+ !input.cert_chain_address)
+ goto cmd;
+
+ /* Allocate a physically contiguous buffer to store the PDH blob. */
+ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ /* Allocate a physically contiguous buffer to store the cert chain blob. */
+ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.cert_chain_address, input.cert_chain_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ if (!pdh_blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->pdh_cert_address = __psp_pa(pdh_blob);
+ data->pdh_cert_len = input.pdh_cert_len;
+
+ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ if (!cert_blob) {
+ ret = -ENOMEM;
+ goto e_free_pdh;
+ }
+
+ data->cert_chain_address = __psp_pa(cert_blob);
+ data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+ /* If we query the length, FW responded with expected data. */
+ input.cert_chain_len = data->cert_chain_len;
+ input.pdh_cert_len = data->pdh_cert_len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+
+ if (pdh_blob) {
+ if (copy_to_user((void __user *)input.pdh_cert_address,
+ pdh_blob, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+ }
+
+ if (cert_blob) {
+ if (copy_to_user((void __user *)input.cert_chain_address,
+ cert_blob, input.cert_chain_len))
+ ret = -EFAULT;
+ }
+
+e_free_cert:
+ kfree(cert_blob);
+e_free_pdh:
+ kfree(pdh_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sev_issue_cmd input;
+ int ret = -EFAULT;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ if (ioctl != SEV_ISSUE_CMD)
+ return -EINVAL;
+
+ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+ return -EFAULT;
+
+ if (input.cmd > SEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sev_cmd_mutex);
+
+ switch (input.cmd) {
+
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
+ case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
+ ret = sev_ioctl_do_get_id(&input);
+ break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+ ret = -EFAULT;
+out:
+ mutex_unlock(&sev_cmd_mutex);
+
+ return ret;
+}
+
+static const struct file_operations sev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sev_ioctl,
+};
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+ misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct sev_device *sev)
+{
+ struct device *dev = sev->dev;
+ int ret;
+
+ /*
+ * SEV feature support can be detected on multiple devices but the SEV
+ * FW commands must be issued on the master. During probe, we do not
+	 * know the master, hence we create /dev/sev on the first device probe.
+	 * sev_do_cmd() finds the right master device through which to issue
+	 * the command to the firmware.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = DEVICE_NAME;
+ misc->fops = &sev_fops;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ init_waitqueue_head(&sev->int_queue);
+ sev->misc = misc_dev;
+ dev_dbg(dev, "registered SEV device\n");
+
+ return 0;
+}
+
+int sev_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sev_device *sev;
+ int ret = -ENOMEM;
+
+ sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ goto e_err;
+
+ psp->sev_data = sev;
+
+ sev->dev = dev;
+ sev->psp = psp;
+
+ sev->io_regs = psp->io_regs;
+
+ sev->vdata = (struct sev_vdata *)psp->vdata->sev;
+ if (!sev->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "sev: missing driver data\n");
+ goto e_err;
+ }
+
+ psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
+
+ ret = sev_misc_init(sev);
+ if (ret)
+ goto e_irq;
+
+ dev_notice(dev, "sev enabled\n");
+
+ return 0;
+
+e_irq:
+ psp_clear_sev_irq_handler(psp);
+e_err:
+ psp->sev_data = NULL;
+
+ dev_notice(dev, "sev initialization failed\n");
+
+ return ret;
+}
+
+void sev_dev_destroy(struct psp_device *psp)
+{
+ struct sev_device *sev = psp->sev_data;
+
+ if (!sev)
+ return;
+
+ if (sev->misc)
+ kref_put(&misc_dev->refcount, sev_exit);
+
+ psp_clear_sev_irq_handler(psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+ void *data, int *error)
+{
+ if (!filep || filep->f_op != &sev_fops)
+ return -EBADF;
+
+ return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+void sev_pci_init(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int error, rc;
+
+ if (!sev)
+ return;
+
+ psp_timeout = psp_probe_timeout;
+
+ if (sev_get_api_version())
+ goto err;
+
+ /*
+	 * If the platform is not in the UNINIT state then firmware upgrade and/or
+	 * platform INIT commands will fail. These commands require the UNINIT state.
+	 *
+	 * In a normal boot we should never run into a case where the firmware
+	 * is not in the UNINIT state on boot. But in case of a kexec boot, a reboot
+ * may not go through a typical shutdown sequence and may leave the
+ * firmware in INIT or WORKING state.
+ */
+
+ if (sev->state != SEV_STATE_UNINIT) {
+ sev_platform_shutdown(NULL);
+ sev->state = SEV_STATE_UNINIT;
+ }
+
+ if (sev_version_greater_or_equal(0, 15) &&
+ sev_update_firmware(sev->dev) == 0)
+ sev_get_api_version();
+
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+ * validation of SEV related persistent data has
+ * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+		dev_dbg(sev->dev, "SEV: retrying INIT command\n");
+ rc = sev_platform_init(&error);
+ }
+
+ if (rc) {
+ dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
+ return;
+ }
+
+ dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ return;
+
+err:
+ psp_master->sev_data = NULL;
+}
+
+void sev_pci_exit(void)
+{
+ if (!psp_master->sev_data)
+ return;
+
+ sev_platform_shutdown(NULL);
+}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
new file mode 100644
index 000000000000..dd5c4fe82914
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __SEV_DEV_H__
+#define __SEV_DEV_H__
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+#include <linux/capability.h>
+
+#define SEV_CMD_COMPLETE BIT(1)
+#define SEV_CMDRESP_CMD_SHIFT 16
+#define SEV_CMDRESP_IOC BIT(0)
+
+struct sev_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sev_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ void __iomem *io_regs;
+
+ struct sev_vdata *vdata;
+
+ int state;
+ unsigned int int_rcvd;
+ wait_queue_head_t int_queue;
+ struct sev_misc_dev *misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
+
+ u8 api_major;
+ u8 api_minor;
+ u8 build;
+};
+
+int sev_dev_init(struct psp_device *psp);
+void sev_dev_destroy(struct psp_device *psp);
+
+void sev_pci_init(void);
+void sev_pci_exit(void);
+
+#endif /* __SEV_DEV_H__ */
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 53c12562d31e..423594608ad1 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -39,10 +39,23 @@ struct ccp_vdata {
const unsigned int rsamax;
};
-struct psp_vdata {
+struct sev_vdata {
const unsigned int cmdresp_reg;
const unsigned int cmdbuff_addr_lo_reg;
const unsigned int cmdbuff_addr_hi_reg;
+};
+
+struct tee_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int ring_wptr_reg;
+ const unsigned int ring_rptr_reg;
+};
+
+struct psp_vdata {
+ const struct sev_vdata *sev;
+ const struct tee_vdata *tee;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b29d2e663e10..56c1f61c0f84 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -262,19 +262,42 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata pspv1 = {
+static const struct sev_vdata sevv1 = {
.cmdresp_reg = 0x10580,
.cmdbuff_addr_lo_reg = 0x105e0,
.cmdbuff_addr_hi_reg = 0x105e4,
+};
+
+static const struct sev_vdata sevv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+};
+
+static const struct tee_vdata teev1 = {
+ .cmdresp_reg = 0x10544,
+ .cmdbuff_addr_lo_reg = 0x10548,
+ .cmdbuff_addr_hi_reg = 0x1054c,
+ .ring_wptr_reg = 0x10550,
+ .ring_rptr_reg = 0x10554,
+};
+
+static const struct psp_vdata pspv1 = {
+ .sev = &sevv1,
.feature_reg = 0x105fc,
.inten_reg = 0x10610,
.intsts_reg = 0x10614,
};
static const struct psp_vdata pspv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .sev = &sevv2,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
+};
+
+static const struct psp_vdata pspv3 = {
+ .tee = &teev1,
.feature_reg = 0x109fc,
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
@@ -312,12 +335,22 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv2,
#endif
},
+ { /* 4 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv3,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+ { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
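
Splitting psp_vdata into optional per-function sub-structures is what lets one PSP driver cover the SEV-only parts (pspv1/pspv2) and the TEE-only Raven Ridge client part (pspv3, PCI device 0x15DF). A hedged sketch of how the probe path can dispatch on this — the patch's actual psp-dev.c changes are not shown in this excerpt, and the real code also consults the PSP feature register:

/*
 * Illustrative only: initialize whichever sub-devices this part's vdata
 * describes, using the helpers declared in sev-dev.h and tee-dev.h.
 */
static int example_psp_subdev_init(struct psp_device *psp)
{
	int ret;

	if (psp->vdata->sev) {			/* pspv1/pspv2 parts */
		ret = sev_dev_init(psp);
		if (ret)
			return ret;
	}

	if (psp->vdata->tee) {			/* pspv3, e.g. PCI ID 0x15DF */
		ret = tee_dev_init(psp);
		if (ret)
			return ret;
	}

	return 0;
}
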
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
new file mode 100644
index 000000000000..5e697a90ea7f
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: MIT
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-tee.h>
+
+#include "psp-dev.h"
+#include "tee-dev.h"
+
+static bool psp_dead;
+
+static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+ void *start_addr;
+
+ if (!ring_size)
+ return -EINVAL;
+
+	/* We need the actual physical address instead of the DMA address, since
+	 * the Trusted OS running on the AMD Secure Processor will map this region
+	 */
+ start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size));
+ if (!start_addr)
+ return -ENOMEM;
+
+ rb_mgr->ring_start = start_addr;
+ rb_mgr->ring_size = ring_size;
+ rb_mgr->ring_pa = __psp_pa(start_addr);
+ mutex_init(&rb_mgr->mutex);
+
+ return 0;
+}
+
+static void tee_free_ring(struct psp_tee_device *tee)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+
+ if (!rb_mgr->ring_start)
+ return;
+
+ free_pages((unsigned long)rb_mgr->ring_start,
+ get_order(rb_mgr->ring_size));
+
+ rb_mgr->ring_start = NULL;
+ rb_mgr->ring_size = 0;
+ rb_mgr->ring_pa = 0;
+ mutex_destroy(&rb_mgr->mutex);
+}
+
+static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
+ unsigned int *reg)
+{
+ /* ~10ms sleep per loop => nloop = timeout * 100 */
+ int nloop = timeout * 100;
+
+ while (--nloop) {
+ *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ usleep_range(10000, 10100);
+ }
+
+ dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+static
+struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
+{
+ struct tee_init_ring_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa);
+ cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa);
+ cmd->size = tee->rb_mgr.ring_size;
+
+ dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n",
+ cmd->hi_addr, cmd->low_addr, cmd->size);
+
+ return cmd;
+}
+
+static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+static int tee_init_ring(struct psp_tee_device *tee)
+{
+ int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
+ struct tee_init_ring_cmd *cmd;
+ phys_addr_t cmd_buffer;
+ unsigned int reg;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);
+
+ ret = tee_alloc_ring(tee, ring_size);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring allocation failed %d\n", ret);
+ return ret;
+ }
+
+ tee->rb_mgr.wptr = 0;
+
+ cmd = tee_alloc_cmd_buffer(tee);
+ if (!cmd) {
+ tee_free_ring(tee);
+ return -ENOMEM;
+ }
+
+ cmd_buffer = __psp_pa((void *)cmd);
+
+ /* Send command buffer details to Trusted OS by writing to
+ * CPU-PSP message registers
+ */
+
+ iowrite32(lower_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(upper_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
+ iowrite32(TEE_RING_INIT_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring init command timed out\n");
+ tee_free_ring(tee);
+ goto free_buf;
+ }
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ tee_free_ring(tee);
+ ret = -EIO;
+ }
+
+free_buf:
+ tee_free_cmd_buffer(cmd);
+
+ return ret;
+}
+
+static void tee_destroy_ring(struct psp_tee_device *tee)
+{
+ unsigned int reg;
+ int ret;
+
+ if (!tee->rb_mgr.ring_start)
+ return;
+
+ if (psp_dead)
+ goto free_ring;
+
+ iowrite32(TEE_RING_DESTROY_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring destroy command timed out\n");
+ } else if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ }
+
+free_ring:
+ tee_free_ring(tee);
+}
+
+int tee_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_tee_device *tee;
+ int ret;
+
+ ret = -ENOMEM;
+ tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL);
+ if (!tee)
+ goto e_err;
+
+ psp->tee_data = tee;
+
+ tee->dev = dev;
+ tee->psp = psp;
+
+ tee->io_regs = psp->io_regs;
+
+ tee->vdata = (struct tee_vdata *)psp->vdata->tee;
+ if (!tee->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "tee: missing driver data\n");
+ goto e_err;
+ }
+
+ ret = tee_init_ring(tee);
+ if (ret) {
+ dev_err(dev, "tee: failed to init ring buffer\n");
+ goto e_err;
+ }
+
+ dev_notice(dev, "tee enabled\n");
+
+ return 0;
+
+e_err:
+ psp->tee_data = NULL;
+
+ dev_notice(dev, "tee initialization failed\n");
+
+ return ret;
+}
+
+void tee_dev_destroy(struct psp_device *psp)
+{
+ struct psp_tee_device *tee = psp->tee_data;
+
+ if (!tee)
+ return;
+
+ tee_destroy_ring(tee);
+}
+
+static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ void *buf, size_t len, struct tee_ring_cmd **resp)
+{
+ struct tee_ring_cmd *cmd;
+ u32 rptr, wptr;
+ int nloop = 1000, ret = 0;
+
+ *resp = NULL;
+
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ wptr = tee->rb_mgr.wptr;
+
+ /* Check if ring buffer is full */
+ do {
+ rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
+
+ if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
+ break;
+
+ dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+
+ /* Wait if ring buffer is full */
+ mutex_unlock(&tee->rb_mgr.mutex);
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ } while (--nloop);
+
+ if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Pointer to empty data entry in ring buffer */
+ cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+
+ /* Write command data into ring buffer */
+ cmd->cmd_id = cmd_id;
+ cmd->cmd_state = TEE_CMD_STATE_INIT;
+ memset(&cmd->buf[0], 0, sizeof(cmd->buf));
+ memcpy(&cmd->buf[0], buf, len);
+
+ /* Update local copy of write pointer */
+ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
+ if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
+ tee->rb_mgr.wptr = 0;
+
+ /* Trigger interrupt to Trusted OS */
+ iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg);
+
+	/* The response is provided by the Trusted OS in the same location
+	 * as the submitted data entry within the ring buffer.
+	 */
+ *resp = cmd;
+
+unlock:
+ mutex_unlock(&tee->rb_mgr.mutex);
+
+ return ret;
+}
+
+static int tee_wait_cmd_completion(struct psp_tee_device *tee,
+ struct tee_ring_cmd *resp,
+ unsigned int timeout)
+{
+ /* ~5ms sleep per loop => nloop = timeout * 200 */
+ int nloop = timeout * 200;
+
+ while (--nloop) {
+ if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
+ return 0;
+
+ usleep_range(5000, 5100);
+ }
+
+ dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
+ resp->cmd_id);
+
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_tee_device *tee;
+ struct tee_ring_cmd *resp;
+ int ret;
+
+ if (!buf || !status || !len || len > sizeof(resp->buf))
+ return -EINVAL;
+
+ *status = 0;
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ tee = psp->tee_data;
+
+ ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp);
+ if (ret)
+ return ret;
+
+ ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &resp->buf[0], len);
+ *status = resp->status;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_tee_process_cmd);
+
+int psp_check_tee_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_tee_status);
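
psp_tee_process_cmd() is the in-kernel entry point the ring buffer above exists to serve: it copies the caller's payload into the next free 1 KB slot, rings the doorbell, polls for completion and copies the response back in place. A hedged sketch of a caller follows; the command id and payload struct belong to the AMD-TEE driver added elsewhere in this series, not to this file, so treat those names as illustrative:

/*
 * Hedged sketch of the client side. TEE_CMD_ID_LOAD_TA and
 * struct tee_cmd_load_ta are assumed from the AMD-TEE driver headers.
 */
static int example_load_ta(void *ta_blob, size_t ta_size)
{
	struct tee_cmd_load_ta cmd = {};
	u32 status = 0;
	int ret;

	if (psp_check_tee_status())
		return -ENODEV;			/* no TEE-capable PSP */

	cmd.low_addr = lower_32_bits(__psp_pa(ta_blob));
	cmd.hi_addr  = upper_32_bits(__psp_pa(ta_blob));
	cmd.size     = ta_size;

	ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, &cmd, sizeof(cmd),
				  &status);
	if (ret)
		return ret;			/* submit or completion failure */

	return status ? -EIO : 0;		/* non-zero status: TEE-side error */
}
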
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
new file mode 100644
index 000000000000..f09960112115
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ */
+
+/* This file describes the TEE communication interface between the host and
+ * the AMD Secure Processor.
+ */
+
+#ifndef __TEE_DEV_H__
+#define __TEE_DEV_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+#define TEE_DEFAULT_TIMEOUT 10
+#define MAX_BUFFER_SIZE 992
+
+/**
+ * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
+ * @TEE_RING_INIT_CMD: Initialize ring buffer
+ * @TEE_RING_DESTROY_CMD: Destroy ring buffer
+ * @TEE_RING_MAX_CMD: Maximum command id
+ */
+enum tee_ring_cmd_id {
+ TEE_RING_INIT_CMD = 0x00010000,
+ TEE_RING_DESTROY_CMD = 0x00020000,
+ TEE_RING_MAX_CMD = 0x000F0000,
+};
+
+/**
+ * struct tee_init_ring_cmd - Command to init TEE ring buffer
+ * @low_addr: bits [31:0] of the physical address of ring buffer
+ * @hi_addr: bits [63:32] of the physical address of ring buffer
+ * @size: size of ring buffer in bytes
+ */
+struct tee_init_ring_cmd {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+#define MAX_RING_BUFFER_ENTRIES 32
+
+/**
+ * struct ring_buf_manager - Helper structure to manage ring buffer.
+ * @ring_start: starting address of ring buffer
+ * @ring_size: size of ring buffer in bytes
+ * @ring_pa: physical address of ring buffer
+ * @wptr: index to the last written entry in ring buffer
+ */
+struct ring_buf_manager {
+ struct mutex mutex; /* synchronizes access to ring buffer */
+ void *ring_start;
+ u32 ring_size;
+ phys_addr_t ring_pa;
+ u32 wptr;
+};
+
+struct psp_tee_device {
+ struct device *dev;
+ struct psp_device *psp;
+ void __iomem *io_regs;
+ struct tee_vdata *vdata;
+ struct ring_buf_manager rb_mgr;
+};
+
+/**
+ * enum tee_cmd_state - TEE command states for the ring buffer interface
+ * @TEE_CMD_STATE_INIT: initial state of command when sent from host
+ * @TEE_CMD_STATE_PROCESS: command being processed by TEE environment
+ * @TEE_CMD_STATE_COMPLETED: command processing completed
+ */
+enum tee_cmd_state {
+ TEE_CMD_STATE_INIT,
+ TEE_CMD_STATE_PROCESS,
+ TEE_CMD_STATE_COMPLETED,
+};
+
+/**
+ * struct tee_ring_cmd - Structure of the command buffer in TEE ring
+ * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
+ * interface
+ * @cmd_state: refers to &enum tee_cmd_state
+ * @status: status of TEE command execution
+ * @res0: reserved region
+ * @pdata: private data (currently unused)
+ * @res1: reserved region
+ * @buf: TEE command specific buffer
+ */
+struct tee_ring_cmd {
+ u32 cmd_id;
+ u32 cmd_state;
+ u32 status;
+ u32 res0[1];
+ u64 pdata;
+ u32 res1[2];
+ u8 buf[MAX_BUFFER_SIZE];
+
+ /* Total size: 1024 bytes */
+} __packed;
+
+int tee_dev_init(struct psp_device *psp);
+void tee_dev_destroy(struct psp_device *psp);
+
+#endif /* __TEE_DEV_H__ */
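
A hedged arithmetic note on the header above, not part of the patch: struct tee_ring_cmd packs to 4+4+4+4+8+8+992 = 1024 bytes, matching the BUILD_BUG_ON() in tee_init_ring(), so the ring is MAX_RING_BUFFER_ENTRIES (32) slots of 1 KB = 32 KB. The write pointer advances one slot at a time and wraps, as tee_submit_cmd() does:

/* Illustrative helper only; mirrors the wptr update in tee_submit_cmd(). */
static inline u32 example_next_wptr(u32 wptr, u32 ring_size)
{
	wptr += sizeof(struct tee_ring_cmd);	/* one 1 KB slot */
	if (wptr >= ring_size)
		wptr = 0;			/* wrap back to slot 0 */
	return wptr;
}
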
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 64d318dc0d47..2fc0e0da790b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
* revealed the decrypted message --> zero its memory.
*/
sg_zero_buffer(areq->dst, sg_nents(areq->dst),
- areq->cryptlen, 0);
+ areq->cryptlen, areq->assoclen);
err = -EBADMSG;
}
/*ENCRYPT*/
@@ -385,13 +385,13 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
return -EINVAL;
break;
default:
- dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
return -EINVAL;
}
/* Check cipher key size */
if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
- dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -399,7 +399,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
if (ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_192 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -562,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = crypto_authenc_extractkeys(&keys, key, keylen);
if (rc)
- goto badkey;
+ return rc;
enckey = keys.enckey;
authkey = keys.authkey;
ctx->enc_keylen = keys.enckeylen;
@@ -570,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
- rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
- goto badkey;
+ return -EINVAL;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
@@ -591,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = validate_keys_sizes(ctx);
if (rc)
- goto badkey;
+ return rc;
/* STAT_PHASE_1: Copy key to ctx */
@@ -605,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
- goto badkey;
+ return rc;
}
/* STAT_PHASE_2: Create sequence */
@@ -622,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
break; /* No auth. key setup */
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- rc = -ENOTSUPP;
- goto badkey;
+ return -ENOTSUPP;
}
/* STAT_PHASE_3: Submit sequence to HW */
@@ -632,18 +630,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- goto setkey_error;
+ return rc;
}
}
/* Update STAT_PHASE_3 */
return rc;
-
-badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-setkey_error:
- return rc;
}
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
@@ -1567,7 +1559,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (l < 2 || l > 8) {
- dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+ dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1925,7 +1917,6 @@ static int cc_proc_aead(struct aead_request *req,
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, areq_ctx->assoclen);
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2065,7 +2056,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2115,7 +2106,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2234,7 +2225,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2265,7 +2256,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2299,7 +2290,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2330,7 +2321,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3112b58d0bb1..7d6252d892d7 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -291,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* This check the size of the protected key token */
if (keylen != sizeof(hki)) {
dev_err(dev, "Unsupported protected key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -303,8 +302,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
keylen = hki.keylen;
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -394,8 +392,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -523,6 +520,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
}
}
+
static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
@@ -534,8 +532,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
- dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
- unsigned int key_len = ctx_p->keylen;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
@@ -571,6 +567,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -836,8 +873,7 @@ static int cc_cipher_process(struct skcipher_request *req,
/* TODO: check data length according to mode */
if (validate_data_size(ctx_p, nbytes)) {
- dev_err(dev, "Unsupported data size %d.\n", nbytes);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
rc = -EINVAL;
goto exit_process;
}
@@ -881,12 +917,14 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup IV and XEX key used */
+ /* Setup state (IV) */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Setup MLLI line, if needed */
cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+ /* Setup state (IV and XEX key) */
+ cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
/* Read next IV */
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8b8eee513c27..532bc95a8373 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -133,7 +133,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
- /* if driver suspended return, probebly shared interrupt */
+ /* if driver suspended return, probably shared interrupt */
if (cc_pm_is_dev_suspended(dev))
return IRQ_NONE;
@@ -271,6 +271,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
struct clk *clk;
+ int irq;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -337,9 +338,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
&req_mem_cc_regs->start, new_drvdata->cc_base);
/* Then IRQ */
- new_drvdata->irq = platform_get_irq(plat_dev, 0);
- if (new_drvdata->irq < 0)
- return new_drvdata->irq;
+ irq = platform_get_irq(plat_dev, 0);
+ if (irq < 0)
+ return irq;
init_completion(&new_drvdata->hw_queue_avail);
@@ -442,14 +443,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
/* register the driver isr function */
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
+ new_drvdata);
if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
goto post_clk_err;
}
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
@@ -465,7 +465,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_fips_init(new_drvdata);
if (rc) {
- dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
goto post_debugfs_err;
}
rc = cc_sram_mgr_init(new_drvdata);
@@ -490,13 +490,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_buffer_mgr_init(new_drvdata);
if (rc) {
- dev_err(dev, "buffer_mgr_init failed\n");
+ dev_err(dev, "cc_buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
if (rc) {
- dev_err(dev, "ssi_power_mgr_init failed\n");
+ dev_err(dev, "cc_pm_init failed\n");
goto post_buf_mgr_err;
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index ab31d4a68c80..c227718ba992 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -28,7 +28,6 @@
/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
-#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"
@@ -133,13 +132,11 @@ struct cc_crypto_req {
/**
* struct cc_drvdata - driver private data context
* @cc_base: virt address of the CC registers
- * @irq: device IRQ number
- * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @irq: bitmap indicating source of last interrupt
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
- u32 irq_mask;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
@@ -161,6 +158,7 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
+ bool pm_on;
};
struct cc_crypto_alg {
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 4c8bce33abcf..702aefc21447 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -120,7 +120,7 @@ static void fips_dsr(unsigned long devarg)
cc_tee_handle_fips_error(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt.
*/
val = (CC_REG(HOST_IMR) & ~irq);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index bc71bdf44a9f..912e5ce5079d 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -899,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -990,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -2358,11 +2352,9 @@ cc_digest_len_addr(void *drvdata, u32 mode)
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
-#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(cc_digest_len_init);
-#endif
default:
return digest_len_addr; /*to avoid kernel crash*/
}
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index dbc508fb719b..24c368b866f6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- rc = cc_suspend_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
- return rc;
- }
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
@@ -48,7 +42,7 @@ int cc_pm_resume(struct device *dev)
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
- /* wait for Crytpcell reset completion */
+ /* wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
return -EBUSY;
@@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev)
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);
- rc = cc_resume_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
- return rc;
- }
-
- /* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);
return 0;
@@ -80,28 +67,20 @@ int cc_pm_get(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (cc_req_queue_suspended(drvdata))
+ if (drvdata->pm_on)
rc = pm_runtime_get_sync(dev);
- else
- pm_runtime_get_noresume(dev);
- return rc;
+ return (rc == 1 ? 0 : rc);
}
-int cc_pm_put_suspend(struct device *dev)
+void cc_pm_put_suspend(struct device *dev)
{
- int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (!cc_req_queue_suspended(drvdata)) {
+ if (drvdata->pm_on) {
pm_runtime_mark_last_busy(dev);
- rc = pm_runtime_put_autosuspend(dev);
- } else {
- /* Something wrong happens*/
- dev_err(dev, "request to suspend already suspended queue");
- rc = -EBUSY;
+ pm_runtime_put_autosuspend(dev);
}
- return rc;
}
bool cc_pm_is_dev_suspended(struct device *dev)
@@ -114,10 +93,10 @@ int cc_pm_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- /* must be before the enabling to avoid resdundent suspending */
+ /* must be before the enabling to avoid redundant suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
- /* activate the PM module */
+ /* set us as active - note we won't do PM ops until cc_pm_go()! */
return pm_runtime_set_active(dev);
}
@@ -125,9 +104,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = true;
}
void cc_pm_fini(struct cc_drvdata *drvdata)
{
pm_runtime_disable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = false;
}
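
With the request-queue suspend bookkeeping removed, the runtime-PM contract for ccree callers becomes a plain get/put bracket around hardware access; the new pm_on flag keeps both calls as no-ops until cc_pm_go() enables runtime PM. A hedged sketch of the calling pattern, mirroring what cc_send_request() does (the descriptor step is a placeholder):

/* Illustrative only: "dev" and the descriptor step stand in for the real caller. */
static int example_cc_hw_access(struct device *dev)
{
	int rc;

	rc = cc_pm_get(dev);		/* no-op until cc_pm_go() sets pm_on */
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	/* ... build descriptors and push them to the HW queue ... */

	cc_pm_put_suspend(dev);		/* mark last-busy, autosuspend later */
	return 0;
}
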
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index a7d98a5da2e1..80a18e11cae4 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -21,7 +21,7 @@ void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
-int cc_pm_put_suspend(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -35,25 +35,12 @@ static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
-static inline int cc_pm_suspend(struct device *dev)
-{
- return 0;
-}
-
-static inline int cc_pm_resume(struct device *dev)
-{
- return 0;
-}
-
static inline int cc_pm_get(struct device *dev)
{
return 0;
}
-static inline int cc_pm_put_suspend(struct device *dev)
-{
- return 0;
-}
+static inline void cc_pm_put_suspend(struct device *dev) {}
static inline bool cc_pm_is_dev_suspended(struct device *dev)
{
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index a947d5a2cf35..9d61e6f12478 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
#else
struct tasklet_struct comptask;
#endif
- bool is_runtime_suspended;
};
struct cc_bl_item {
@@ -230,7 +229,7 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
struct device *dev = drvdata_to_dev(drvdata);
/* SW queue is checked only once as it will not
- * be chaned during the poll because the spinlock_bh
+ * be changed during the poll because the spinlock_bh
* is held by the thread
*/
if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
@@ -275,12 +274,11 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
* \param len The crypto sequence length
* \param add_comp If "true": add an artificial dout DMA to mark completion
*
- * \return int Returns -EINPROGRESS or error code
*/
-static int cc_do_send_request(struct cc_drvdata *drvdata,
- struct cc_crypto_req *cc_req,
- struct cc_hw_desc *desc, unsigned int len,
- bool add_comp)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
unsigned int used_sw_slots;
@@ -303,8 +301,8 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
@@ -328,9 +326,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/* Update the free slots in HW queue */
req_mgr_h->q_free_slots -= total_seq_len;
}
-
- /* Operation still in process */
- return -EINPROGRESS;
}
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
@@ -390,20 +385,15 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
return;
}
- rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
- bli->len, false);
-
+ cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+ false);
spin_unlock(&mgr->hw_lock);
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- creq->user_cb(dev, req, rc);
- }
-
/* Remove ourselves from the backlog list */
spin_lock(&mgr->bl_lock);
list_del(&bli->list);
--mgr->bl_len;
+ kfree(bli);
}
spin_unlock(&mgr->bl_lock);
@@ -422,7 +412,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -451,8 +441,10 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
return -EBUSY;
}
- if (!rc)
- rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
+ if (!rc) {
+ cc_do_send_request(drvdata, cc_req, desc, len, false);
+ rc = -EINPROGRESS;
+ }
spin_unlock_bh(&mgr->hw_lock);
return rc;
@@ -472,7 +464,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -492,14 +484,8 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
reinit_completion(&drvdata->hw_queue_avail);
}
- rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
+ cc_do_send_request(drvdata, cc_req, desc, len, true);
spin_unlock_bh(&mgr->hw_lock);
-
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- return rc;
- }
-
wait_for_completion(&cc_req->seq_compl);
return 0;
}
@@ -532,8 +518,8 @@ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
enqueue_seq(drvdata, desc, len);
@@ -668,7 +654,7 @@ static void comp_handler(unsigned long devarg)
request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
@@ -677,52 +663,3 @@ static void comp_handler(unsigned long devarg)
cc_proc_backlog(drvdata);
dev_dbg(dev, "Comp. handler done.\n");
}
-
-/*
- * resume the queue configuration - no need to take the lock as this happens
- * inside the spin lock protection
- */
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- spin_lock_bh(&request_mgr_handle->hw_lock);
- request_mgr_handle->is_runtime_suspended = false;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-/*
- * suspend the queue configuration. Since it is used for the runtime suspend
- * only verify that the queue can be suspended.
- */
-int cc_suspend_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- /* lock the send_request */
- spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
- request_mgr_handle->req_queue_tail) {
- spin_unlock_bh(&request_mgr_handle->hw_lock);
- return -EBUSY;
- }
- request_mgr_handle->is_runtime_suspended = true;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- return request_mgr_handle->is_runtime_suspended;
-}
-
-#endif
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index f46cf766fe4d..ff7746aaaf35 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata);
void cc_req_mgr_fini(struct cc_drvdata *drvdata);
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata);
-
-int cc_suspend_req_queue(struct cc_drvdata *drvdata);
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
-#endif
-
#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 91e424378217..f078b2686418 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -23,22 +23,22 @@ config CRYPTO_DEV_CHELSIO
will be called chcr.
config CHELSIO_IPSEC_INLINE
- bool "Chelsio IPSec XFRM Tx crypto offload"
- depends on CHELSIO_T4
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
depends on CRYPTO_DEV_CHELSIO
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- default n
- ---help---
- Enable support for IPSec Tx Inline.
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Enable support for IPSec Tx Inline.
config CRYPTO_DEV_CHELSIO_TLS
- tristate "Chelsio Crypto Inline TLS Driver"
- depends on CHELSIO_T4
- depends on TLS_TOE
- select CRYPTO_DEV_CHELSIO
- ---help---
- Support Chelsio Inline TLS with Chelsio crypto accelerator.
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
+ depends on TLS_TOE
+ select CRYPTO_DEV_CHELSIO
+ ---help---
+ Support Chelsio Inline TLS with Chelsio crypto accelerator.
- To compile this driver as a module, choose M here: the module
- will be called chtls.
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 1b4a5664e604..b4b9b22125d1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- int err = 0;
crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
- return err;
+ return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -912,7 +905,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -943,7 +935,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx)
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+ ctx->dev->tx_channel_id =
+ (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -2173,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -3195,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_SW;
break;
default:
-
- crypto_tfm_set_flags((struct crypto_tfm *) tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3222,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3264,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3290,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3314,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
return chcr_ccm_common_setkey(aead, key, keylen);
@@ -3329,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
int error;
if (keylen < 3) {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3338,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
keylen -= 3;
@@ -3362,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
& CRYPTO_TFM_REQ_MASK);
ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (ret)
goto out;
@@ -3380,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
} else if (keylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
@@ -3432,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
if (get_alg_config(&param, max_authsize)) {
pr_err("chcr : Unsupported digest size\n");
@@ -3562,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
+
subtype = get_aead_subtype(authenc);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 029a7354f541..e937605670ac 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- struct adapter *adap;
-
mutex_lock(&drv_data.drv_mutex);
if (drv_data.last_dev == u_ctx) {
if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
@@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
list_move(&u_ctx->entry, &drv_data.inact_dev);
if (list_empty(&drv_data.act_dev))
drv_data.last_dev = NULL;
- adap = padap(&u_ctx->dev);
- memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
atomic_dec(&drv_data.dev_count);
mutex_unlock(&drv_data.drv_mutex);
@@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void)
static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
stop_crypto();
-
cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
mutex_lock(&drv_data.drv_mutex);
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index d2bc655ab931..459442704eb1 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -179,7 +179,10 @@ struct chtls_hws {
u32 copied_seq;
u64 tx_seq_no;
struct tls_scmd scmd;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ } crypto_info;
};
struct chtls_sock {
@@ -482,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index aca75237bbcf..d4674589e1aa 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static void chtls_purge_wr_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = dequeue_wr(sk)) != NULL)
+ kfree_skb(skb);
+}
+
static void chtls_release_resources(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
kfree_skb(csk->txdata_skb_cache);
csk->txdata_skb_cache = NULL;
+ if (csk->wr_credits != csk->wr_max_credits) {
+ chtls_purge_wr_queue(sk);
+ chtls_reset_wr_list(csk);
+ }
+
if (csk->l2t_entry) {
cxgb4_l2t_release(csk->l2t_entry);
csk->l2t_entry = NULL;
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+ kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
@@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
kfree_skb(skb);
}
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+ defer_handler_t handler)
+{
+ DEFERRED_SKB_CB(skb)->handler = handler;
+ spin_lock_bh(&cdev->deferq.lock);
+ __skb_queue_tail(&cdev->deferq, skb);
+ if (skb_queue_len(&cdev->deferq) == 1)
+ schedule_work(&cdev->deferq_task);
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev, int status, int queue)
{
@@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
if (!reply_skb) {
req->status = (queue << 1);
- send_defer_abort_rpl(cdev, skb);
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
return;
}
@@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
-/*
- * Add an skb to the deferred skb queue for processing from process context.
- */
-static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
- defer_handler_t handler)
-{
- DEFERRED_SKB_CB(skb)->handler = handler;
- spin_lock_bh(&cdev->deferq.lock);
- __skb_queue_tail(&cdev->deferq, skb);
- if (skb_queue_len(&cdev->deferq) == 1)
- schedule_work(&cdev->deferq_task);
- spin_unlock_bh(&cdev->deferq.lock);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2062,19 +2076,6 @@ rel_skb:
return 0;
}
-static struct sk_buff *dequeue_wr(struct sock *sk)
-{
- struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct sk_buff *skb = csk->wr_skb_head;
-
- if (likely(skb)) {
- /* Don't bother clearing the tail */
- csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
- WR_SKB_CB(skb)->next_wr = NULL;
- }
- return skb;
-}
-
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 129d7ac649a9..3fac0c74a41f 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline void chtls_reset_wr_list(struct chtls_sock *csk)
+{
+ csk->wr_skb_head = NULL;
+ csk->wr_skb_tail = NULL;
+}
+
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
WR_SKB_CB(skb)->next_wr = NULL;
@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
csk->wr_skb_tail = skb;
}
+
+static inline struct sk_buff *dequeue_wr(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb = NULL;
+
+ skb = csk->wr_skb_head;
+
+ if (likely(skb)) {
+ /* Don't bother clearing the tail */
+ csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+ WR_SKB_CB(skb)->next_wr = NULL;
+ }
+ return skb;
+}
#endif
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 2a34035d3cfb..f1820aca0d33 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -208,28 +208,53 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
static int chtls_key_info(struct chtls_sock *csk,
struct _key_ctx *kctx,
- u32 keylen, u32 optname)
+ u32 keylen, u32 optname,
+ int cipher_type)
{
- unsigned char key[AES_KEYSIZE_128];
- struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+ unsigned char key[AES_MAX_KEY_SIZE];
+ unsigned char *key_p, *salt;
unsigned char ghash_h[AEAD_H_SIZE];
- int ck_size, key_ctx_size;
+ int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
struct crypto_aes_ctx aes;
int ret;
- gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
- &csk->tlshws.crypto_info;
-
key_ctx_size = sizeof(struct _key_ctx) +
roundup(keylen, 16) + AEAD_H_SIZE;
- if (keylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else {
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * prepare key context based on GCM cipher type
+ */
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+ (struct tls12_crypto_info_aes_gcm_128 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_128->key, keylen);
+
+ key_p = gcm_ctx_128->key;
+ salt = gcm_ctx_128->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+ (struct tls12_crypto_info_aes_gcm_256 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_256->key, keylen);
+
+ key_p = gcm_ctx_256->key;
+ salt = gcm_ctx_256->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ break;
+ }
+ default:
pr_err("GCM: Invalid key length %d\n", keylen);
return -EINVAL;
}
- memcpy(key, gcm_ctx->key, keylen);
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
@@ -249,20 +274,20 @@ static int chtls_key_info(struct chtls_sock *csk,
key_ctx = ((key_ctx_size >> 4) << 3);
kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx);
chtls_rxkey_ivauth(kctx);
} else {
kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx_size >> 4);
}
- memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(kctx->key, gcm_ctx->key, keylen);
+ memcpy(kctx->salt, salt, salt_size);
+ memcpy(kctx->key, key_p, keylen);
memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
/* erase key info from driver */
- memset(gcm_ctx->key, 0, keylen);
+ memset(key_p, 0, keylen);
return 0;
}
@@ -288,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk)
SCMD_TLS_FRAG_ENABLE_V(1);
}
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ u32 optname, int cipher_type)
{
struct tls_key_req *kwr;
struct chtls_dev *cdev;
@@ -350,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
kwr->sc_imm.len = cpu_to_be32(klen);
+ lock_sock(sk);
/* key info */
kctx = (struct _key_ctx *)(kwr + 1);
- ret = chtls_key_info(csk, kctx, keylen, optname);
+ ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
if (ret)
goto out_notcb;
@@ -388,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
csk->tlshws.txkey = keyid;
}
+ release_sock(sk);
return ret;
out_notcb:
+ release_sock(sk);
free_tls_keyid(sk);
out_nokey:
kfree_skb(skb);
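The comment in chtls_key_info() above gives the formula for the GCM hash subkey that is appended to the key context: H = CIPH(K, 0 repeated 16 times), i.e. the raw AES key applied to an all-zero block. A minimal sketch of that derivation using the in-kernel AES library from <crypto/aes.h>; the helper name is hypothetical and the driver's AEAD_H_SIZE is assumed to equal the 16-byte AES block:

	/* Sketch only: derive the GCM hash subkey H = AES_K(0^128). */
	#include <crypto/aes.h>
	#include <linux/string.h>

	static int derive_gcm_hash_subkey(const u8 *key, unsigned int keylen,
					  u8 ghash_h[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx aes;
		int ret;

		ret = aes_expandkey(&aes, key, keylen);
		if (ret)
			return ret;

		memset(ghash_h, 0, AES_BLOCK_SIZE);	/* the all-zero input block */
		aes_encrypt(&aes, ghash_h, ghash_h);	/* H is left in ghash_h */

		memzero_explicit(&aes, sizeof(aes));	/* scrub expanded round keys */
		return 0;
	}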
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 18996935d8ba..a038de90b2ea 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -84,7 +84,6 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
struct chtls_listen *clisten;
- int err;
if (sk->sk_protocol != IPPROTO_TCP)
return -EPROTONOSUPPORT;
@@ -100,10 +99,10 @@ static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
clisten->cdev = cdev;
clisten->sk = sk;
mutex_lock(&notify_mutex);
- err = raw_notifier_call_chain(&listen_notify_list,
+ raw_notifier_call_chain(&listen_notify_list,
CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
- return err;
+ return 0;
}
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
@@ -486,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
struct tls_crypto_info *crypto_info, tmp_crypto_info;
struct chtls_sock *csk;
int keylen;
+ int cipher_type;
int rc = 0;
csk = rcu_dereference_sk_user_data(sk);
@@ -509,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * copy keys from user based on GCM cipher type.
+ */
switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
/* Obtain version and type from previous copy */
@@ -525,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
}
keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
- rc = chtls_setkey(csk, keylen, optname);
+ cipher_type = TLS_CIPHER_AES_GCM_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ crypto_info[0] = tmp_crypto_info;
+ rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+ optval + sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_256)
+ - sizeof(*crypto_info));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ cipher_type = TLS_CIPHER_AES_GCM_256;
break;
}
default:
rc = -EINVAL;
goto out;
}
+ rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
return rc;
}
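do_chtls_setsockopt() above reads the key material in two steps: an earlier copy_from_user() fetches the generic struct tls_crypto_info header to learn cipher_type, and the switch then copies only the remaining, cipher-specific tail of the matching tls12_crypto_info_* structure. A minimal sketch of the second step for the AES-GCM-256 case, with a hypothetical helper name:

	/* Sketch only: copy the cipher-specific tail after the generic header. */
	#include <linux/tls.h>
	#include <linux/uaccess.h>

	static int copy_gcm256_tail(struct tls_crypto_info *crypto_info,
				    char __user *optval)
	{
		size_t hdr = sizeof(*crypto_info);
		size_t tail = sizeof(struct tls12_crypto_info_aes_gcm_256) - hdr;

		/* The header (version + cipher_type) was copied by the caller. */
		if (copy_from_user((char *)crypto_info + hdr, optval + hdr, tail))
			return -EFAULT;

		return 0;
	}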
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 73a899e6f837..f4f18bfc2247 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -110,7 +110,6 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -119,11 +118,9 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -132,20 +129,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
tctx->fallback.cip->base.crt_flags |=
(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
- return ret;
+ return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}
static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -154,11 +144,9 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -168,11 +156,7 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_set_flags(tctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(tctx->fallback.skcipher) &
- CRYPTO_TFM_RES_MASK);
- return ret;
+ return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}
static void
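The geode changes above drop the old CRYPTO_TFM_RES_* bookkeeping: with the result flags removed from the API, a driver that punts unsupported key sizes to a software fallback only needs to forward the CRYPTO_TFM_REQ_* flags and return the fallback's setkey result directly. A minimal sketch of that fallback setkey pattern (hypothetical function, not the geode code):

	/* Sketch only: forward request flags and key to a software fallback. */
	#include <crypto/aes.h>
	#include <crypto/skcipher.h>

	static int setkey_via_fallback(struct crypto_skcipher *tfm,
				       struct crypto_skcipher *fallback,
				       const u8 *key, unsigned int len)
	{
		if (len == AES_KEYSIZE_128)
			return 0;	/* assume the hardware handles this size itself */

		crypto_skcipher_clear_flags(fallback, CRYPTO_TFM_REQ_MASK);
		crypto_skcipher_set_flags(fallback,
					  crypto_skcipher_get_flags(tfm) &
					  CRYPTO_TFM_REQ_MASK);
		return crypto_skcipher_setkey(fallback, key, len);
	}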
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index c0e7a85fe129..8851161f722f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -16,16 +16,22 @@ config CRYPTO_DEV_HISI_SEC
config CRYPTO_DEV_HISI_SEC2
tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select CRYPTO_DEV_HISI_QM
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
depends on PCI && PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
- CBC, and XTS cipher mode.
+ CBC, and XTS cipher modes, and AEAD algorithms.
To compile this as a module, choose M here: the module
will be called hisi_sec2.
@@ -44,7 +50,6 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select SG_SPLIT
help
Support for HiSilicon ZIP Driver
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 98f037e6ea3e..5d400d69e8e4 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -123,7 +123,7 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx = hpre_req->ctx;
id = hpre_alloc_req_id(ctx);
- if (id < 0)
+ if (unlikely(id < 0))
return -EINVAL;
ctx->req_list[id] = hpre_req;
@@ -174,8 +174,8 @@ static struct hisi_qp *hpre_get_qp_and_start(void)
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -190,7 +190,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
*tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
- if (dma_mapping_error(dev, *tmp)) {
+ if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
@@ -199,8 +199,8 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -208,11 +208,11 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int shift;
shift = ctx->key_sz - len;
- if (shift < 0)
+ if (unlikely(shift < 0))
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
- if (!ptr)
+ if (unlikely(!ptr))
return -ENOMEM;
if (is_src) {
@@ -226,12 +226,12 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- dma_addr_t tmp;
+ dma_addr_t tmp = 0;
int ret;
/* when the data is dh's source, we should format it */
@@ -241,7 +241,7 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
else
ret = hpre_prepare_dma_buf(hpre_req, data, len,
is_src, &tmp);
- if (ret)
+ if (unlikely(ret))
return ret;
if (is_src)
@@ -253,15 +253,16 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst, struct scatterlist *src)
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
{
struct device *dev = HPRE_DEV(ctx);
struct hpre_sqe *sqe = &req->req;
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (src) {
@@ -274,7 +275,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (req->dst) {
@@ -288,7 +289,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
- void **kreq)
+ void **kreq)
{
struct hpre_asym_request *req;
int err, id, done;
@@ -308,7 +309,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
- if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
return -EINVAL;
@@ -375,7 +376,7 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_sqe *sqe = resp;
- ctx->req_list[sqe->tag]->cb(ctx, resp);
+ ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
@@ -454,33 +455,30 @@ static int hpre_dh_compute_value(struct kpp_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
ret = hpre_msg_request_set(ctx, req, false);
- if (ret)
+ if (unlikely(ret))
return ret;
if (req->src) {
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
if (ctx->crt_g2_mode && !req->src)
- msg->dw0 |= HPRE_ALG_DH_G2;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
- msg->dw0 |= HPRE_ALG_DH;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -520,12 +518,12 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
return -EINVAL;
if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
+ HPRE_BITS_2_BYTES_SHIFT))
return -EINVAL;
sz = ctx->key_sz = params->p_size;
ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
- &ctx->dh.dma_xa_p, GFP_KERNEL);
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
if (!ctx->dh.xa_p)
return -ENOMEM;
@@ -559,13 +557,12 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (ctx->dh.g) {
- memset(ctx->dh.g, 0, sz);
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
ctx->dh.g = NULL;
}
if (ctx->dh.xa_p) {
- memset(ctx->dh.xa_p, 0, sz);
+ memzero_explicit(ctx->dh.xa_p, sz);
dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
ctx->dh.dma_xa_p);
ctx->dh.xa_p = NULL;
@@ -661,9 +658,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -673,22 +667,22 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.pubkey)
+ if (unlikely(!ctx->rsa.pubkey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -696,7 +690,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -716,9 +710,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -728,27 +719,29 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.prikey)
+ if (unlikely(!ctx->rsa.prikey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
if (ctx->crt_g2_mode) {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
- msg->dw0 |= HPRE_ALG_NC_CRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_CRT);
} else {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_NCRT);
}
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -756,7 +749,7 @@ static int hpre_rsa_dec(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -811,10 +804,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
- ctx->rsa.pubkey = NULL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
return -EINVAL;
- }
memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
@@ -836,17 +827,17 @@ static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
return 0;
}
-static int hpre_crt_para_get(char *para, const char *raw,
- unsigned int raw_sz, unsigned int para_size)
+static int hpre_crt_para_get(char *para, size_t para_sz,
+ const char *raw, size_t raw_sz)
{
const char *ptr = raw;
size_t len = raw_sz;
hpre_rsa_drop_leading_zeros(&ptr, &len);
- if (!len || len > para_size)
+ if (!len || len > para_sz)
return -EINVAL;
- memcpy(para + para_size - len, ptr, len);
+ memcpy(para + para_sz - len, ptr, len);
return 0;
}
@@ -864,32 +855,32 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
if (!ctx->rsa.crt_prikey)
return -ENOMEM;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
- rsa_key->dq_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
+ rsa_key->dq, rsa_key->dq_sz);
if (ret)
goto free_key;
offset = hlf_ksz;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
- rsa_key->dp_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->dp, rsa_key->dp_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_Q;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->q, rsa_key->q_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_P;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->p, rsa_key->p_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_INV;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->qinv, rsa_key->qinv_sz);
if (ret)
goto free_key;
@@ -899,7 +890,7 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
free_key:
offset = hlf_ksz * HPRE_CRT_PRMS;
- memset(ctx->rsa.crt_prikey, 0, offset);
+ memzero_explicit(ctx->rsa.crt_prikey, offset);
dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
@@ -924,14 +915,15 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
}
if (ctx->rsa.crt_prikey) {
- memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ memzero_explicit(ctx->rsa.crt_prikey,
+ half_key_sz * HPRE_CRT_PRMS);
dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
}
if (ctx->rsa.prikey) {
- memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
ctx->rsa.dma_prikey);
ctx->rsa.prikey = NULL;
@@ -1043,6 +1035,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->rsa.soft_tfm)) {
@@ -1050,7 +1043,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- return hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx);
+ if (ret)
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+
+ return ret;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
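Several hpre hunks above switch from memset() to memzero_explicit() when scrubbing private-key buffers just before they are freed. The distinction: a plain memset() on memory that is about to be released may legally be optimized away, while memzero_explicit() includes a compiler barrier so the wipe always happens. A minimal sketch of the scrub-then-free pattern for a DMA-coherent key buffer, with a hypothetical helper name:

	/* Sketch only: wipe key material before returning a coherent buffer. */
	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	static void free_key_coherent(struct device *dev, size_t key_sz,
				      void *vaddr, dma_addr_t dma)
	{
		if (!vaddr)
			return;

		memzero_explicit(vaddr, key_sz);	/* cannot be elided by the compiler */
		dma_free_coherent(dev, key_sz, vaddr, dma);
	}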
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 34e0424410bf..401747de67a8 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -73,7 +73,7 @@
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define HPRE_PCI_DEVICE_ID 0xa258
#define HPRE_PCI_VF_DEVICE_ID 0xa259
-#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK 0xfffffffe
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
@@ -106,18 +106,18 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
- { .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
- { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
- { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
- { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
- { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
- { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
- { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
- { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
- { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
- { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
- { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
- { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
+ { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
+ { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
+ { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
+ { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
+ { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
+ { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
+ { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
+ { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
+ { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
{ /* sentinel */ }
};
@@ -490,7 +490,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
return -EINVAL;
}
spin_unlock_irq(&file->lock);
- ret = sprintf(tbuf, "%u\n", val);
+ ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -557,7 +557,7 @@ static const struct file_operations hpre_ctrl_debug_fops = {
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
if (dir)
file_dir = dir;
@@ -571,10 +571,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
@@ -585,7 +583,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -595,10 +592,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
return 0;
}
@@ -609,15 +603,14 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- sprintf(buf, "cluster%d", i);
-
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+ if (ret < 0)
+ return -EINVAL;
tmp_d = debugfs_create_dir(buf, debug->debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -627,9 +620,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -668,9 +659,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
int ret;
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
qm->debug.debug_root = dir;
ret = hisi_qm_debug_init(qm);
@@ -1014,8 +1002,6 @@ static void hpre_register_debugfs(void)
return;
hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
- if (IS_ERR_OR_NULL(hpre_debugfs_root))
- hpre_debugfs_root = NULL;
}
static void hpre_unregister_debugfs(void)
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index b846d73d9a85..13e2d8d7be94 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -9,10 +9,12 @@
#include "../qm.h"
#include "sec_crypto.h"
-/* Cipher resource per hardware SEC queue */
-struct sec_cipher_res {
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
u8 *c_ivin;
dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
};
/* Cipher request of SEC private */
@@ -21,33 +23,35 @@ struct sec_cipher_req {
dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
};
+struct sec_aead_req {
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+ struct aead_request *aead_req;
+};
+
/* SEC request of Crypto */
struct sec_req {
struct sec_sqe sec_sqe;
struct sec_ctx *ctx;
struct sec_qp_ctx *qp_ctx;
- /* Cipher supported only at present */
struct sec_cipher_req c_req;
+ struct sec_aead_req aead_req;
+
int err_type;
int req_id;
/* Status of the SEC request */
- atomic_t fake_busy;
+ bool fake_busy;
};
/**
* struct sec_req_op - Operations for SEC request
- * @get_res: Get resources for TFM on the SEC device
- * @resource_alloc: Allocate resources for queue context on the SEC device
- * @resource_free: Free resources for queue context on the SEC device
* @buf_map: DMA map the SGL buffers of the request
* @buf_unmap: DMA unmap the SGL buffers of the request
* @bd_fill: Fill the SEC queue BD
@@ -56,18 +60,25 @@ struct sec_req {
* @process: Main processing logic of Skcipher
*/
struct sec_req_op {
- int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
- int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
- void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
- void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
+/* SEC auth context */
+struct sec_auth_ctx {
+ dma_addr_t a_key_dma;
+ u8 *a_key;
+ u8 a_key_len;
+ u8 mac_len;
+ u8 a_alg;
+ struct crypto_shash *hash_tfm;
+};
+
/* SEC cipher context which cipher's relatives */
struct sec_cipher_ctx {
u8 *c_key;
@@ -83,9 +94,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req **req_list;
+ struct sec_req *req_list[QM_Q_DEPTH];
struct idr req_idr;
- void *alg_meta_data;
+ struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
struct mutex req_lock;
struct hisi_acc_sgl_pool *c_in_pool;
@@ -93,6 +104,11 @@ struct sec_qp_ctx {
atomic_t pending_reqs;
};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
/* SEC Crypto TFM context which defines queue and cipher .etc relatives */
struct sec_ctx {
struct sec_qp_ctx *qp_ctx;
@@ -110,7 +126,10 @@ struct sec_ctx {
/* Current cyclic index to select a queue for decipher */
atomic_t dec_qcyclic;
+
+ enum sec_alg_type alg_type;
struct sec_cipher_ctx c_ctx;
+ struct sec_auth_ctx a_ctx;
};
enum sec_endian {
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0a5391fff485..a2cfcc9ccd94 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -3,7 +3,11 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/authenc.h>
#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
@@ -27,6 +31,10 @@
#define SEC_SRC_SGL_OFFSET 7
#define SEC_CKEY_OFFSET 9
#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
@@ -35,12 +43,19 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-static DEFINE_MUTEX(sec_algs_lock);
-static unsigned int sec_active_devs;
+static atomic_t sec_active_devs;
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
@@ -50,7 +65,7 @@ static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
ctx->hlf_q_num;
}
-static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
atomic_dec(&ctx->enc_qcyclic);
@@ -67,7 +82,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
- if (req_id < 0) {
+ if (unlikely(req_id < 0)) {
dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
return req_id;
}
@@ -82,7 +97,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
@@ -95,36 +110,66 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
+static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac = mac_out + SEC_MAX_MAC_LEN;
+ struct scatterlist *sgl = aead_req->src;
+ size_t sz;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
+ aead_req->cryptlen + aead_req->assoclen -
+ authsize);
+ if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
+ dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
u16 done, flag;
+ int err = 0;
u8 type;
- struct sec_req *req;
type = bd->type_cipher_auth & SEC_TYPE_MASK;
- if (type == SEC_BD_TYPE2) {
- req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
- req->err_type = bd->type2.error_type;
-
- done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
- flag = (le16_to_cpu(bd->type2.done_flag) &
- SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
- if (req->err_type || done != 0x1 || flag != 0x2)
- dev_err(SEC_CTX_DEV(req->ctx),
- "err_type[%d],done[%d],flag[%d]\n",
- req->err_type, done, flag);
- } else {
+ if (unlikely(type != SEC_BD_TYPE2)) {
pr_err("err bd type [%d]\n", type);
return;
}
- atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
+ (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
+ dev_err(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
+ }
+
+ if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
+ err = sec_aead_verify(req, qp_ctx);
- req->ctx->req_op->buf_unmap(req->ctx, req);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
- req->ctx->req_op->callback(req->ctx, req);
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
}
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
@@ -137,11 +182,11 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- if (ret == -EBUSY)
+ if (unlikely(ret == -EBUSY))
return -ENOBUFS;
if (!ret) {
- if (atomic_read(&req->fake_busy))
+ if (req->fake_busy)
ret = -EBUSY;
else
ret = -EINPROGRESS;
@@ -150,6 +195,91 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return ret;
}
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
+}
+
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
+
+static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ &res->out_mac_dma, GFP_KERNEL);
+ if (!res->out_mac)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].out_mac_dma = res->out_mac_dma +
+ i * (SEC_MAX_MAC_LEN << 1);
+ res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
+ }
+
+ return 0;
+}
+
+static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->out_mac)
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac, res->out_mac_dma);
+}
+
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
+ int ret;
+
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->alg_type == SEC_AEAD) {
+ ret = sec_alloc_mac_resource(dev, res);
+ if (ret)
+ goto get_fail;
+ }
+
+ return 0;
+get_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
+}
+
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
@@ -173,15 +303,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list)
- goto err_destroy_idr;
-
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_free_req_list;
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
@@ -191,7 +317,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
goto err_free_c_in_pool;
}
- ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -202,13 +328,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_req_list:
- kfree(qp_ctx->req_list);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
hisi_qm_release_qp(qp);
@@ -222,66 +346,42 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
idr_destroy(&qp_ctx->req_idr);
- kfree(qp_ctx->req_list);
hisi_qm_release_qp(qp_ctx->qp);
}
-static int sec_skcipher_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx;
struct sec_dev *sec;
- struct device *dev;
- struct hisi_qm *qm;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
-
sec = sec_find_device(cpu_to_node(smp_processor_id()));
if (!sec) {
- pr_err("find no Hisilicon SEC device!\n");
+ pr_err("Can not find proper Hisilicon SEC device!\n");
return -ENODEV;
}
ctx->sec = sec;
- qm = &sec->qm;
- dev = &qm->pdev->dev;
- ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx)
return -ENOMEM;
for (i = 0; i < sec->ctx_q_num; i++) {
- ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
- c_ctx = &ctx->c_ctx;
- c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
- if (c_ctx->ivsize > SEC_IV_SIZE) {
- dev_err(dev, "get error iv size!\n");
- ret = -EINVAL;
- goto err_sec_release_qp_ctx;
- }
- c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
- &c_ctx->c_key_dma, GFP_KERNEL);
- if (!c_ctx->c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
return 0;
-
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -290,17 +390,9 @@ err_sec_release_qp_ctx:
return ret;
}
-static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- int i = 0;
-
- if (c_ctx->c_key) {
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
- c_ctx->c_key, c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -308,6 +400,85 @@ static void sec_skcipher_exit(struct crypto_skcipher *tfm)
kfree(ctx->qp_ctx);
}
+static int sec_cipher_init(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_cipher_uninit(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+}
+
+static int sec_auth_init(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &a_ctx->a_key_dma, GFP_KERNEL);
+ if (!a_ctx->a_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_auth_uninit(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key, a_ctx->a_key_dma);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ctx = crypto_skcipher_ctx(tfm);
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
+
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
const u32 keylen,
const enum sec_cmode c_mode)
@@ -420,62 +591,8 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int sec_skcipher_get_res(struct sec_ctx *ctx,
- struct sec_req *req)
-{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
- struct sec_cipher_req *c_req = &req->c_req;
- int req_id = req->req_id;
-
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
-
- return 0;
-}
-
-static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_res *res;
- int i;
-
- res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
- &res->c_ivin_dma, GFP_KERNEL);
- if (!res->c_ivin) {
- kfree(res);
- return -ENOMEM;
- }
-
- for (i = 1; i < QM_Q_DEPTH; i++) {
- res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
- res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
- }
- qp_ctx->alg_meta_data = res;
-
- return 0;
-}
-
-static void sec_skcipher_resource_free(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_cipher_res *res = qp_ctx->alg_meta_data;
- struct device *dev = SEC_CTX_DEV(ctx);
-
- if (!res)
- return;
-
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
- kfree(res);
-}
-
-static int sec_skcipher_map(struct device *dev, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static int sec_cipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -509,12 +626,20 @@ static int sec_skcipher_map(struct device *dev, struct sec_req *req,
return 0;
}
+static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+}
+
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sq = req->c_req.sk_req;
- return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
- c_req->sk_req->src, c_req->sk_req->dst);
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
}
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
@@ -523,10 +648,127 @@ static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_req *c_req = &req->c_req;
struct skcipher_request *sk_req = c_req->sk_req;
- if (sk_req->dst != sk_req->src)
- hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+ sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+}
+
+static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
+ struct crypto_authenc_keys *keys)
+{
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aead aes key error!\n");
+ return -EINVAL;
+ }
+ memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
+
+ return 0;
+}
+
+static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
+ struct crypto_authenc_keys *keys)
+{
+ struct crypto_shash *hash_tfm = ctx->hash_tfm;
+ SHASH_DESC_ON_STACK(shash, hash_tfm);
+ int blocksize, ret;
- hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+ if (!keys->authkeylen) {
+ pr_err("hisi_sec2: aead auth key error!\n");
+ return -EINVAL;
+ }
+
+ blocksize = crypto_shash_blocksize(hash_tfm);
+ if (keys->authkeylen > blocksize) {
+ ret = crypto_shash_digest(shash, keys->authkey,
+ keys->authkeylen, ctx->a_key);
+ if (ret) {
+ pr_err("hisi_sec2: aead auth digest error!\n");
+ return -EINVAL;
+ }
+ ctx->a_key_len = blocksize;
+ } else {
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ ctx->a_key_len = keys->authkeylen;
+ }
+
+ return 0;
+}
+
+static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ const u32 keylen, const enum sec_hash_alg a_alg,
+ const enum sec_calg c_alg,
+ const enum sec_mac_len mac_len,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct crypto_authenc_keys keys;
+ int ret;
+
+ ctx->a_ctx.a_alg = a_alg;
+ ctx->c_ctx.c_alg = c_alg;
+ ctx->a_ctx.mac_len = mac_len;
+ c_ctx->c_mode = c_mode;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ ret = sec_aead_aes_set_key(c_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ goto bad_key;
+ }
+
+ return 0;
+bad_key:
+ memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
+
+ return -EINVAL;
+}
+
+
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
+ u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
+ SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
+ SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
+ SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+
+static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+}
+
+static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *cq = &req->c_req;
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ sec_cipher_unmap(dev, cq, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
@@ -534,13 +776,13 @@ static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ctx->req_op->do_transfer(ctx, req);
ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto unmap_req_buf;
return ret;
@@ -559,10 +801,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
- c_req->c_len = sk_req->cryptlen;
- memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -570,14 +811,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_sqe *sec_sqe = &req->sec_sqe;
- u8 de = 0;
u8 scene, sa_type, da_type;
u8 bd_type, cipher;
+ u8 de = 0;
memset(sec_sqe, 0, sizeof(struct sec_sqe));
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.c_ivin_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
@@ -611,25 +853,37 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
return 0;
}
-static void sec_update_iv(struct sec_req *req)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct skcipher_request *sk_req = req->c_req.sk_req;
u32 iv_size = req->ctx->c_ctx.ivsize;
struct scatterlist *sgl;
+ unsigned int cryptlen;
size_t sz;
+ u8 *iv;
if (req->c_req.encrypt)
- sgl = sk_req->dst;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
else
- sgl = sk_req->src;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
- iv_size, sk_req->cryptlen - iv_size);
- if (sz != iv_size)
+ if (alg_type == SEC_SKCIPHER) {
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
+ } else {
+ iv = aead_req->iv;
+ cryptlen = aead_req->cryptlen;
+ }
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -638,13 +892,109 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
sec_free_req_id(req);
/* IV output at encrypto of CBC mode */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
- sec_update_iv(req);
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
- if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1)
+ if (req->fake_busy)
sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- sk_req->base.complete(&sk_req->base, req->err_type);
+ sk_req->base.complete(&sk_req->base, err);
+}
+
+static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+
+ memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+}
+
+static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+
+ sec_sqe->type2.mac_key_alg =
+ cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)((ctx->a_key_len) /
+ SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
+
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
+
+ if (dir)
+ sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+ else
+ sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+ sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
+
+ sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sec_sqe->type2.mac_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+}
+
+static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ int ret;
+
+ ret = sec_skcipher_bd_fill(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+ return ret;
+ }
+
+ sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+
+ return 0;
+}
+
+static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+{
+ struct aead_request *a_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_cipher_req *c_req = &req->c_req;
+ size_t authsize = crypto_aead_authsize(tfm);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ size_t sz;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+
+ if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
+ sec_update_iv(req, SEC_AEAD);
+
+ /* Copy output mac */
+ if (!err && c_req->encrypt) {
+ struct scatterlist *sgl = a_req->dst;
+
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
+ qp_ctx->res[req->req_id].out_mac,
+ authsize, a_req->cryptlen +
+ a_req->assoclen);
+
+ if (unlikely(sz != authsize)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ err = -EINVAL;
+ }
+ }
+
+ sec_free_req_id(req);
+
+ if (req->fake_busy)
+ a_req->base.complete(&a_req->base, -EINPROGRESS);
+
+ a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
@@ -653,37 +1003,30 @@ static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
- sec_put_queue_id(ctx, req);
+ sec_free_queue_id(ctx, req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int issue_id, ret;
+ int queue_id;
/* To load balance */
- issue_id = sec_get_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[issue_id];
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (req->req_id < 0) {
- sec_put_queue_id(ctx, req);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
return req->req_id;
}
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- atomic_set(&req->fake_busy, 1);
+ req->fake_busy = true;
else
- atomic_set(&req->fake_busy, 0);
-
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- atomic_dec(&qp_ctx->pending_reqs);
- sec_request_uninit(ctx, req);
- dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
- }
+ req->fake_busy = false;
- return ret;
+ return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
@@ -691,20 +1034,20 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = sec_request_init(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ret = sec_request_transfer(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto err_uninit_req;
/* Output IV as decrypto */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- sec_update_iv(req);
+ sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
@@ -712,9 +1055,16 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
err_send_req:
/* As failing, restore the IV from user */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
- ctx->c_ctx.ivsize);
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ else
+ memcpy(req->aead_req.aead_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ }
sec_request_untransfer(ctx, req);
err_uninit_req:
@@ -723,10 +1073,7 @@ err_uninit_req:
return ret;
}
-static struct sec_req_op sec_req_ops_tbl = {
- .get_res = sec_skcipher_get_res,
- .resource_alloc = sec_skcipher_resource_alloc,
- .resource_free = sec_skcipher_resource_free,
+static const struct sec_req_op sec_skcipher_req_ops = {
.buf_map = sec_skcipher_sgl_map,
.buf_unmap = sec_skcipher_sgl_unmap,
.do_transfer = sec_skcipher_copy_iv,
@@ -736,39 +1083,139 @@ static struct sec_req_op sec_req_ops_tbl = {
.process = sec_process,
};
+static const struct sec_req_op sec_aead_req_ops = {
+ .buf_map = sec_aead_sgl_map,
+ .buf_unmap = sec_aead_sgl_unmap,
+ .do_transfer = sec_aead_copy_iv,
+ .bd_fill = sec_aead_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_aead_callback,
+ .process = sec_process,
+};
+
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->req_op = &sec_req_ops_tbl;
+ ctx->req_op = &sec_skcipher_req_ops;
return sec_skcipher_init(tfm);
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
- sec_skcipher_exit(tfm);
+ sec_skcipher_uninit(tfm);
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx,
- struct skcipher_request *sk_req)
+static int sec_aead_init(struct crypto_aead *tfm)
{
- u8 c_alg = ctx->c_ctx.c_alg;
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->alg_type = SEC_AEAD;
+ ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+ return -EINVAL;
+ }
+
+ ctx->req_op = &sec_aead_req_ops;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_auth_init(ctx);
+ if (ret)
+ goto err_auth_init;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return ret;
+
+err_cipher_init:
+ sec_auth_uninit(ctx);
+err_auth_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_aead_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_auth_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
+static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ int ret;
+
+ ret = sec_aead_init(tfm);
+ if (ret) {
+ pr_err("hisi_sec2: aead init error!\n");
+ return ret;
+ }
+
+ auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(auth_ctx->hash_tfm)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ sec_aead_exit(tfm);
+ return PTR_ERR(auth_ctx->hash_tfm);
+ }
+
+ return 0;
+}
+
+static void sec_aead_ctx_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+}
+
+static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha1");
+}
+
+static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha256");
+}
+
+static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha512");
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- if (!sk_req->src || !sk_req->dst) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
-
+ sreq->c_req.c_len = sk_req->cryptlen;
if (c_alg == SEC_CALG_3DES) {
- if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher 3des input length error!\n");
return -EINVAL;
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher aes input length error!\n");
return -EINVAL;
}
@@ -789,14 +1236,14 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (!sk_req->cryptlen)
return 0;
- ret = sec_skcipher_param_check(ctx, sk_req);
- if (ret)
- return ret;
-
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
return ctx->req_op->process(ctx, req);
}
@@ -837,7 +1284,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_algs[] = {
+static struct skcipher_alg sec_skciphers[] = {
SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
AES_BLOCK_SIZE, 0)
@@ -867,23 +1314,133 @@ static struct skcipher_alg sec_algs[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct aead_request *req = sreq->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ size_t authsize = crypto_aead_authsize(tfm);
+
+ if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ return -EINVAL;
+ }
+
+ /* Support AES only */
+ if (unlikely(c_alg != SEC_CALG_AES)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ return -EINVAL;
+
+ }
+ if (sreq->c_req.encrypt)
+ sreq->c_req.c_len = req->cryptlen;
+ else
+ sreq->c_req.c_len = req->cryptlen - authsize;
+
+ if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ req->aead_req.aead_req = a_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ ret = sec_aead_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_aead_encrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, true);
+}
+
+static int sec_aead_decrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, false);
+}
+
+#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+ ctx_exit, blk_size, iv_size, max_authsize)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_aead_decrypt,\
+ .encrypt = sec_aead_encrypt,\
+ .ivsize = iv_size,\
+ .maxauthsize = max_authsize,\
+}
+
+#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
+ SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
+ sec_aead_ctx_exit, blksize, ivsize, authsize)
+
+static struct aead_alg sec_aeads[] = {
+ SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
+ sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
+ sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
+ sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+};
+
int sec_register_to_crypto(void)
{
int ret = 0;
/* To avoid repeat register */
- mutex_lock(&sec_algs_lock);
- if (++sec_active_devs == 1)
- ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ ret = crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ if (ret)
+ return ret;
+
+ ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ if (ret)
+ goto reg_aead_fail;
+ }
+
+ return ret;
+
+reg_aead_fail:
+ crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(void)
{
- mutex_lock(&sec_algs_lock);
- if (--sec_active_devs == 0)
- crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ }
}
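
The registration path above replaces the mutex-protected device count with an atomic counter: only the first probed device registers the algorithms, only the last removed one unregisters them, and the skcipher registration is rolled back if the aead registration fails. A condensed sketch of the same pattern, with hypothetical my_* names and the algorithm tables passed in as parameters rather than taken from the driver:

#include <linux/atomic.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

static atomic_t my_active_devs = ATOMIC_INIT(0);

/* First caller registers both algorithm sets; the skciphers are rolled
 * back if the aead registration fails.
 */
int my_register_algs(struct skcipher_alg *sks, int nr_sk,
                     struct aead_alg *aeads, int nr_aead)
{
        int ret = 0;

        if (atomic_inc_return(&my_active_devs) == 1) {
                ret = crypto_register_skciphers(sks, nr_sk);
                if (ret)
                        return ret;

                ret = crypto_register_aeads(aeads, nr_aead);
                if (ret)
                        crypto_unregister_skciphers(sks, nr_sk);
        }

        return ret;
}

/* Last caller unregisters everything again. */
void my_unregister_algs(struct skcipher_alg *sks, int nr_sk,
                        struct aead_alg *aeads, int nr_aead)
{
        if (atomic_dec_return(&my_active_devs) == 0) {
                crypto_unregister_skciphers(sks, nr_sk);
                crypto_unregister_aeads(aeads, nr_aead);
        }
}
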
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 097dce828340..b2786e17d8fe 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -14,6 +14,18 @@ enum sec_calg {
SEC_CALG_SM4 = 0x3,
};
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
@@ -34,6 +46,12 @@ enum sec_bd_type {
SEC_BD_TYPE2 = 0x2,
};
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
enum sec_cipher_dir {
SEC_CIPHER_ENC = 0x1,
SEC_CIPHER_DEC = 0x2,
@@ -48,8 +66,8 @@ enum sec_addr_type {
struct sec_sqe_type2 {
/*
- * mac_len: 0~5 bits
- * a_key_len: 6~10 bits
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
* a_alg: 11~16 bits
*/
__le32 mac_key_alg;
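
The corrected comment gives mac_len bits 0..4, a_key_len bits 5..10 and a_alg bits 11..16 of mac_key_alg. A hedged sketch of packing such a word with GENMASK()/FIELD_PREP(); the mask names and the field units (whether lengths are in bytes or 32-bit words) are assumptions here, not taken from the driver's fill code:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_MAC_LEN_M    GENMASK(4, 0)   /* MAC length */
#define MY_AKEY_LEN_M   GENMASK(10, 5)  /* auth key length */
#define MY_AALG_M       GENMASK(16, 11) /* auth algorithm selector */

static __le32 my_pack_mac_key_alg(u32 mac_len, u32 akey_len, u32 a_alg)
{
        u32 v = FIELD_PREP(MY_MAC_LEN_M, mac_len) |
                FIELD_PREP(MY_AKEY_LEN_M, akey_len) |
                FIELD_PREP(MY_AALG_M, a_alg);

        return cpu_to_le32(v);
}
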
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index ab742dfbab99..2bbaf1e2dae7 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -32,6 +32,7 @@
#define SEC_PF_DEF_Q_NUM 64
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -221,7 +222,7 @@ static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
if (ret)
return -EINVAL;
- if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
}
@@ -235,7 +236,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = {
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -608,13 +609,13 @@ static const struct file_operations sec_dbg_fops = {
.write = sec_debug_write,
};
-static int debugfs_atomic64_t_get(void *data, u64 *val)
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
- *val = atomic64_read((atomic64_t *)data);
- return 0;
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL,
- "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
static int sec_core_debug_init(struct sec_dev *sec)
{
@@ -636,11 +637,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt,
- &fops_atomic64_t_ro);
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
- &fops_atomic64_t_ro);
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
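
The renamed helpers above expose atomic64 DFX counters through a read-only debugfs attribute. A self-contained sketch of the same DEFINE_DEBUGFS_ATTRIBUTE() pattern, with hypothetical names:

#include <linux/atomic.h>
#include <linux/debugfs.h>

static atomic64_t my_send_cnt = ATOMIC64_INIT(0);

static int my_debugfs_atomic64_get(void *data, u64 *val)
{
        *val = atomic64_read((atomic64_t *)data);
        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(my_atomic64_ops, my_debugfs_atomic64_get,
                         NULL, "%lld\n");

static void my_debug_init(struct dentry *parent)
{
        /* read-only file that prints the counter value */
        debugfs_create_file("send_cnt", 0444, parent, &my_send_cnt,
                            &my_atomic64_ops);
}
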
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 012023c347b1..0e8c7e324fb4 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -202,18 +202,21 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int i, ret, sg_n;
+ int i, sg_n, sg_n_mapped;
if (!dev || !sgl || !pool || !hw_sgl_dma)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
- if (sg_n > pool->sge_nr)
+
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ if (!sg_n_mapped)
return ERR_PTR(-EINVAL);
- ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!ret)
+ if (sg_n_mapped > pool->sge_nr) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-EINVAL);
+ }
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
@@ -224,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
- for_each_sg(sgl, sg, sg_n, i) {
+ for_each_sg(sgl, sg, sg_n_mapped, i) {
sg_map_to_hw_sg(sg, curr_hw_sge);
inc_hw_sgl_sge(curr_hw_sgl);
curr_hw_sge++;
@@ -260,7 +263,3 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_DESCRIPTION("HiSilicon Accelerator SGL support");
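
The sgl.c change maps the scatterlist first and only then checks the mapped entry count against the pool's SGE capacity, unmapping again on overflow and walking sg_n_mapped entries instead of sg_nents(). A minimal sketch of that map-then-validate order, with placeholder names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Sketch: map first, then validate against the hardware SGE limit. */
static int my_map_sgl(struct device *dev, struct scatterlist *sgl,
                      unsigned int max_sges)
{
        struct scatterlist *sg;
        int sg_n = sg_nents(sgl);
        int sg_n_mapped, i;

        sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
        if (!sg_n_mapped)
                return -EINVAL;

        if (sg_n_mapped > max_sges) {
                /* still too many entries even after possible IOMMU merging */
                dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
                return -EINVAL;
        }

        for_each_sg(sgl, sg, sg_n_mapped, i) {
                /* program sg_dma_address(sg) / sg_dma_len(sg) into hardware */
        }

        return 0;
}
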
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 79fc4dd3fe00..bc1db26598bb 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -11,6 +11,10 @@
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
+/* hisi_zip_sqe dw7 */
+#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw8 */
+#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB 0x02
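
The new dw7/dw8 masks let a ZIP SQE carry data offsets inside the source and destination SGLs, so the gzip/zlib header can be skipped by hardware instead of splitting the scatterlist in software. A sketch of filling those words with FIELD_PREP(), against a hypothetical SQE layout (field names and types are illustrative only):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_IN_SGE_DATA_OFFSET_M  GENMASK(23, 0)
#define MY_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)

struct my_zip_sqe {
        u32 input_data_length;
        u32 dest_avail_out;
        u32 dw7;
        u32 dw8;
};

static void my_fill_offsets(struct my_zip_sqe *sqe, u32 slen, u32 dlen,
                            u32 sskip, u32 dskip)
{
        /* lengths exclude the skipped (header) bytes */
        sqe->input_data_length = slen - sskip;
        sqe->dest_avail_out = dlen - dskip;
        sqe->dw7 = FIELD_PREP(MY_IN_SGE_DATA_OFFSET_M, sskip);
        sqe->dw8 = FIELD_PREP(MY_OUT_SGE_DATA_OFFSET_M, dskip);
}
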
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 795428c1d07e..9815d5e3ccd0 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -46,10 +46,8 @@ enum hisi_zip_alg_type {
struct hisi_zip_req {
struct acomp_req *req;
- struct scatterlist *src;
- struct scatterlist *dst;
- size_t slen;
- size_t dlen;
+ int sskip;
+ int dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
@@ -119,13 +117,15 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen)
+ u32 dlen, int sskip, int dskip)
{
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen;
+ sqe->input_data_length = slen - sskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen;
+ sqe->dest_avail_out = dlen - dskip;
sqe->source_addr_l = lower_32_bits(s_addr);
sqe->source_addr_h = upper_32_bits(s_addr);
sqe->dest_addr_l = lower_32_bits(d_addr);
@@ -327,11 +327,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP)
- kfree(req->dst);
- else
- kfree(req->src);
-
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
memset(req, 0, sizeof(struct hisi_zip_req));
@@ -359,8 +354,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
}
dlen = sqe->produced;
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
acomp_req->dlen = dlen + head_size;
@@ -454,20 +449,6 @@ static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
}
}
-static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes,
- size_t remains, struct scatterlist **out)
-{
-#define SPLIT_NUM 2
- size_t split_sizes[SPLIT_NUM];
- int out_mapped_nents[SPLIT_NUM];
-
- split_sizes[0] = bytes;
- split_sizes[1] = remains;
-
- return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out,
- out_mapped_nents, GFP_KERNEL);
-}
-
static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_qp_ctx *qp_ctx,
size_t head_size, bool is_comp)
@@ -475,31 +456,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *q = req_q->q;
struct hisi_zip_req *req_cache;
- struct scatterlist *out[2];
- struct scatterlist *sgl;
- size_t len;
- int ret, req_id;
-
- /*
- * remove/add zlib/gzip head, as hardware operations do not include
- * comp head. so split req->src to get sgl without heads in acomp, or
- * add comp head to req->dst ahead of that hardware output compressed
- * data in sgl splited from req->dst without comp head.
- */
- if (is_comp) {
- sgl = req->dst;
- len = req->dlen - head_size;
- } else {
- sgl = req->src;
- len = req->slen - head_size;
- }
-
- ret = get_sg_skip_bytes(sgl, head_size, len, out);
- if (ret)
- return ERR_PTR(ret);
-
- /* sgl for comp head is useless, so free it now */
- kfree(out[0]);
+ int req_id;
write_lock(&req_q->req_lock);
@@ -507,7 +464,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- kfree(out[1]);
return ERR_PTR(-EBUSY);
}
set_bit(req_id, req_q->req_bitmap);
@@ -515,16 +471,13 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
+
if (is_comp) {
- req_cache->src = req->src;
- req_cache->dst = out[1];
- req_cache->slen = req->slen;
- req_cache->dlen = req->dlen - head_size;
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
} else {
- req_cache->src = out[1];
- req_cache->dst = req->dst;
- req_cache->slen = req->slen - head_size;
- req_cache->dlen = req->dlen;
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
}
write_unlock(&req_q->req_lock);
@@ -536,6 +489,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
+ struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
@@ -543,16 +497,16 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
dma_addr_t output;
int ret;
- if (!req->src || !req->slen || !req->dst || !req->dlen)
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
return -EINVAL;
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool,
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req->req_id << 1, &input);
if (IS_ERR(req->hw_src))
return PTR_ERR(req->hw_src);
req->dma_src = input;
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool,
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
&output);
if (IS_ERR(req->hw_dst)) {
@@ -561,8 +515,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen,
- req->dlen);
+ hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ a_req->dlen, req->sskip, req->dskip);
hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
hisi_zip_config_tag(zip_sqe, req->req_id);
@@ -574,9 +528,9 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
return ret;
}
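
With the offsets above, hisi_zip_create_req() no longer needs sg_split(): compression leaves room for the header in the output, decompression skips the header in the input. A small sketch of that choice, with hypothetical request fields:

#include <linux/types.h>

struct my_zip_req {
        int sskip;      /* input bytes for hardware to skip */
        int dskip;      /* output bytes for hardware to skip */
};

static void my_set_head_skip(struct my_zip_req *req, bool is_comp,
                             size_t head_size)
{
        if (is_comp) {
                /* compression: software prepends the header to the output */
                req->sskip = 0;
                req->dskip = head_size;
        } else {
                /* decompression: software strips the header from the input */
                req->sskip = head_size;
                req->dskip = 0;
        }
}
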
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe4cc8babe1c..25d5227f74a1 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -332,10 +332,10 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
struct dma_slave_config dma_conf;
int err = -EINVAL;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->bus_addr;
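
img-hash now uses dma_request_chan(), which returns an ERR_PTR-encoded error (possibly -EPROBE_DEFER) instead of NULL, so the probe path can propagate the real error code. A hedged sketch of the pattern, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static struct dma_chan *my_request_tx_chan(struct device *dev)
{
        struct dma_chan *chan;

        chan = dma_request_chan(dev, "tx");
        if (IS_ERR(chan)) {
                /* may be -EPROBE_DEFER; hand the real error to the caller */
                dev_err(dev, "couldn't acquire a slave DMA channel\n");
                return chan;    /* caller checks IS_ERR() */
        }

        return chan;
}
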
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 64894d8b442a..2cb53fbae841 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -501,8 +501,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
- priv->config.cd_size,
+ writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
+ (priv->config.cd_offset << 14) | priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
@@ -974,16 +974,18 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
+ struct safexcel_token *dmmy;
int ret = 0;
/* Prepare command descriptor */
- cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
+ &dmmy);
if (IS_ERR(cdesc))
return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
- cdesc->control_data.refresh = 0;
+ cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
/* Prepare result descriptor */
@@ -1331,6 +1333,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+ priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
/* res token is behind the descr, but ofs must be rounded to buswdth */
priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
@@ -1341,6 +1344,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
/* convert dwords to bytes */
priv->config.cd_offset *= sizeof(u32);
+ priv->config.cdsh_offset *= sizeof(u32);
priv->config.rd_offset *= sizeof(u32);
priv->config.res_offset *= sizeof(u32);
}
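
safexcel_configure() now also derives cdsh_offset, the offset of the separate per-descriptor token block, rounded up to the engine's bus data width like the other descriptor offsets. A small sketch of that rounding; treating hwdataw as a hardware-reported log2 width in 32-bit words is an assumption here:

#include <linux/bits.h>
#include <linux/types.h>

/* Round a size in 32-bit words up to the engine's bus data width. */
static u32 my_round_to_buswidth(u32 size_dwords, u32 hwdataw)
{
        u32 mask = BIT(hwdataw) - 1;

        return (size_dwords + mask) & ~mask;
}
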
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index b4624b5687ce..94016c505abb 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -40,7 +40,8 @@
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 19
+#define EIP197_EMB_TOKENS 4 /* Pad CD to 16 dwords */
+#define EIP197_MAX_TOKENS 16
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
@@ -207,6 +208,7 @@
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
+#define EIP197_CDR_DESC_MODE_ADCP BIT(30)
/* EIP197_HIA_xDR_DMA_CFG */
#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
@@ -277,9 +279,9 @@
#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
-#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29))
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29)
#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29)
-#define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30)
/* EIP197_HIA_DFE/DSE_THR_CTRL */
#define EIP197_DxE_THR_CTRL_EN BIT(30)
@@ -553,6 +555,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
{
token->opcode = EIP197_TOKEN_OPCODE_NOOP;
token->packet_length = BIT(2);
+ token->stat = 0;
+ token->instructions = 0;
}
/* Instructions */
@@ -574,14 +578,13 @@ struct safexcel_control_data_desc {
u16 application_id;
u16 rsvd;
- u8 refresh:2;
- u32 context_lo:30;
+ u32 context_lo;
u32 context_hi;
u32 control0;
u32 control1;
- u32 token[EIP197_MAX_TOKENS];
+ u32 token[EIP197_EMB_TOKENS];
} __packed;
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
@@ -591,7 +594,10 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
+#define EIP197_TYPE_BCLA 0x0
#define EIP197_TYPE_EXTENDED 0x3
+#define EIP197_CONTEXT_SMALL 0x2
+#define EIP197_CONTEXT_SIZE_MASK 0x3
/* Basic Command Descriptor format */
struct safexcel_command_desc {
@@ -599,13 +605,16 @@ struct safexcel_command_desc {
u8 rsvd0:5;
u8 last_seg:1;
u8 first_seg:1;
- u16 additional_cdata_size:8;
+ u8 additional_cdata_size:8;
u32 rsvd1;
u32 data_lo;
u32 data_hi;
+ u32 atok_lo;
+ u32 atok_hi;
+
struct safexcel_control_data_desc control_data;
} __packed;
@@ -629,15 +638,20 @@ enum eip197_fw {
struct safexcel_desc_ring {
void *base;
+ void *shbase;
void *base_end;
+ void *shbase_end;
dma_addr_t base_dma;
+ dma_addr_t shbase_dma;
/* write and read pointers */
void *write;
+ void *shwrite;
void *read;
/* descriptor element offset */
- unsigned offset;
+ unsigned int offset;
+ unsigned int shoffset;
};
enum safexcel_alg_type {
@@ -652,6 +666,7 @@ struct safexcel_config {
u32 cd_size;
u32 cd_offset;
+ u32 cdsh_offset;
u32 rd_size;
u32 rd_offset;
@@ -862,7 +877,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 len,
u32 full_data_len,
- dma_addr_t context);
+ dma_addr_t context,
+ struct safexcel_token **atoken);
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index c02995694b41..0c5e80c3f6e3 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -47,8 +47,12 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- char aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
- char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
+ u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aadskip;
+ u8 blocksz;
+ u32 ivmask;
+ u32 ctrinit;
__le32 key[16];
u32 nonce;
@@ -72,251 +76,298 @@ struct safexcel_cipher_req {
int nr_src, nr_dst;
};
-static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
- struct safexcel_command_desc *cdesc)
+static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
{
- u32 block_sz = 0;
-
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
- ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
-
- if (ctx->alg == SAFEXCEL_CHACHA20 ||
- ctx->xcm == EIP197_XCM_MODE_CCM) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] =
- (__force u32)cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
- (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* 96 bit IV part */
- memcpy(&cdesc->control_data.token[0], iv, 12);
-
- if (ctx->alg == SAFEXCEL_CHACHA20) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- *(__be32 *)&cdesc->control_data.token[3] =
- cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return 4;
+ }
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 96 bit nonce part */
memcpy(&cdesc->control_data.token[0], &iv[4], 12);
/* 32 bit counter */
cdesc->control_data.token[3] = *(u32 *)iv;
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* Variable length IV part */
- memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
- /* Start variable length counter at 0 */
- memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
- 0, iv[0] + 1);
-
- return;
+ return 4;
}
- if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
- switch (ctx->alg) {
- case SAFEXCEL_DES:
- block_sz = DES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_3DES:
- block_sz = DES3_EDE_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_SM4:
- block_sz = SM4_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_AES:
- block_sz = AES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- default:
- break;
- }
- memcpy(cdesc->control_data.token, iv, block_sz);
- }
+ cdesc->control_data.options |= ctx->ivmask;
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
+ return ctx->blocksz / sizeof(u32);
}
static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
u32 length)
{
struct safexcel_token *token;
+ int ivlen;
- safexcel_cipher_token(ctx, iv, cdesc);
+ ivlen = safexcel_skcipher_iv(ctx, iv, cdesc);
+ if (ivlen == 4) {
+ /* No space in cdesc, instruction moves to atoken */
+ cdesc->additional_cdata_size = 1;
+ token = atoken;
+ } else {
+ /* Everything fits in cdesc */
+ token = (struct safexcel_token *)(cdesc->control_data.token + 2);
+ /* Need to pad with NOP */
+ eip197_noop_token(&token[1]);
+ }
- /* skip over worst case IV of 4 dwords, no need to be exact */
- token = (struct safexcel_token *)(cdesc->control_data.token + 4);
+ token->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token->packet_length = length;
+ token->stat = EIP197_TOKEN_STAT_LAST_PACKET |
+ EIP197_TOKEN_STAT_LAST_HASH;
+ token->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
- token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
- EIP197_TOKEN_STAT_LAST_HASH;
- token[0].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
+{
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ /* 32 bit nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 96 bit IV part */
+ memcpy(&cdesc->control_data.token[0], iv, 12);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ /* CBC */
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
enum safexcel_cipher_direction direction,
u32 cryptlen, u32 assoclen, u32 digestsize)
{
- struct safexcel_token *token;
+ struct safexcel_token *aadref;
+ int atoksize = 2; /* Start with minimum size */
+ int assocadj = assoclen - ctx->aadskip, aadalign;
- safexcel_cipher_token(ctx, iv, cdesc);
+ /* Always 4 dwords of embedded IV for AEAD modes */
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- if (direction == SAFEXCEL_ENCRYPT) {
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- } else {
+ if (direction == SAFEXCEL_DECRYPT)
cryptlen -= digestsize;
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 15);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
-
- token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[14].packet_length = digestsize |
- EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
- }
-
- if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
- /* For ESP mode (and not GMAC), skip over the IV */
- token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
-
- assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
- }
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) {
+ /* Construct IV block B0 for the CBC-MAC */
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
+ u8 *cbcmaciv = (u8 *)&atoken[1];
+ __le32 *aadlen = (__le32 *)&atoken[5];
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* Length + nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* Fixup flags byte */
+ *(__le32 *)cbcmaciv =
+ cpu_to_le32(ctx->nonce |
+ ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2));
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ memcpy(cbcmaciv + 4, iv, 8);
+ /* Start counter at 0 */
+ cdesc->control_data.token[3] = 0;
+ /* Message length */
+ *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen);
+ } else {
+ /* Variable length IV part */
+ memcpy(final_iv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, iv, 15 - iv[0]);
+ /* Start variable length counter at 0 */
+ memset(final_iv + 15 - iv[0], 0, iv[0] + 1);
+ memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ /* fixup flags byte */
+ cbcmaciv[0] |= ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2);
+ /* insert lower 2 bytes of message length */
+ cbcmaciv[14] = cryptlen >> 8;
+ cbcmaciv[15] = cryptlen & 255;
+ }
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE +
+ ((assocadj > 0) << 1);
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(assocadj)) {
+ *aadlen = cpu_to_le32((assocadj >> 8) |
+ (assocadj & 255) << 8);
+ atoken += 6;
+ atoksize += 7;
+ } else {
+ atoken += 5;
+ atoksize += 6;
+ }
- if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
- token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[11].packet_length = cryptlen;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- /* Do not send to crypt engine in case of GMAC */
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ atoken++;
+
+ /* For CCM only, align AAD data towards hash engine */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ aadalign = (assocadj + 2) & 15;
+ atoken->packet_length = assocadj && aadalign ?
+ 16 - aadalign :
+ 0;
+ if (likely(cryptlen)) {
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
- } else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
- token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ } else {
+ safexcel_aead_iv(ctx, iv, cdesc);
+
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
+ atoken++;
- if (!ctx->xcm)
- return;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 &&
+ direction == SAFEXCEL_DECRYPT)) {
+ /* Poly-chacha decryption needs a dummy NOP here ... */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16; /* According to Op Manual */
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ }
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[9].packet_length = 0;
- token[9].instructions = AES_BLOCK_SIZE;
+ if (ctx->xcm) {
+ /* For GCM and CCM, obtain enc(Y0) */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ atoken->packet_length = 0;
+ atoken->stat = 0;
+ atoken->instructions = AES_BLOCK_SIZE;
+ atoken++;
+
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
+ atoken++;
+ atoksize += 2;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[10].packet_length = AES_BLOCK_SIZE;
- token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ /* Fixup stat field for AAD direction instruction */
+ aadref->stat = 0;
- if (ctx->xcm != EIP197_XCM_MODE_GCM) {
- u8 *final_iv = (u8 *)cdesc->control_data.token;
- u8 *cbcmaciv = (u8 *)&token[1];
- __le32 *aadlen = (__le32 *)&token[5];
+ /* Process crypto data */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = cryptlen;
- /* Construct IV block B0 for the CBC-MAC */
- token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[0].packet_length = AES_BLOCK_SIZE +
- ((assoclen > 0) << 1);
- token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
- EIP197_TOKEN_INS_TYPE_HASH;
- /* Variable length IV part */
- memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
- /* fixup flags byte */
- cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
- /* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
- /* insert lower 2 bytes of message length */
- cbcmaciv[14] = cryptlen >> 8;
- cbcmaciv[15] = cryptlen & 255;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ /* Fixup instruction field for AAD dir instruction */
+ aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH;
- if (assoclen) {
- *aadlen = cpu_to_le32((assoclen >> 8) |
- ((assoclen & 0xff) << 8));
- assoclen += 2;
+ /* Do not send to crypt engine in case of GMAC */
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align AAD data towards hash engine */
- token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
- assoclen &= 15;
- token[7].packet_length = assoclen ? 16 - assoclen : 0;
-
- if (likely(cryptlen)) {
- token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align crypto data towards hash engine */
- token[11].stat = 0;
-
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- cryptlen &= 15;
- token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ cryptlen &= 15;
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) {
+ atoken->stat = 0;
+ /* For CCM only, pad crypto data to the hash engine */
+ atoken++;
+ atoksize++;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16 - cryptlen;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[7].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
}
+ atoken++;
+ atoksize++;
}
+
+ if (direction == SAFEXCEL_ENCRYPT) {
+ /* Append ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ } else {
+ /* Extract ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ atoken++;
+ atoksize++;
+
+ /* Verify ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ atoken->packet_length = digestsize |
+ EIP197_TOKEN_HASH_RESULT_VERIFY;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
+
+ /* Fixup length of the token in the command descriptor */
+ cdesc->additional_cdata_size = atoksize;
}
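
The rewritten safexcel_aead_token() ends every token stream the same way: encryption appends the computed digest with an INSERT instruction, decryption retrieves the ICV from the input and then verifies it. A condensed restatement of that tail as a standalone helper; it reuses struct safexcel_token and the EIP197_* constants visible above and is a sketch, not driver code:

/* Emit the ICV instructions at the end of an AEAD token stream.
 * 'tok' points at the next free instruction slot; returns the number
 * of instructions written.
 */
static int my_finish_icv(struct safexcel_token *tok, bool encrypt,
                         u32 digestsize)
{
        if (encrypt) {
                /* append the computed digest to the output packet */
                tok->opcode = EIP197_TOKEN_OPCODE_INSERT;
                tok->packet_length = digestsize;
                tok->stat = EIP197_TOKEN_STAT_LAST_HASH |
                            EIP197_TOKEN_STAT_LAST_PACKET;
                tok->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                    EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
                return 1;
        }

        /* decrypt: pull the ICV from the input ... */
        tok->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
        tok->packet_length = digestsize;
        tok->stat = EIP197_TOKEN_STAT_LAST_HASH |
                    EIP197_TOKEN_STAT_LAST_PACKET;
        tok->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
        tok++;

        /* ... then have the engine verify it against the computed digest */
        tok->opcode = EIP197_TOKEN_OPCODE_VERIFY;
        tok->packet_length = digestsize | EIP197_TOKEN_HASH_RESULT_VERIFY;
        tok->stat = EIP197_TOKEN_STAT_LAST_HASH |
                    EIP197_TOKEN_STAT_LAST_PACKET;
        tok->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
        return 2;
}
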
static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
@@ -329,10 +380,8 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
@@ -382,12 +431,12 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
case SAFEXCEL_DES:
err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_AES:
err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
@@ -450,9 +499,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
}
- crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_RES_MASK);
-
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
@@ -470,8 +516,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-badkey_expflags:
memzero_explicit(&keys, sizeof(keys));
return err;
}
@@ -656,6 +700,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
unsigned int totlen;
unsigned int totlen_src = cryptlen + assoclen;
unsigned int totlen_dst = totlen_src;
+ struct safexcel_token *atoken;
int n_cdesc = 0, n_rdesc = 0;
int queued, i, ret = 0;
bool first = true;
@@ -730,56 +775,60 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
- /* The EIP cannot deal with zero length input packets! */
- if (totlen == 0)
- totlen = 1;
+ if (!totlen) {
+ /*
+ * The EIP97 cannot deal with zero length input packets!
+ * So stuff a dummy command descriptor indicating a 1 byte
+ * (dummy) input packet, using the context record as source.
+ */
+ first_cdesc = safexcel_add_cdesc(priv, ring,
+ 1, 1, ctx->base.ctxr_dma,
+ 1, 1, ctx->base.ctxr_dma,
+ &atoken);
+ if (IS_ERR(first_cdesc)) {
+ /* No space left in the command descriptor ring */
+ ret = PTR_ERR(first_cdesc);
+ goto cdesc_rollback;
+ }
+ n_cdesc = 1;
+ goto skip_cdesc;
+ }
/* command descriptors */
for_each_sg(src, sg, sreq->nr_src, i) {
int len = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - len < 0)
+ if (queued < len)
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - len),
sg_dma_address(sg), len, totlen,
- ctx->base.ctxr_dma);
+ ctx->base.ctxr_dma, &atoken);
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(cdesc);
goto cdesc_rollback;
}
- n_cdesc++;
- if (n_cdesc == 1) {
+ if (!n_cdesc)
first_cdesc = cdesc;
- }
+ n_cdesc++;
queued -= len;
if (!queued)
break;
}
-
- if (unlikely(!n_cdesc)) {
- /*
- * Special case: zero length input buffer.
- * The engine always needs the 1st command descriptor, however!
- */
- first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
- ctx->base.ctxr_dma);
- n_cdesc = 1;
- }
-
+skip_cdesc:
/* Add context control words and token to first command descriptor */
safexcel_context_control(ctx, base, sreq, first_cdesc);
if (ctx->aead)
- safexcel_aead_token(ctx, iv, first_cdesc,
+ safexcel_aead_token(ctx, iv, first_cdesc, atoken,
sreq->direction, cryptlen,
assoclen, digestsize);
else
- safexcel_skcipher_token(ctx, iv, first_cdesc,
+ safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
cryptlen);
/* result descriptors */
@@ -1166,6 +1215,8 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
return 0;
}
@@ -1230,6 +1281,8 @@ static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1264,6 +1317,7 @@ static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1300,6 +1354,7 @@ static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -1336,6 +1391,7 @@ static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -1381,10 +1437,8 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
/* exclude the nonce here */
keylen = len - CTR_RFC3686_NONCE_SIZE;
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -1410,6 +1464,7 @@ static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -1445,6 +1500,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
@@ -1452,7 +1508,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
/* if context exits and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1468,6 +1524,8 @@ static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1505,6 +1563,8 @@ static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1537,6 +1597,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
@@ -1544,7 +1605,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
/* if context exits and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1560,6 +1621,8 @@ static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1597,6 +1660,8 @@ static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1652,6 +1717,9 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
+ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
ctx->aead = true;
ctx->base.send = safexcel_aead_send;
@@ -1840,6 +1908,8 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1874,6 +1944,8 @@ static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1908,6 +1980,8 @@ static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1942,6 +2016,8 @@ static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1976,6 +2052,8 @@ static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2010,6 +2088,8 @@ static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2044,6 +2124,8 @@ static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2078,6 +2160,8 @@ static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2112,6 +2196,8 @@ static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2146,6 +2232,8 @@ static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2362,10 +2450,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* Only half of the key data is cipher key */
keylen = (len >> 1);
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2381,10 +2467,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* The other half is the tweak key */
ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2412,6 +2496,7 @@ static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->xts = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
return 0;
@@ -2472,7 +2557,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2496,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2532,10 +2614,7 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->hkaes))
- return PTR_ERR(ctx->hkaes);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->hkaes);
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
@@ -2589,7 +2668,6 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2632,6 +2710,7 @@ static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
ctx->state_sz = 3 * AES_BLOCK_SIZE;
ctx->xcm = EIP197_XCM_MODE_CCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+ ctx->ctrinit = 0;
return 0;
}
@@ -2719,10 +2798,9 @@ static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- if (len != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2734,6 +2812,7 @@ static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->ctrinit = 0;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
return 0;
}
@@ -2775,10 +2854,9 @@ static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
ctx->nonce = *(u32 *)(key + len);
}
- if (len != CHACHA_KEY_SIZE) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2885,6 +2963,7 @@ static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
ctx->alg = SAFEXCEL_CHACHA20;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->ctrinit = 0;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
ctx->state_sz = 0; /* Precomputed by HW */
return 0;
@@ -2933,6 +3012,7 @@ static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_chachapoly_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -2971,10 +3051,8 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- if (len != SM4_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != SM4_KEY_SIZE)
return -EINVAL;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, SM4_KEY_SIZE))
@@ -3013,6 +3091,8 @@ static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -3047,6 +3127,7 @@ static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -3083,6 +3164,7 @@ static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -3119,6 +3201,7 @@ static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -3169,6 +3252,7 @@ static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -3228,6 +3312,7 @@ static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
safexcel_aead_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
@@ -3335,6 +3420,7 @@ static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_fallback_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
ctx->state_sz = SM3_DIGEST_SIZE;
return 0;
@@ -3473,6 +3559,7 @@ static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_gcm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -3607,6 +3694,7 @@ static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_ccm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
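One pattern worth calling out from the safexcel_cipher.c hunks above is the PTR_ERR_OR_ZERO() conversion in safexcel_aead_gcm_cra_init(). A minimal sketch of the idiom, using a hypothetical context structure rather than the driver's own, looks like this:

#include <linux/err.h>
#include <linux/crypto.h>

struct example_ctx {
	struct crypto_cipher *hkaes;
};

/*
 * PTR_ERR_OR_ZERO() returns 0 for a valid pointer and the encoded errno
 * for an ERR_PTR value, so the explicit IS_ERR()/PTR_ERR()/return 0
 * sequence collapses into a single statement.
 */
static int example_alloc_hkaes(struct example_ctx *ctx)
{
	ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
	return PTR_ERR_OR_ZERO(ctx->hkaes);
}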
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2134daef24f6..43962bc709c6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -87,12 +87,14 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
input_length &= 15;
if (unlikely(cbcmac && input_length)) {
+ token[0].stat = 0;
token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
token[1].packet_length = 16 - input_length;
token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ eip197_noop_token(&token[1]);
}
token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -101,6 +103,8 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
token[2].packet_length = result_length;
token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+ eip197_noop_token(&token[3]);
}
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
@@ -111,6 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
+ cdesc->control_data.control1 = 0;
/*
* Copy the input digest if needed, and setup the context
@@ -277,7 +282,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
sreq->processed = sreq->block_sz;
sreq->hmac = 0;
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE)
+ ctx->base.needs_inv = true;
areq->nbytes = 0;
safexcel_ahash_enqueue(areq);
@@ -314,6 +320,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
+ struct safexcel_token *dmmy;
int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
u64 queued, len;
@@ -397,7 +404,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
req->cache_dma, cache_len,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma,
+ &dmmy);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -436,7 +444,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
sg_dma_address(sg) + skip, sglen,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma, &dmmy);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
@@ -1911,10 +1919,8 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- if (keylen != sizeof(u32)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
memcpy(ctx->ipad, key, sizeof(u32));
return 0;
@@ -1987,10 +1993,8 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
@@ -2057,18 +2061,14 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
/* precompute the XCBC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2088,8 +2088,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_cipher_setkey(ctx->kaes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2160,10 +2158,8 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
for (i = 0; i < len / sizeof(u32); i++)
ctx->ipad[i + 8] =
@@ -2174,8 +2170,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 9237ba745c2f..e454c3d44f07 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,6 +14,11 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
+ int i;
+ struct safexcel_command_desc *cdesc;
+ dma_addr_t atok;
+
+ /* Actual command descriptor ring */
cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
@@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
+ /* Command descriptor shadow ring for storing additional token data */
+ cdr->shoffset = priv->config.cdsh_offset;
+ cdr->shbase = dmam_alloc_coherent(priv->dev,
+ cdr->shoffset *
+ EIP197_DEFAULT_RING_SIZE,
+ &cdr->shbase_dma, GFP_KERNEL);
+ if (!cdr->shbase)
+ return -ENOMEM;
+ cdr->shwrite = cdr->shbase;
+ cdr->shbase_end = cdr->shbase + cdr->shoffset *
+ (EIP197_DEFAULT_RING_SIZE - 1);
+
+ /*
+ * Populate command descriptors with physical pointers to shadow descs.
+ * Note that we only need to do this once if we don't overwrite them.
+ */
+ cdesc = cdr->base;
+ atok = cdr->shbase_dma;
+ for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
+ cdesc->atok_lo = lower_32_bits(atok);
+ cdesc->atok_hi = upper_32_bits(atok);
+ cdesc = (void *)cdesc + cdr->offset;
+ atok += cdr->shoffset;
+ }
+
rdr->offset = priv->config.rd_offset;
+ /* Use shoffset for result token offset here */
+ rdr->shoffset = priv->config.res_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
@@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
-static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_desc_ring *ring)
+static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ bool first,
+ struct safexcel_token **atoken)
{
void *ptr = ring->write;
+ if (first)
+ *atoken = ring->shwrite;
+
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+
+ if (ring->write == ring->base_end) {
+ ring->write = ring->base;
+ ring->shwrite = ring->shbase;
+ } else {
+ ring->write += ring->offset;
+ ring->shwrite += ring->shoffset;
+ }
+
+ return ptr;
+}
+
+static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ struct result_data_desc **rtoken)
+{
+ void *ptr = ring->write;
+
+ /* Result token at relative offset shoffset */
+ *rtoken = ring->write + ring->shoffset;
+
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
@@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
if (ring->write == ring->read)
return;
- if (ring->write == ring->base)
+ if (ring->write == ring->base) {
ring->write = ring->base_end;
- else
+ ring->shwrite = ring->shbase_end;
+ } else {
ring->write -= ring->offset;
+ ring->shwrite -= ring->shoffset;
+ }
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
@@ -117,26 +181,26 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 data_len,
u32 full_data_len,
- dma_addr_t context) {
+ dma_addr_t context,
+ struct safexcel_token **atoken)
+{
struct safexcel_command_desc *cdesc;
- int i;
- cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+ cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
+ first, atoken);
if (IS_ERR(cdesc))
return cdesc;
- memset(cdesc, 0, sizeof(struct safexcel_command_desc));
-
- cdesc->first_seg = first;
- cdesc->last_seg = last;
cdesc->particle_size = data_len;
+ cdesc->rsvd0 = 0;
+ cdesc->last_seg = last;
+ cdesc->first_seg = first;
+ cdesc->additional_cdata_size = 0;
+ cdesc->rsvd1 = 0;
cdesc->data_lo = lower_32_bits(data);
cdesc->data_hi = upper_32_bits(data);
- if (first && context) {
- struct safexcel_token *token =
- (struct safexcel_token *)cdesc->control_data.token;
-
+ if (first) {
/*
* Note that the length here MUST be >0 or else the EIP(1)97
* may hang. Newer EIP197 firmware actually incorporates this
@@ -146,20 +210,12 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
cdesc->control_data.packet_length = full_data_len ?: 1;
cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
EIP197_OPTION_64BIT_CTX |
- EIP197_OPTION_CTX_CTRL_IN_CMD;
- cdesc->control_data.context_lo =
- (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+ EIP197_OPTION_CTX_CTRL_IN_CMD |
+ EIP197_OPTION_RC_AUTO;
+ cdesc->control_data.type = EIP197_TYPE_BCLA;
+ cdesc->control_data.context_lo = lower_32_bits(context) |
+ EIP197_CONTEXT_SMALL;
cdesc->control_data.context_hi = upper_32_bits(context);
-
- if (priv->version == EIP197B_MRVL ||
- priv->version == EIP197D_MRVL)
- cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
-
- /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
- cdesc->control_data.refresh = 2;
-
- for (i = 0; i < EIP197_MAX_TOKENS; i++)
- eip197_noop_token(&token[i]);
}
return cdesc;
@@ -171,19 +227,27 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
dma_addr_t data, u32 len)
{
struct safexcel_result_desc *rdesc;
+ struct result_data_desc *rtoken;
- rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+ rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
+ &rtoken);
if (IS_ERR(rdesc))
return rdesc;
- memset(rdesc, 0, sizeof(struct safexcel_result_desc));
-
- rdesc->first_seg = first;
+ rdesc->particle_size = len;
+ rdesc->rsvd0 = 0;
+ rdesc->descriptor_overflow = 0;
+ rdesc->buffer_overflow = 0;
rdesc->last_seg = last;
+ rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
- rdesc->particle_size = len;
+ rdesc->rsvd1 = 0;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
+ /* Clear length & error code in result token */
+ rtoken->packet_length = 0;
+ rtoken->error_code = 0;
+
return rdesc;
}
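The safexcel_ring.c changes above pair every command descriptor with a shadow token descriptor, so the write pointers of the two rings have to advance and wrap in lockstep. A simplified sketch of that wrap logic follows; the structure and names here are illustrative, not the driver's:

struct example_ring {
	void *base, *base_end, *write;	/* command descriptor ring */
	void *shbase, *shwrite;		/* shadow token ring */
	int offset, shoffset;		/* per-entry size of each ring */
};

static void *example_ring_advance(struct example_ring *ring)
{
	void *ptr = ring->write;

	if (ring->write == ring->base_end) {
		/* wrap both rings together so entry i always matches entry i */
		ring->write = ring->base;
		ring->shwrite = ring->shbase;
	} else {
		ring->write += ring->offset;
		ring->shwrite += ring->shoffset;
	}
	return ptr;
}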
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 391e3b4df364..ad73fc946682 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -740,7 +740,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ int err;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
@@ -757,12 +757,13 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
case 24: keylen_cfg = MOD_AES192; break;
case 32: keylen_cfg = MOD_AES256; break;
default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else {
- crypto_des_verify_key(tfm, key);
+ err = crypto_des_verify_key(tfm, key);
+ if (err)
+ return err;
}
/* write cfg word to cryptinfo */
*(u32*)cinfo = cpu_to_be32(cipher_cfg);
@@ -819,7 +820,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int ret;
init_completion(&ctx->completion);
@@ -835,16 +835,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1096,7 +1086,6 @@ free_buf_src:
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
unsigned digest_len = crypto_aead_maxauthsize(tfm);
int ret;
@@ -1120,17 +1109,6 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- goto out;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1169,7 +1147,6 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
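Across ixp4xx and the other drivers touched in this series, the setkey paths now report a bad key purely through the return value instead of also raising CRYPTO_TFM_RES_BAD_KEY_LEN on the transform. A hedged sketch of the resulting convention, with illustrative names:

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

struct example_aes_ctx {
	struct crypto_aes_ctx aes;
};

static int example_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct example_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	/*
	 * aes_expandkey() validates the key length itself, so a plain
	 * error return is all the caller needs; no tfm flags are touched.
	 */
	return aes_expandkey(&ctx->aes, key, keylen);
}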
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d8e8c857770c..c24f34a48cef 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -255,10 +255,8 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int i;
ret = aes_expandkey(&ctx->aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90880a81c534..78d660d963e2 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -652,7 +652,6 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm,
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1022,7 +1021,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
break;
default:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1033,8 +1031,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index f438b425c655..435ac1c83df9 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -492,7 +492,6 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
/*
* AES 128 is supposed by the hardware, store key into temporary
@@ -513,16 +512,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
- if (!ret)
- return 0;
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
+ return crypto_sync_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 63bd565048f4..f5c468f2cc82 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -746,7 +746,6 @@ static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9bbedbccfadf..32dc00dc570b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -13,6 +13,7 @@
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
@@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
struct aead_request *req = dd->aead_req;
- dd->flags &= ~FLAGS_BUSY;
dd->in_sg = NULL;
dd->out_sg = NULL;
- req->base.complete(&req->base, ret);
+ crypto_finalize_aead_request(dd->engine, req, ret);
+
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
}
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
@@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
}
omap_aes_gcm_finish_req(dd, ret);
- omap_aes_gcm_handle_queue(dd, NULL);
}
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
@@ -120,11 +122,16 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_ASSOC_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
if (cryptlen) {
tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
+ if (nsg)
+ sg_unmark_end(dd->in_sgl);
+
ret = omap_crypto_align_sg(&tmp, cryptlen,
AES_BLOCK_SIZE, &dd->in_sgl[nsg],
OMAP_CRYPTO_COPY_DATA |
@@ -132,6 +139,8 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_IN_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
dd->in_sg = dd->in_sgl;
@@ -142,18 +151,20 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd->out_sg = req->dst;
dd->orig_out = req->dst;
- dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+ dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);
flags = 0;
if (req->src == req->dst || dd->out_sg == sg_arr)
flags |= OMAP_CRYPTO_FORCE_COPY;
- ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
- AES_BLOCK_SIZE, &dd->out_sgl,
- flags,
- FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
- if (ret)
- return ret;
+ if (cryptlen) {
+ ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
+ AES_BLOCK_SIZE, &dd->out_sgl,
+ flags,
+ FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+ if (ret)
+ return ret;
+ }
dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
@@ -161,62 +172,12 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
return 0;
}
-static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
-{
- struct omap_aes_gcm_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
- struct scatterlist iv_sg, tag_sg;
- struct skcipher_request *sk_req;
- struct omap_aes_gcm_result result;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- int ret = 0;
-
- sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
- if (!sk_req) {
- pr_err("skcipher: Failed to allocate request\n");
- return -ENOMEM;
- }
-
- init_completion(&result.completion);
-
- sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
- sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
- skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- omap_aes_gcm_complete, &result);
- ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
- skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
- NULL);
- ret = crypto_skcipher_encrypt(sk_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret) {
- ret = result.err;
- if (!ret) {
- reinit_completion(&result.completion);
- break;
- }
- }
- /* fall through */
- default:
- pr_err("Encryption of IV failed for GCM mode\n");
- break;
- }
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- skcipher_request_free(sk_req);
- return ret;
+ aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
+ return 0;
}
void omap_aes_gcm_dma_out_callback(void *data)
@@ -246,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data)
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
struct aead_request *req)
{
- struct omap_aes_ctx *ctx;
- struct aead_request *backlog;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = aead_enqueue_request(&dd->aead_queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
-
- backlog = aead_get_backlog(&dd->aead_queue);
- req = aead_dequeue_request(&dd->aead_queue);
- if (req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!req)
- return ret;
+ return crypto_transfer_aead_request_to_engine(dd->engine, req);
- if (backlog)
- backlog->base.complete(&backlog->base, -EINPROGRESS);
+ return 0;
+}
- ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- rctx = aead_request_ctx(req);
+static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ int err;
- dd->ctx = ctx;
- rctx->dd = dd;
dd->aead_req = req;
rctx->mode &= FLAGS_MODE_MASK;
@@ -286,16 +231,9 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
if (err)
return err;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
+ dd->ctx = &ctx->octx;
- if (err) {
- omap_aes_gcm_finish_req(dd, err);
- omap_aes_gcm_handle_queue(dd, NULL);
- }
-
- return ret;
+ return omap_aes_write_ctrl(dd);
}
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
@@ -350,36 +288,39 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
FLAGS_RFC4106_GCM);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
+ memcpy(ctx->octx.key, key, keylen);
+ ctx->octx.keylen = keylen;
return 0;
}
@@ -387,19 +328,63 @@ int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (keylen < 4)
return -EINVAL;
-
keylen -= 4;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
- memcpy(ctx->key, key, keylen);
- memcpy(ctx->nonce, key + keylen, 4);
- ctx->keylen = keylen;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(ctx->octx.key, key, keylen);
+ memcpy(ctx->octx.nonce, key + keylen, 4);
+ ctx->octx.keylen = keylen;
+
+ return 0;
+}
+
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ int ret = 0;
+
+ if (!dd)
+ return -ENODEV;
+
+ if (dd->in_sg_len)
+ ret = omap_aes_crypt_dma_start(dd);
+ else
+ omap_aes_gcm_dma_out_callback(dd);
+
+ return ret;
+}
+
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
return 0;
}
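The rewritten do_encrypt_iv() above leans on the AES library interface instead of allocating a separate skcipher and waiting on its completion. A self-contained sketch of those library calls (the function name and flow here are illustrative, not the driver's):

#include <linux/string.h>
#include <crypto/aes.h>

static int example_tag_from_iv(const u8 *key, unsigned int keylen,
			       u8 tag[AES_BLOCK_SIZE],
			       const u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx actx;
	int ret;

	/* Expand the key once, then encrypt a single block synchronously. */
	ret = aes_expandkey(&actx, key, keylen);
	if (ret)
		return ret;

	aes_encrypt(&actx, tag, iv);
	memzero_explicit(&actx, sizeof(actx));
	return 0;
}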
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a1fc03ed01f3..824ddf2a66ff 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -269,13 +269,14 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
struct scatterlist *out_sg,
int in_sg_len, int out_sg_len)
{
- struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
struct dma_slave_config cfg;
int ret;
if (dd->pio_only) {
scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ if (out_sg_len)
+ scatterwalk_start(&dd->out_walk, dd->out_sg);
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -312,34 +313,45 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
/* No callback necessary */
tx_in->callback_param = dd;
+ tx_in->callback = NULL;
/* OUT */
- ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
- if (ret) {
- dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
- ret);
- return ret;
- }
+ if (out_sg_len) {
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
- tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!tx_out) {
- dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
- return -EINVAL;
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
+ out_sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ cb_desc = tx_out;
+ } else {
+ cb_desc = tx_in;
}
if (dd->flags & FLAGS_GCM)
- tx_out->callback = omap_aes_gcm_dma_out_callback;
+ cb_desc->callback = omap_aes_gcm_dma_out_callback;
else
- tx_out->callback = omap_aes_dma_out_callback;
- tx_out->callback_param = dd;
+ cb_desc->callback = omap_aes_dma_out_callback;
+ cb_desc->callback_param = dd;
+
dmaengine_submit(tx_in);
- dmaengine_submit(tx_out);
+ if (tx_out)
+ dmaengine_submit(tx_out);
dma_async_issue_pending(dd->dma_lch_in);
- dma_async_issue_pending(dd->dma_lch_out);
+ if (out_sg_len)
+ dma_async_issue_pending(dd->dma_lch_out);
/* start DMA */
dd->pdata->trigger(dd, dd->total);
@@ -361,11 +373,13 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
return -EINVAL;
}
- err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
- if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- return -EINVAL;
+ if (dd->out_sg_len) {
+ err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
}
}
@@ -373,8 +387,9 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->out_sg_len);
if (err && !dd->pio_only) {
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
+ if (dd->out_sg_len)
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
}
return err;
@@ -479,6 +494,14 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
return omap_aes_crypt_dma_start(dd);
}
+static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
+}
+
static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
@@ -494,12 +517,16 @@ static void omap_aes_done_task(unsigned long data)
omap_aes_crypt_dma_stop(dd);
}
- omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+ omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
FLAGS_IN_DATA_ST_SHIFT, dd->flags);
- omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+ omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ /* Update IV output */
+ if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
+ omap_aes_copy_ivout(dd, dd->req->iv);
+
omap_aes_finish_req(dd, 0);
pr_debug("exit\n");
@@ -513,6 +540,9 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
struct omap_aes_dev *dd;
int ret;
+ if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
+ return -EINVAL;
+
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
@@ -627,36 +657,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
-static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
-{
- struct omap_aes_dev *dd = NULL;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- int err;
-
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
-
- err = pm_runtime_get_sync(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
- __func__, err);
- return err;
- }
-
- tfm->reqsize = sizeof(struct omap_aes_reqctx);
- ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- if (IS_ERR(ctx->ctr)) {
- pr_warn("could not load aes driver for encrypting IV\n");
- return PTR_ERR(ctx->ctr);
- }
-
- return 0;
-}
-
static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -667,19 +667,6 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
ctx->fallback = NULL;
}
-static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
-{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- if (ctx->fallback)
- crypto_free_sync_skcipher(ctx->fallback);
-
- ctx->fallback = NULL;
-
- if (ctx->ctr)
- crypto_free_skcipher(ctx->ctr);
-}
-
/* ********************** ALGS ************************************ */
static struct skcipher_alg algs_ecb_cbc[] = {
@@ -732,7 +719,7 @@ static struct skcipher_alg algs_ctr[] = {
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct omap_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -763,15 +750,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
.encrypt = omap_aes_gcm_encrypt,
.decrypt = omap_aes_gcm_decrypt,
},
@@ -783,15 +770,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
},
@@ -1296,7 +1283,8 @@ static int omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
- dd = NULL;
+
+ sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
return 0;
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d3575231e31..2d111bf906e1 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -9,6 +9,7 @@
#ifndef __OMAP_AES_H__
#define __OMAP_AES_H__
+#include <crypto/aes.h>
#include <crypto/engine.h>
#define DST_MAXBURST 4
@@ -79,7 +80,6 @@
#define FLAGS_INIT BIT(5)
#define FLAGS_FAST BIT(6)
-#define FLAGS_BUSY BIT(7)
#define FLAGS_IN_DATA_ST_SHIFT 8
#define FLAGS_OUT_DATA_ST_SHIFT 10
@@ -98,7 +98,11 @@ struct omap_aes_ctx {
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
struct crypto_sync_skcipher *fallback;
- struct crypto_skcipher *ctr;
+};
+
+struct omap_aes_gcm_ctx {
+ struct omap_aes_ctx octx;
+ struct crypto_aes_ctx actx;
};
struct omap_aes_reqctx {
@@ -202,8 +206,12 @@ int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int omap_aes_gcm_encrypt(struct aead_request *req);
int omap_aes_gcm_decrypt(struct aead_request *req);
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
int omap_aes_4106gcm_encrypt(struct aead_request *req);
int omap_aes_4106gcm_decrypt(struct aead_request *req);
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize);
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm);
int omap_aes_write_ctrl(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 7d592d93bb1c..cc88b7362bc2 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -154,6 +154,41 @@ int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
+static void omap_crypto_copy_data(struct scatterlist *src,
+ struct scatterlist *dst,
+ int offset, int len)
+{
+ int amt;
+ void *srcb, *dstb;
+ int srco = 0, dsto = offset;
+
+ while (src && dst && len) {
+ if (srco >= src->length) {
+ srco -= src->length;
+ src = sg_next(src);
+ continue;
+ }
+
+ if (dsto >= dst->length) {
+ dsto -= dst->length;
+ dst = sg_next(dst);
+ continue;
+ }
+
+ amt = min(src->length - srco, dst->length - dsto);
+ amt = min(len, amt);
+
+ srcb = sg_virt(src) + srco;
+ dstb = sg_virt(dst) + dsto;
+
+ memcpy(dstb, srcb, amt);
+
+ srco += amt;
+ dsto += amt;
+ len -= amt;
+ }
+}
+
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
int offset, int len, u8 flags_shift,
unsigned long flags)
@@ -171,7 +206,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
pages = get_order(len);
if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
- scatterwalk_map_and_copy(buf, orig, offset, len, 1);
+ omap_crypto_copy_data(sg, orig, offset, len);
if (flags & OMAP_CRYPTO_DATA_COPIED)
free_pages((unsigned long)buf, pages);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 4c4dbc2b377e..8eda43319204 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -597,6 +597,7 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
static void omap_des_done_task(unsigned long data)
{
struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ int i;
pr_debug("enter done_task\n");
@@ -615,6 +616,11 @@ static void omap_des_done_task(unsigned long data)
omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ for (i = 0; i < 2; i++)
+ ((u32 *)dd->req->iv)[i] =
+ omap_des_read(dd, DES_REG_IV(dd, i));
+
omap_des_finish_req(dd, 0);
pr_debug("exit\n");
@@ -631,10 +637,11 @@ static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
+ if (!req->cryptlen)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
return -EINVAL;
- }
dd = omap_des_find_dev(ctx);
if (!dd)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ac80bc6af093..4f915a4ef5b0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -112,6 +112,8 @@
#define FLAGS_BE32_SHA1 8
#define FLAGS_SGS_COPIED 9
#define FLAGS_SGS_ALLOCED 10
+#define FLAGS_HUGE 11
+
/* context flags */
#define FLAGS_FINUP 16
@@ -136,6 +138,8 @@
#define BUFLEN SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD 256
+#define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)
+
struct omap_sham_dev;
struct omap_sham_reqctx {
@@ -644,6 +648,8 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
struct scatterlist *tmp;
int offset = ctx->offset;
+ ctx->total = new_len;
+
if (ctx->bufcnt)
n++;
@@ -661,15 +667,16 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
tmp = sg_next(tmp);
ctx->sg_len++;
+ new_len -= ctx->bufcnt;
}
while (sg && new_len) {
int len = sg->length - offset;
- if (offset) {
+ if (len <= 0) {
offset -= sg->length;
- if (offset < 0)
- offset = 0;
+ sg = sg_next(sg);
+ continue;
}
if (new_len < len)
@@ -677,33 +684,37 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
if (len > 0) {
new_len -= len;
- sg_set_page(tmp, sg_page(sg), len, sg->offset);
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
+ offset = 0;
+ ctx->offset = 0;
+ ctx->sg_len++;
if (new_len <= 0)
- sg_mark_end(tmp);
+ break;
tmp = sg_next(tmp);
- ctx->sg_len++;
}
sg = sg_next(sg);
}
+ if (tmp)
+ sg_mark_end(tmp);
+
set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
- struct scatterlist *sg, int bs, int new_len)
+ struct scatterlist *sg, int bs,
+ unsigned int new_len)
{
int pages;
void *buf;
- int len;
-
- len = new_len + ctx->bufcnt;
- pages = get_order(ctx->total);
+ pages = get_order(new_len);
buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
if (!buf) {
@@ -715,14 +726,15 @@ static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
- ctx->total - ctx->bufcnt, 0);
+ min(new_len, ctx->total) - ctx->bufcnt, 0);
sg_init_table(ctx->sgl, 1);
- sg_set_buf(ctx->sgl, buf, len);
+ sg_set_buf(ctx->sgl, buf, new_len);
ctx->sg = ctx->sgl;
set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
ctx->sg_len = 1;
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
- ctx->offset = 0;
+ ctx->total = new_len;
return 0;
}
@@ -737,6 +749,7 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
struct scatterlist *sg_tmp = sg;
int new_len;
int offset = rctx->offset;
+ int bufcnt = rctx->bufcnt;
if (!sg || !sg->length || !nbytes)
return 0;
@@ -751,12 +764,28 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
else
new_len = (new_len - 1) / bs * bs;
+ if (!new_len)
+ return 0;
+
if (nbytes != new_len)
list_ok = false;
while (nbytes > 0 && sg_tmp) {
n++;
+ if (bufcnt) {
+ if (!IS_ALIGNED(bufcnt, bs)) {
+ aligned = false;
+ break;
+ }
+ nbytes -= bufcnt;
+ bufcnt = 0;
+ if (!nbytes)
+ list_ok = false;
+
+ continue;
+ }
+
#ifdef CONFIG_ZONE_DMA
if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
aligned = false;
@@ -794,13 +823,27 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
}
}
+ if (new_len > OMAP_SHA_MAX_DMA_LEN) {
+ new_len = OMAP_SHA_MAX_DMA_LEN;
+ aligned = false;
+ }
+
if (!aligned)
return omap_sham_copy_sgs(rctx, sg, bs, new_len);
else if (!list_ok)
return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+ rctx->total = new_len;
+ rctx->offset += new_len;
rctx->sg_len = n;
- rctx->sg = sg;
+ if (rctx->bufcnt) {
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
+ sg_chain(rctx->sgl, 2, sg);
+ rctx->sg = rctx->sgl;
+ } else {
+ rctx->sg = sg;
+ }
return 0;
}
@@ -810,31 +853,35 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
int bs;
int ret;
- int nbytes;
+ unsigned int nbytes;
bool final = rctx->flags & BIT(FLAGS_FINUP);
- int xmit_len, hash_later;
+ int hash_later;
bs = get_block_size(rctx);
+ nbytes = rctx->bufcnt;
+
if (update)
- nbytes = req->nbytes;
- else
- nbytes = 0;
+ nbytes += req->nbytes - rctx->offset;
- rctx->total = nbytes + rctx->bufcnt;
+ dev_dbg(rctx->dd->dev,
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ __func__, nbytes, bs, rctx->total, rctx->offset,
+ rctx->bufcnt);
- if (!rctx->total)
+ if (!nbytes)
return 0;
- if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+ rctx->total = nbytes;
+
+ if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
int len = bs - rctx->bufcnt % bs;
- if (len > nbytes)
- len = nbytes;
+ if (len > req->nbytes)
+ len = req->nbytes;
scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
0, len, 0);
rctx->bufcnt += len;
- nbytes -= len;
rctx->offset = len;
}
@@ -845,64 +892,25 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (ret)
return ret;
- xmit_len = rctx->total;
-
- if (!IS_ALIGNED(xmit_len, bs)) {
- if (final)
- xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
- else
- xmit_len = xmit_len / bs * bs;
- } else if (!final) {
- xmit_len -= bs;
- }
-
- hash_later = rctx->total - xmit_len;
+ hash_later = nbytes - rctx->total;
if (hash_later < 0)
hash_later = 0;
- if (rctx->bufcnt && nbytes) {
- /* have data from previous operation and current */
- sg_init_table(rctx->sgl, 2);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
-
- sg_chain(rctx->sgl, 2, req->src);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len++;
- } else if (rctx->bufcnt) {
- /* have buffered data only */
- sg_init_table(rctx->sgl, 1);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len = 1;
- }
-
if (hash_later) {
- int offset = 0;
-
- if (hash_later > req->nbytes) {
- memcpy(rctx->buffer, rctx->buffer + xmit_len,
- hash_later - req->nbytes);
- offset = hash_later - req->nbytes;
- }
-
- if (req->nbytes) {
- scatterwalk_map_and_copy(rctx->buffer + offset,
- req->src,
- offset + req->nbytes -
- hash_later, hash_later, 0);
- }
+ scatterwalk_map_and_copy(rctx->buffer,
+ req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
rctx->bufcnt = hash_later;
} else {
rctx->bufcnt = 0;
}
- if (!final)
- rctx->total = xmit_len;
+ if (hash_later > rctx->buflen)
+ set_bit(FLAGS_HUGE, &rctx->dd->flags);
+
+ rctx->total = min(nbytes, rctx->total);
return 0;
}
@@ -998,10 +1006,11 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct ahash_request *req = dd->req;
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
- bool final = ctx->flags & BIT(FLAGS_FINUP);
+ bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+ !(dd->flags & BIT(FLAGS_HUGE));
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+ ctx->total, ctx->digcnt, final);
if (ctx->total < get_block_size(ctx) ||
ctx->total < dd->fallback_sz)
@@ -1024,6 +1033,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
+ if (dd->flags & BIT(FLAGS_HUGE))
+ return 0;
+
if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
/*
* faster to handle last block with cpu or
@@ -1083,7 +1095,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
free_pages((unsigned long)sg_virt(ctx->sg),
- get_order(ctx->sg->length + ctx->bufcnt));
+ get_order(ctx->sg->length));
if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
kfree(ctx->sg);
@@ -1092,6 +1104,21 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+ if (dd->flags & BIT(FLAGS_HUGE)) {
+ dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+ BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
+ omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS &&
+ (ctx->flags & BIT(FLAGS_FINUP)))
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ omap_sham_final_req(dd);
+ }
+ return;
+ }
+
if (!err) {
dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1107,6 +1134,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
+ ctx->offset = 0;
+
if (req->base.complete)
req->base.complete(&req->base, err);
}
@@ -1158,7 +1187,7 @@ retry:
/* request has changed - restore hash */
dd->pdata->copy_hash(req, 0);
- if (ctx->op == OP_UPDATE) {
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
err = omap_sham_update_req(dd);
if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
/* no final() after finup() */
@@ -1730,6 +1759,8 @@ static void omap_sham_done_task(unsigned long data)
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
int err = 0;
+ dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
+
if (!test_bit(FLAGS_BUSY, &dd->flags)) {
omap_sham_handle_queue(dd, NULL);
return;
@@ -2223,6 +2254,8 @@ static int omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
+ sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
+
return 0;
}
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index c5b60f50e1b5..594d6b1695d5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -108,14 +108,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
- u32 *flags = &tfm->crt_flags;
struct crypto_aes_ctx gen_aes;
int cpu;
- if (key_len % 8) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len % 8)
return -EINVAL;
- }
/*
* If the hardware is capable of generating the extended key
@@ -146,10 +143,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ctx->cword.encrypt.keygen = 1;
ctx->cword.decrypt.keygen = 1;
- if (aes_expandkey(&gen_aes, in_key, key_len)) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (aes_expandkey(&gen_aes, in_key, key_len))
return -EINVAL;
- }
memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index ddf1b549fdca..c826abe79e79 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -190,13 +190,11 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
return padlock_sha256_finup(desc, buf, 0, out);
}
-static int padlock_cra_init(struct crypto_tfm *tfm)
+static int padlock_init_tfm(struct crypto_shash *hash)
{
- struct crypto_shash *hash = __crypto_shash_cast(tfm);
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
struct crypto_shash *fallback_tfm;
- int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
@@ -204,21 +202,17 @@ static int padlock_cra_init(struct crypto_tfm *tfm)
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
+ return PTR_ERR(fallback_tfm);
}
ctx->fallback = fallback_tfm;
hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-
-out:
- return err;
}
-static void padlock_cra_exit(struct crypto_tfm *tfm)
+static void padlock_exit_tfm(struct crypto_shash *hash)
{
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
crypto_free_shash(ctx->fallback);
}
@@ -231,6 +225,8 @@ static struct shash_alg sha1_alg = {
.final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha1_state),
.base = {
@@ -241,8 +237,6 @@ static struct shash_alg sha1_alg = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
@@ -254,6 +248,8 @@ static struct shash_alg sha256_alg = {
.final = padlock_sha256_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha256_state),
.base = {
@@ -264,8 +260,6 @@ static struct shash_alg sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
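The padlock-sha conversion above moves from cra_init/cra_exit to the shash-level init_tfm/exit_tfm hooks, which receive the crypto_shash directly. A minimal sketch of that shape, with a hypothetical context structure and fallback name:

#include <linux/err.h>
#include <crypto/internal/hash.h>

struct example_shash_ctx {
	struct crypto_shash *fallback;
};

static int example_init_tfm(struct crypto_shash *hash)
{
	struct example_shash_ctx *ctx = crypto_shash_ctx(hash);

	/* No __crypto_shash_cast()/crypto_tfm_ctx() indirection needed. */
	ctx->fallback = crypto_alloc_shash("sha1-generic", 0, 0);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	hash->descsize += crypto_shash_descsize(ctx->fallback);
	return 0;
}

static void example_exit_tfm(struct crypto_shash *hash)
{
	struct example_shash_ctx *ctx = crypto_shash_ctx(hash);

	crypto_free_shash(ctx->fallback);
}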
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 29da449b3e9e..7384e91c8b32 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -465,9 +465,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -490,7 +487,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -780,10 +776,8 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
- if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len > AES_MAX_KEY_SIZE)
return -EINVAL;
- }
/*
* IPSec engine only supports 128 and 256 bit AES keys. If we get a
@@ -805,12 +799,6 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
CRYPTO_TFM_REQ_MASK);
err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
-
if (err)
goto sw_setkey_failed;
}
@@ -830,7 +818,6 @@ static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -1595,6 +1582,11 @@ static const struct of_device_id spacc_of_id_table[] = {
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
+static void spacc_tasklet_kill(void *data)
+{
+ tasklet_kill(data);
+}
+
static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret;
@@ -1637,6 +1629,14 @@ static int spacc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
+ &engine->complete);
+ if (ret)
+ return ret;
+
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
@@ -1694,8 +1694,6 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->completed);
INIT_LIST_HEAD(&engine->in_progress);
engine->in_flight = 0;
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
platform_set_drvdata(pdev, engine);
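The picoxcell hunks above move tasklet_init() ahead of the IRQ request and register a devm action to kill the tasklet. Because devm-managed resources are torn down in reverse order of registration, this guarantees the tasklet is killed only after the devm-requested IRQ (which schedules it) has been freed. The following is a hedged, self-contained sketch of that ordering; every foo_* name is hypothetical, only the tasklet_init()/devm_add_action()/devm_request_irq() pattern is taken from the change.

/*
 * Hedged sketch of the probe-time ordering used above; all foo_* names are
 * hypothetical. devm resources are released LIFO, so registering the
 * tasklet-kill action before devm_request_irq() means the kill runs only
 * after the IRQ handler can no longer reschedule the tasklet.
 */
struct foo_engine {
	struct tasklet_struct complete;
	int irq;
};

static void foo_complete(unsigned long data)
{
	/* completion processing would run here */
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_engine *engine = data;

	tasklet_schedule(&engine->complete);
	return IRQ_HANDLED;
}

static void foo_tasklet_kill(void *data)
{
	tasklet_kill(data);	/* waits for any pending run to finish */
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_engine *engine;
	int ret;

	engine = devm_kzalloc(&pdev->dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->irq = platform_get_irq(pdev, 0);
	if (engine->irq < 0)
		return engine->irq;

	tasklet_init(&engine->complete, foo_complete, (unsigned long)engine);

	/* Runs after the IRQ below is released, i.e. once nothing can reschedule. */
	ret = devm_add_action(&pdev->dev, foo_tasklet_kill, &engine->complete);
	if (ret)
		return ret;

	return devm_request_irq(&pdev->dev, engine->irq, foo_irq, 0,
				dev_name(&pdev->dev), engine);
}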
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 35bca76b640f..833bb1d3a11b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -570,7 +570,6 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
@@ -586,14 +585,11 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
- goto bad_key;
+ return -EINVAL;
qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 8caa04e1ec43..14ade8a7d664 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- skcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index da1188abc9ba..629e7f34dc09 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,6 +225,87 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 0a44a6eeacf5..cb6d61eb7302 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 40a59214d2e1..7da893dc00e7 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma)
}
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
+ int max_ents)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg) {
+ while (new_sgl && sg && max_ents) {
sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
+ max_ents--;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 1e25a9e0e6f8..ed25a0d9829e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
+ int max_ents);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 95ab16fc8fd6..1ab62e7d5f3c 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret)
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
kfree(buf);
err_free_req:
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index fee07323f8f9..4217b745f124 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data)
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data)
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
qce->async_req_done(tmpl->qce, error);
}
@@ -95,13 +97,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -154,12 +156,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (keylen) {
+ switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
break;
@@ -213,13 +216,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int keylen;
int ret;
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
+ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_256) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
@@ -252,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm)
memset(ctx, 0, sizeof(*ctx));
crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ return 0;
+}
+
+static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ qce_skcipher_init(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
@@ -270,6 +282,7 @@ struct qce_skcipher_def {
const char *name;
const char *drv_name;
unsigned int blocksize;
+ unsigned int chunksize;
unsigned int ivsize;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -298,7 +311,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.flags = QCE_ALG_AES | QCE_MODE_CTR,
.name = "ctr(aes)",
.drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
+ .chunksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -309,8 +323,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.drv_name = "xts-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
},
{
.flags = QCE_ALG_DES | QCE_MODE_ECB,
@@ -368,6 +382,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
def->drv_name);
alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
alg->ivsize = def->ivsize;
alg->min_keysize = def->min_keysize;
alg->max_keysize = def->max_keysize;
@@ -379,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
alg->base.cra_module = THIS_MODULE;
- alg->init = qce_skcipher_init;
- alg->exit = qce_skcipher_exit;
+ if (IS_AES(def->flags)) {
+ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+ alg->init = qce_skcipher_init_fallback;
+ alg->exit = qce_skcipher_exit;
+ } else {
+ alg->init = qce_skcipher_init;
+ }
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
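Two points in the qce skcipher hunks above benefit from a worked example: ctr(aes) now advertises a block size of 1 with chunksize carrying AES_BLOCK_SIZE (CTR behaves as a stream cipher), and xts(aes) doubles min/max_keysize while halving keylen before validation, because an XTS key is two equal-length AES keys concatenated (data key followed by tweak key). The following is a standalone sketch (plain C, not driver code) of why the XTS length check divides by two; the helper name is made up.

/* Standalone sketch: xts(aes) keys are two AES keys, so validate keylen / 2. */
#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_256 32

static int xts_aes_keylen_ok(unsigned int keylen)
{
	unsigned int half = keylen / 2;	/* data key || tweak key */

	return keylen % 2 == 0 &&
	       (half == AES_KEYSIZE_128 || half == AES_KEYSIZE_256);
}

int main(void)
{
	printf("32 bytes: %d\n", xts_aes_keylen_ok(32)); /* 1: AES-128 XTS */
	printf("64 bytes: %d\n", xts_aes_keylen_ok(64)); /* 1: AES-256 XTS */
	printf("48 bytes: %d\n", xts_aes_keylen_ok(48)); /* 0: AES-192 not offered here */
	return 0;
}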
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index ca4de4ddfe1f..4a75c8e1fa6c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -34,10 +34,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d4ea2f11ca68..466e30bd529c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -601,7 +601,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
ctx->keylen = keylen;
@@ -621,13 +620,7 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
- return ret;
+ return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 1aba9372cd23..4ef3eb11361c 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -4,7 +4,7 @@ config CRYPTO_DEV_STM32_CRC
depends on ARCH_STM32
select CRYPTO_HASH
help
- This enables support for the CRC32 hw accelerator which can be found
+ This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_HASH
@@ -17,7 +17,7 @@ config CRYPTO_DEV_STM32_HASH
select CRYPTO_SHA256
select CRYPTO_ENGINE
help
- This enables support for the HASH hw accelerator which can be found
+ This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_CRYP
@@ -27,5 +27,5 @@ config CRYPTO_DEV_STM32_CRYP
select CRYPTO_ENGINE
select CRYPTO_LIB_DES
help
- This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 9e11c3480353..8e92e4ac79f1 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -85,10 +85,8 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
mctx->key = get_unaligned_le32(key);
return 0;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cfc8e0e37bee..167b80eec437 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -518,10 +518,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
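The stm32-hash hunk above switches from dma_request_slave_channel(), which returns NULL on failure, to dma_request_chan(), which returns an ERR_PTR-encoded error (notably -EPROBE_DEFER), so the real error can be propagated instead of a hard-coded -EBUSY. A minimal sketch of the calling convention, assuming a hypothetical foo_dev and channel name:

/* Hedged sketch of the dma_request_chan() convention; foo_dev and "rx" are
 * placeholders, not taken from the driver. */
struct dma_chan *chan;

chan = dma_request_chan(foo_dev, "rx");
if (IS_ERR(chan))
	return PTR_ERR(chan);	/* may be -EPROBE_DEFER: let the core retry probe */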
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index d71d65846e47..9c6db7f698c4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -914,7 +914,6 @@ static int aead_setkey(struct crypto_aead *authenc,
return 0;
badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -929,11 +928,11 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
+ goto out;
err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
if (err)
@@ -954,10 +953,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static void talitos_sg_unmap(struct device *dev,
@@ -1528,8 +1523,6 @@ static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
keylen == AES_KEYSIZE_256)
return skcipher_setkey(cipher, key, keylen);
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
return -EINVAL;
}
@@ -2234,10 +2227,8 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
/* Must get the hash of the long key */
ret = keyhash(tfm, key, keylen, hash);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
keysize = digestsize;
memcpy(ctx->key, hash, digestsize);
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b731895aa241..f56d65c56ccf 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -11,18 +11,18 @@ config CRYPTO_DEV_UX500_CRYP
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
- This selects the crypto driver for the UX500_CRYP hardware. It supports
- AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
+ This selects the crypto driver for the UX500_CRYP hardware. It supports
+ AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
config CRYPTO_DEV_UX500_HASH
- tristate "UX500 crypto driver for HASH block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- help
- This selects the hash driver for the UX500_HASH hardware.
- Depends on UX500/STM DMA if running in DMA mode.
+ help
+ This selects the hash driver for the UX500_HASH hardware.
+ Depends on UX500/STM DMA if running in DMA mode.
config CRYPTO_DEV_UX500_DEBUG
bool "Activate ux500 platform debug-mode for crypto and hash block"
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 95fb694a2667..800dfc4d16c4 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -951,7 +951,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -970,7 +969,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
default:
pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 4b71e80951b7..fd045e64972a 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -272,11 +272,11 @@ static int virtio_crypto_alg_skcipher_init_sessions(
if (keylen > vcrypto->max_cipher_key_len) {
pr_err("virtio_crypto: the key is too long\n");
- goto bad_key;
+ return -EINVAL;
}
if (virtio_crypto_alg_validate_key(keylen, &alg))
- goto bad_key;
+ return -EINVAL;
/* Create encryption session */
ret = virtio_crypto_alg_skcipher_init_session(ctx,
@@ -291,10 +291,6 @@ static int virtio_crypto_alg_skcipher_init_sessions(
return ret;
}
return 0;
-
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/* Note: kernel crypto API realization */
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index d59e736882f6..9fee1b1532a4 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -84,6 +84,9 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
u8 tweak[AES_BLOCK_SIZE];
int ret;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 676ffcb64985..8da63f38e6bd 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -2,7 +2,7 @@
# Generic Trusted Execution Environment Configuration
config TEE
tristate "Trusted Execution Environment support"
- depends on HAVE_ARM_SMCCC || COMPILE_TEST
+ depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
@@ -14,7 +14,7 @@ if TEE
menu "TEE drivers"
source "drivers/tee/optee/Kconfig"
-
+source "drivers/tee/amdtee/Kconfig"
endmenu
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 21f51fd88b07..68da044afbfa 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -4,3 +4,4 @@ tee-objs += tee_core.o
tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
+obj-$(CONFIG_AMDTEE) += amdtee/
diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig
new file mode 100644
index 000000000000..4e32b6413b41
--- /dev/null
+++ b/drivers/tee/amdtee/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+# AMD-TEE Trusted Execution Environment Configuration
+config AMDTEE
+ tristate "AMD-TEE"
+ default m
+ depends on CRYPTO_DEV_SP_PSP
+ help
+ This implements AMD's Trusted Execution Environment (TEE) driver.
diff --git a/drivers/tee/amdtee/Makefile b/drivers/tee/amdtee/Makefile
new file mode 100644
index 000000000000..ff1485266117
--- /dev/null
+++ b/drivers/tee/amdtee/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+obj-$(CONFIG_AMDTEE) += amdtee.o
+amdtee-objs += core.o
+amdtee-objs += call.o
+amdtee-objs += shm_pool.o
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
new file mode 100644
index 000000000000..ff48c3e47375
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+/*
+ * This file has definitions related to Host and AMD-TEE Trusted OS interface.
+ * These definitions must match the definitions on the TEE side.
+ */
+
+#ifndef AMDTEE_IF_H
+#define AMDTEE_IF_H
+
+#include <linux/types.h>
+
+/*****************************************************************************
+ ** TEE Param
+ ******************************************************************************/
+#define TEE_MAX_PARAMS 4
+
+/**
+ * struct memref - memory reference structure
+ * @buf_id: buffer ID of the buffer mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ * @offset: offset in bytes from beginning of the buffer
+ * @size: data size in bytes
+ */
+struct memref {
+ u32 buf_id;
+ u32 offset;
+ u32 size;
+};
+
+struct value {
+ u32 a;
+ u32 b;
+};
+
+/*
+ * Parameters passed to open_session or invoke_command
+ */
+union tee_op_param {
+ struct memref mref;
+ struct value val;
+};
+
+struct tee_operation {
+ u32 param_types;
+ union tee_op_param params[TEE_MAX_PARAMS];
+};
+
+/* Must be same as in GP TEE specification */
+#define TEE_OP_PARAM_TYPE_NONE 0
+#define TEE_OP_PARAM_TYPE_VALUE_INPUT 1
+#define TEE_OP_PARAM_TYPE_VALUE_OUTPUT 2
+#define TEE_OP_PARAM_TYPE_VALUE_INOUT 3
+#define TEE_OP_PARAM_TYPE_INVALID 4
+#define TEE_OP_PARAM_TYPE_MEMREF_INPUT 5
+#define TEE_OP_PARAM_TYPE_MEMREF_OUTPUT 6
+#define TEE_OP_PARAM_TYPE_MEMREF_INOUT 7
+
+#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
+#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+
+/*****************************************************************************
+ ** TEE Commands
+ *****************************************************************************/
+
+/*
+ * The shared memory between rich world and secure world may be physically
+ * non-contiguous. Below structures are meant to describe a shared memory region
+ * via scatter/gather (sg) list
+ */
+
+/**
+ * struct tee_sg_desc - sg descriptor for a physically contiguous buffer
+ * @low_addr: [in] bits[31:0] of buffer's physical address. Must be 4KB aligned
+ * @hi_addr: [in] bits[63:32] of the buffer's physical address
+ * @size: [in] size in bytes (must be multiple of 4KB)
+ */
+struct tee_sg_desc {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+/**
+ * struct tee_sg_list - structure describing a scatter/gather list
+ * @count: [in] number of sg descriptors
+ * @size: [in] total size of all buffers in the list. Must be multiple of 4KB
+ * @buf: [in] list of sg buffer descriptors
+ */
+#define TEE_MAX_SG_DESC 64
+struct tee_sg_list {
+ u32 count;
+ u32 size;
+ struct tee_sg_desc buf[TEE_MAX_SG_DESC];
+};
+
+/**
+ * struct tee_cmd_map_shared_mem - command to map shared memory
+ * @buf_id: [out] return buffer ID value
+ * @sg_list: [in] list describing memory to be mapped
+ */
+struct tee_cmd_map_shared_mem {
+ u32 buf_id;
+ struct tee_sg_list sg_list;
+};
+
+/**
+ * struct tee_cmd_unmap_shared_mem - command to unmap shared memory
+ * @buf_id: [in] buffer ID of memory to be unmapped
+ */
+struct tee_cmd_unmap_shared_mem {
+ u32 buf_id;
+};
+
+/**
+ * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ */
+struct tee_cmd_load_ta {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_unload_ta - command to unload TA binary from TEE environment
+ * @ta_handle: [in] handle of the loaded TA to be unloaded
+ */
+struct tee_cmd_unload_ta {
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_open_session - command to call TA_OpenSessionEntryPoint in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [out] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_open_session {
+ u32 ta_handle;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+/**
+ * struct tee_cmd_close_session - command to call TA_CloseSessionEntryPoint()
+ * in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [in] pointer to TA allocated session data
+ */
+struct tee_cmd_close_session {
+ u32 ta_handle;
+ u32 session_info;
+};
+
+/**
+ * struct tee_cmd_invoke_cmd - command to call TA_InvokeCommandEntryPoint() in
+ * TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @cmd_id: [in] TA command ID
+ * @session_info: [in] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_invoke_cmd {
+ u32 ta_handle;
+ u32 cmd_id;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+#endif /*AMDTEE_IF_H*/
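The interface header above packs up to TEE_MAX_PARAMS parameter types into a single u32, four bits per slot, mirroring the GlobalPlatform paramTypes encoding it references. A standalone round-trip check of the TEE_PARAM_TYPES()/TEE_PARAM_TYPE_GET() macros (plain C, not driver code; the slot values are just the TEE_OP_PARAM_TYPE_* constants defined above):

/* Standalone check of the 4-bit-per-slot param_types packing. */
#include <assert.h>
#include <stdint.h>

#define TEE_PARAM_TYPE_GET(t, i)	(((t) >> ((i) * 4)) & 0xF)
#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
	((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))

int main(void)
{
	uint32_t types = TEE_PARAM_TYPES(1 /* VALUE_INPUT  */,
					 5 /* MEMREF_INPUT */,
					 0 /* NONE         */,
					 7 /* MEMREF_INOUT */);

	assert(TEE_PARAM_TYPE_GET(types, 0) == 1);
	assert(TEE_PARAM_TYPE_GET(types, 1) == 5);
	assert(TEE_PARAM_TYPE_GET(types, 2) == 0);
	assert(TEE_PARAM_TYPE_GET(types, 3) == 7);
	return 0;
}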
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
new file mode 100644
index 000000000000..d7f798c3394b
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#ifndef AMDTEE_PRIVATE_H
+#define AMDTEE_PRIVATE_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include "amdtee_if.h"
+
+#define DRIVER_NAME "amdtee"
+#define DRIVER_AUTHOR "AMD-TEE Linux driver team"
+
+/* Some GlobalPlatform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_GENERIC 0xFFFF0000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/* Maximum number of sessions which can be opened with a Trusted Application */
+#define TEE_NUM_SESSIONS 32
+
+#define TA_LOAD_PATH "/amdtee"
+#define TA_PATH_MAX 60
+
+/**
+ * struct amdtee - main service struct
+ * @teedev: client device
+ * @pool: shared memory pool
+ */
+struct amdtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct amdtee_session - Trusted Application (TA) session related information.
+ * @ta_handle: handle to Trusted Application (TA) loaded in TEE environment
+ * @refcount: counter to keep track of sessions opened for the TA instance
+ * @session_info: an array pointing to TA allocated session data.
+ * @sess_mask: session usage bit-mask. If a particular bit is set, then the
+ * corresponding @session_info entry is in use or valid.
+ *
+ * Session structure is updated on open_session and this information is used for
+ * subsequent operations with the Trusted Application.
+ */
+struct amdtee_session {
+ struct list_head list_node;
+ u32 ta_handle;
+ struct kref refcount;
+ u32 session_info[TEE_NUM_SESSIONS];
+ DECLARE_BITMAP(sess_mask, TEE_NUM_SESSIONS);
+ spinlock_t lock; /* synchronizes access to @sess_mask */
+};
+
+/**
+ * struct amdtee_context_data - AMD-TEE driver context data
+ * @sess_list: Keeps track of sessions opened in current TEE context
+ */
+struct amdtee_context_data {
+ struct list_head sess_list;
+};
+
+struct amdtee_driver_data {
+ struct amdtee *amdtee;
+};
+
+struct shmem_desc {
+ void *kaddr;
+ u64 size;
+};
+
+/**
+ * struct amdtee_shm_data - Shared memory data
+ * @kaddr: Kernel virtual address of shared memory
+ * @buf_id: Buffer id of memory mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ */
+struct amdtee_shm_data {
+ struct list_head shm_node;
+ void *kaddr;
+ u32 buf_id;
+};
+
+struct amdtee_shm_context {
+ struct list_head shmdata_list;
+};
+
+#define LOWER_TWO_BYTE_MASK 0x0000FFFF
+
+/**
+ * set_session_id() - Sets the session identifier.
+ * @ta_handle: [in] handle of the loaded Trusted Application (TA)
+ * @session_index: [in] Session index. Range: 0 to (TEE_NUM_SESSIONS - 1).
+ * @session: [out] Pointer to session id
+ *
+ * Lower two bytes of the session identifier represents the TA handle and the
+ * upper two bytes is session index.
+ */
+static inline void set_session_id(u32 ta_handle, u32 session_index,
+ u32 *session)
+{
+ *session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle);
+}
+
+static inline u32 get_ta_handle(u32 session)
+{
+ return session & LOWER_TWO_BYTE_MASK;
+}
+
+static inline u32 get_session_index(u32 session)
+{
+ return (session >> 16) & LOWER_TWO_BYTE_MASK;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+
+int amdtee_close_session(struct tee_context *ctx, u32 session);
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+int amdtee_map_shmem(struct tee_shm *shm);
+
+void amdtee_unmap_shmem(struct tee_shm *shm);
+
+int handle_load_ta(void *data, u32 size,
+ struct tee_ioctl_open_session_arg *arg);
+
+int handle_unload_ta(u32 ta_handle);
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p);
+
+int handle_close_session(u32 ta_handle, u32 info);
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id);
+
+void handle_unmap_shmem(u32 buf_id);
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p);
+
+struct tee_shm_pool *amdtee_config_shm(void);
+
+u32 get_buffer_id(struct tee_shm *shm);
+#endif /*AMDTEE_PRIVATE_H*/
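The session identifier defined above packs the TA handle into the low 16 bits and the session index (0 to TEE_NUM_SESSIONS - 1) into the high 16 bits; set_session_id(), get_ta_handle() and get_session_index() implement the two directions. A standalone round-trip of that packing (plain C, not driver code; the handle and index values are arbitrary):

/* Standalone round-trip of the session-id layout used by amdtee_private.h. */
#include <assert.h>
#include <stdint.h>

#define LOWER_TWO_BYTE_MASK 0x0000FFFF

int main(void)
{
	uint32_t ta_handle = 0x1234, index = 7, session;

	session = (index << 16) | (ta_handle & LOWER_TWO_BYTE_MASK);

	assert((session & LOWER_TWO_BYTE_MASK) == ta_handle);      /* get_ta_handle()    */
	assert(((session >> 16) & LOWER_TWO_BYTE_MASK) == index);  /* get_session_index() */
	return 0;
}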
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
new file mode 100644
index 000000000000..096dd4d92d39
--- /dev/null
+++ b/drivers/tee/amdtee/call.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-tee.h>
+#include <linux/slab.h>
+#include <linux/psp-sev.h>
+#include "amdtee_if.h"
+#include "amdtee_private.h"
+
+static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ amd->param_types = 0;
+ for (i = 0; i < count; i++) {
+ /* AMD TEE does not support meta parameter */
+ if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
+ }
+
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE)
+ continue;
+
+ /* It is assumed that all values are within 2^32-1 */
+ if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
+ u32 buf_id = get_buffer_id(tee[i].u.memref.shm);
+
+ amd->params[i].mref.buf_id = buf_id;
+ amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
+ amd->params[i].mref.size = tee[i].u.memref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ if (tee[i].u.value.c)
+ pr_warn("%s: Discarding value c", __func__);
+
+ amd->params[i].val.a = tee[i].u.value.a;
+ amd->params[i].val.b = tee[i].u.value.b;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ /* Assumes amd->param_types is valid */
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID ||
+ type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE ||
+ type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
+ type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
+ continue;
+
+ /*
+ * It is assumed that buf_id remains unchanged for
+ * both open_session and invoke_cmd call
+ */
+ if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
+ tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
+ tee[i].u.memref.size = amd->params[i].mref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ /* field 'c' not supported by AMD TEE */
+ tee[i].u.value.a = amd->params[i].val.a;
+ tee[i].u.value.b = amd->params[i].val.b;
+ tee[i].u.value.c = 0;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+int handle_unload_ta(u32 ta_handle)
+{
+ struct tee_cmd_unload_ta cmd = {0};
+ u32 status;
+ int ret;
+
+ if (!ta_handle)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("unload ta: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+int handle_close_session(u32 ta_handle, u32 info)
+{
+ struct tee_cmd_close_session cmd = {0};
+ u32 status;
+ int ret;
+
+ if (ta_handle == 0)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+ cmd.session_info = info;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("close session: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void handle_unmap_shmem(u32 buf_id)
+{
+ struct tee_cmd_unmap_shared_mem cmd = {0};
+ u32 status;
+ int ret;
+
+ cmd.buf_id = buf_id;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret)
+ pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
+ buf_id, status);
+}
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p)
+{
+ struct tee_cmd_invoke_cmd cmd = {0};
+ int ret;
+
+ if (!arg || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort invoke command\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ cmd.cmd_id = arg->func;
+ cmd.session_info = sinfo;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("invoke command: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
+ arg->ret_origin, arg->ret);
+ }
+
+ return ret;
+}
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
+{
+ struct tee_cmd_map_shared_mem *cmd;
+ phys_addr_t paddr;
+ int ret, i;
+ u32 status;
+
+ if (!count || !start || !buf_id)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /* Size must be page aligned */
+ for (i = 0; i < count ; i++) {
+ if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+
+ if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
+ pr_err("map shared memory: page unaligned. addr 0x%llx",
+ (u64)start[i].kaddr);
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+ }
+
+ cmd->sg_list.count = count;
+
+ /* Create buffer list */
+ for (i = 0; i < count ; i++) {
+ paddr = __psp_pa(start[i].kaddr);
+ cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
+ cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
+ cmd->sg_list.buf[i].size = start[i].size;
+ cmd->sg_list.size += cmd->sg_list.buf[i].size;
+
+ pr_debug("buf[%d]:hi addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].hi_addr);
+ pr_debug("buf[%d]:low addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].low_addr);
+ pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
+ pr_debug("list size = 0x%x\n", cmd->sg_list.size);
+ }
+
+ *buf_id = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
+ sizeof(*cmd), &status);
+ if (!ret && !status) {
+ *buf_id = cmd->buf_id;
+ pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
+ } else {
+ pr_err("map shared memory: status = 0x%x\n", status);
+ ret = -ENOMEM;
+ }
+
+free_cmd:
+ kfree(cmd);
+
+ return ret;
+}
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p)
+{
+ struct tee_cmd_open_session cmd = {0};
+ int ret;
+
+ if (!arg || !info || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_GENERIC;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort open session\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ *info = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("open session: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ *info = cmd.session_info;
+ pr_debug("open session: session info = 0x%x\n", *info);
+ }
+
+ pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
+ arg->ret_origin);
+
+ return ret;
+}
+
+int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+{
+ struct tee_cmd_load_ta cmd = {0};
+ phys_addr_t blob;
+ int ret;
+
+ if (size == 0 || !data || !arg)
+ return -EINVAL;
+
+ blob = __psp_pa(data);
+ if (blob & (PAGE_SIZE - 1)) {
+ pr_err("load TA: page unaligned. blob 0x%llx", blob);
+ return -EINVAL;
+ }
+
+ cmd.hi_addr = upper_32_bits(blob);
+ cmd.low_addr = lower_32_bits(blob);
+ cmd.size = size;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ set_session_id(cmd.ta_handle, 0, &arg->session);
+ }
+
+ pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
+ cmd.ta_handle, arg->ret_origin, arg->ret);
+
+ return 0;
+}
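Every helper in call.c above uses a two-level error convention: the return value of psp_tee_process_cmd() reports a driver/transport failure, while the u32 written through its last argument carries the TEE status of the command itself, and both have to be checked before trusting any output. A minimal hedged fragment of that pattern, reusing only identifiers that appear in this file (not a complete function):

/* Sketch of the two-level status check used throughout call.c. */
struct tee_cmd_unload_ta cmd = { .ta_handle = ta_handle };
u32 status;
int ret;

ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, &cmd, sizeof(cmd), &status);
if (ret)		/* PSP transport failed; no TEE status is available */
	return ret;
if (status)		/* transport succeeded but the TEE rejected the command */
	return -EBUSY;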
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
new file mode 100644
index 000000000000..6370bb55f512
--- /dev/null
+++ b/drivers/tee/amdtee/core.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include "amdtee_private.h"
+#include "../tee_private.h"
+#include <linux/psp-tee.h>
+
+static struct amdtee_driver_data *drv_data;
+static DEFINE_MUTEX(session_list_mutex);
+static struct amdtee_shm_context shmctx;
+
+static void amdtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_AMDTEE,
+ .impl_caps = 0,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int amdtee_open(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+ INIT_LIST_HEAD(&shmctx.shmdata_list);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void release_session(struct amdtee_session *sess)
+{
+ int i;
+
+ /* Close any open session */
+ for (i = 0; i < TEE_NUM_SESSIONS; ++i) {
+ /* Check if session entry 'i' is valid */
+ if (!test_bit(i, sess->sess_mask))
+ continue;
+
+ handle_close_session(sess->ta_handle, sess->session_info[i]);
+ }
+
+ /* Unload Trusted Application once all sessions are closed */
+ handle_unload_ta(sess->ta_handle);
+ kfree(sess);
+}
+
+static void amdtee_release(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+
+ if (!ctxdata)
+ return;
+
+ while (true) {
+ struct amdtee_session *sess;
+
+ sess = list_first_entry_or_null(&ctxdata->sess_list,
+ struct amdtee_session,
+ list_node);
+
+ if (!sess)
+ break;
+
+ list_del(&sess->list_node);
+ release_session(sess);
+ }
+ kfree(ctxdata);
+
+ ctx->data = NULL;
+}
+
+/**
+ * alloc_session() - Allocate a session structure
+ * @ctxdata: TEE Context data structure
+ * @session: Session ID for which 'struct amdtee_session' structure is to be
+ * allocated.
+ *
+ * Scans the TEE context's session list to check if TA is already loaded in to
+ * TEE. If yes, returns the 'session' structure for that TA. Else allocates,
+ * initializes a new 'session' structure and adds it to context's session list.
+ *
+ * The caller must hold a mutex.
+ *
+ * Returns:
+ * 'struct amdtee_session *' on success and NULL on failure.
+ */
+static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ struct amdtee_session *sess;
+ u32 ta_handle = get_ta_handle(session);
+
+ /* Scan session list to check if TA is already loaded in to TEE */
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->ta_handle == ta_handle) {
+ kref_get(&sess->refcount);
+ return sess;
+ }
+
+ /* Allocate a new session and add to list */
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (sess) {
+ sess->ta_handle = ta_handle;
+ kref_init(&sess->refcount);
+ spin_lock_init(&sess->lock);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ }
+
+ return sess;
+}
+
+/* Requires mutex to be held */
+static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ u32 ta_handle = get_ta_handle(session);
+ u32 index = get_session_index(session);
+ struct amdtee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (ta_handle == sess->ta_handle &&
+ test_bit(index, sess->sess_mask))
+ return sess;
+
+ return NULL;
+}
+
+u32 get_buffer_id(struct tee_shm *shm)
+{
+ u32 buf_id = 0;
+ struct amdtee_shm_data *shmdata;
+
+ list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node)
+ if (shmdata->kaddr == shm->kaddr) {
+ buf_id = shmdata->buf_id;
+ break;
+ }
+
+ return buf_id;
+}
+
+static DEFINE_MUTEX(drv_mutex);
+static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
+ size_t *ta_size)
+{
+ const struct firmware *fw;
+ char fw_name[TA_PATH_MAX];
+ struct {
+ u32 lo;
+ u16 mid;
+ u16 hi_ver;
+ u8 seq_n[8];
+ } *uuid = ptr;
+ int n, rc = 0;
+
+ n = snprintf(fw_name, TA_PATH_MAX,
+ "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
+ TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver,
+ uuid->seq_n[0], uuid->seq_n[1],
+ uuid->seq_n[2], uuid->seq_n[3],
+ uuid->seq_n[4], uuid->seq_n[5],
+ uuid->seq_n[6], uuid->seq_n[7]);
+ if (n < 0 || n >= TA_PATH_MAX) {
+ pr_err("failed to get firmware name\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&drv_mutex);
+ n = request_firmware(&fw, fw_name, &ctx->teedev->dev);
+ if (n) {
+ pr_err("failed to load firmware %s\n", fw_name);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ *ta_size = roundup(fw->size, PAGE_SIZE);
+ *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
+ if (IS_ERR(*ta)) {
+ pr_err("%s: get_free_pages failed 0x%llx\n", __func__,
+ (u64)*ta);
+ rc = -ENOMEM;
+ goto rel_fw;
+ }
+
+ memcpy(*ta, fw->data, fw->size);
+rel_fw:
+ release_firmware(fw);
+unlock:
+ mutex_unlock(&drv_mutex);
+ return rc;
+}
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess = NULL;
+ u32 session_info;
+ size_t ta_size;
+ int rc, i;
+ void *ta;
+
+ if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
+ pr_err("unsupported client login method\n");
+ return -EINVAL;
+ }
+
+ rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size);
+ if (rc) {
+ pr_err("failed to copy TA binary\n");
+ return rc;
+ }
+
+ /* Load the TA binary into TEE environment */
+ handle_load_ta(ta, ta_size, arg);
+ if (arg->ret == TEEC_SUCCESS) {
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+ }
+
+ if (arg->ret != TEEC_SUCCESS)
+ goto out;
+
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Find an empty session index for the given TA */
+ spin_lock(&sess->lock);
+ i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+ if (i < TEE_NUM_SESSIONS)
+ set_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+
+ if (i >= TEE_NUM_SESSIONS) {
+ pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+
+ if (arg->ret == TEEC_SUCCESS) {
+ sess->session_info[i] = session_info;
+ set_session_id(sess->ta_handle, i, &arg->session);
+ } else {
+ pr_err("open_session failed %d\n", arg->ret);
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+out:
+ free_pages((u64)ta, get_order(ta_size));
+ return rc;
+}
+
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ /* Unload the TA from TEE */
+ handle_unload_ta(sess->ta_handle);
+ mutex_lock(&session_list_mutex);
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
+int amdtee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ u32 i, ta_handle, session_info;
+ struct amdtee_session *sess;
+
+ pr_debug("%s: sid = 0x%x\n", __func__, session);
+
+ /*
+ * Check that the session is valid and clear the session
+ * usage bit
+ */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, session);
+ if (sess) {
+ ta_handle = get_ta_handle(session);
+ i = get_session_index(session);
+ session_info = sess->session_info[i];
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ /* Close the session */
+ handle_close_session(ta_handle, session_info);
+
+ kref_put(&sess->refcount, destroy_session);
+
+ return 0;
+}
+
+int amdtee_map_shmem(struct tee_shm *shm)
+{
+ struct shmem_desc shmem;
+ struct amdtee_shm_data *shmnode;
+ int rc, count;
+ u32 buf_id;
+
+ if (!shm)
+ return -EINVAL;
+
+ shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL);
+ if (!shmnode)
+ return -ENOMEM;
+
+ count = 1;
+ shmem.kaddr = shm->kaddr;
+ shmem.size = shm->size;
+
+ /*
+ * Send a MAP command to TEE and get the corresponding
+ * buffer Id
+ */
+ rc = handle_map_shmem(count, &shmem, &buf_id);
+ if (rc) {
+ pr_err("map_shmem failed: ret = %d\n", rc);
+ kfree(shmnode);
+ return rc;
+ }
+
+ shmnode->kaddr = shm->kaddr;
+ shmnode->buf_id = buf_id;
+ list_add(&shmnode->shm_node, &shmctx.shmdata_list);
+
+ pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
+
+ return 0;
+}
+
+void amdtee_unmap_shmem(struct tee_shm *shm)
+{
+ struct amdtee_shm_data *shmnode;
+ u32 buf_id;
+
+ if (!shm)
+ return;
+
+ buf_id = get_buffer_id(shm);
+ /* Unmap the shared memory from TEE */
+ handle_unmap_shmem(buf_id);
+
+ list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node)
+ if (buf_id == shmnode->buf_id) {
+ list_del(&shmnode->shm_node);
+ kfree(shmnode);
+ break;
+ }
+}
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess;
+ u32 i, session_info;
+
+ /* Check that the session is valid */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, arg->session);
+ if (sess) {
+ i = get_session_index(arg->session);
+ session_info = sess->session_info[i];
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ handle_invoke_cmd(arg, session_info, param);
+
+ return 0;
+}
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ return -EINVAL;
+}
+
+static const struct tee_driver_ops amdtee_ops = {
+ .get_version = amdtee_get_version,
+ .open = amdtee_open,
+ .release = amdtee_release,
+ .open_session = amdtee_open_session,
+ .close_session = amdtee_close_session,
+ .invoke_func = amdtee_invoke_func,
+ .cancel_req = amdtee_cancel_req,
+};
+
+static const struct tee_desc amdtee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &amdtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init amdtee_driver_init(void)
+{
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct amdtee *amdtee;
+ int rc;
+
+ rc = psp_check_tee_status();
+ if (rc) {
+ pr_err("amd-tee driver: tee not present\n");
+ return rc;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL);
+ if (!amdtee) {
+ rc = -ENOMEM;
+ goto err_kfree_drv_data;
+ }
+
+ pool = amdtee_config_shm();
+ if (IS_ERR(pool)) {
+ pr_err("shared pool configuration error\n");
+ rc = PTR_ERR(pool);
+ goto err_kfree_amdtee;
+ }
+
+ teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ amdtee->teedev = teedev;
+
+ rc = tee_device_register(amdtee->teedev);
+ if (rc)
+ goto err_device_unregister;
+
+ amdtee->pool = pool;
+
+ drv_data->amdtee = amdtee;
+
+ pr_info("amd-tee driver initialization successful\n");
+ return 0;
+
+err_device_unregister:
+ tee_device_unregister(amdtee->teedev);
+
+err_free_pool:
+ tee_shm_pool_free(pool);
+
+err_kfree_amdtee:
+ kfree(amdtee);
+
+err_kfree_drv_data:
+ kfree(drv_data);
+ drv_data = NULL;
+
+ pr_err("amd-tee driver initialization failed\n");
+ return rc;
+}
+module_init(amdtee_driver_init);
+
+static void __exit amdtee_driver_exit(void)
+{
+ struct amdtee *amdtee;
+
+ if (!drv_data || !drv_data->amdtee)
+ return;
+
+ amdtee = drv_data->amdtee;
+
+ tee_device_unregister(amdtee->teedev);
+ tee_shm_pool_free(amdtee->pool);
+}
+module_exit(amdtee_driver_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION("AMD-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
new file mode 100644
index 000000000000..065854e2db18
--- /dev/null
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-sev.h>
+#include "amdtee_private.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned long va;
+ int rc;
+
+ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!va)
+ return -ENOMEM;
+
+ shm->kaddr = (void *)va;
+ shm->paddr = __psp_pa((void *)va);
+ shm->size = PAGE_SIZE << order;
+
+	/* Map the allocated memory into TEE */
+ rc = amdtee_map_shmem(shm);
+ if (rc) {
+ free_pages(va, order);
+ shm->kaddr = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
+{
+ /* Unmap the shared memory from TEE */
+ amdtee_unmap_shmem(shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+struct tee_shm_pool *amdtee_config_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f91db24bbf3b..db1ef144c63a 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1586,7 +1586,7 @@ ecryptfs_process_key_cipher(struct crypto_skcipher **key_tfm,
}
crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
if (*key_size == 0)
- *key_size = crypto_skcipher_default_keysize(*key_tfm);
+ *key_size = crypto_skcipher_max_keysize(*key_tfm);
get_random_bytes(dummy_key, *key_size);
rc = crypto_skcipher_setkey(*key_tfm, dummy_key, *key_size);
if (rc) {
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 216fbe6a4837..7d326aa0308e 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -2204,9 +2204,9 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
if (mount_crypt_stat->global_default_cipher_key_size == 0) {
printk(KERN_WARNING "No key size specified at mount; "
"defaulting to [%d]\n",
- crypto_skcipher_default_keysize(tfm));
+ crypto_skcipher_max_keysize(tfm));
mount_crypt_stat->global_default_cipher_key_size =
- crypto_skcipher_default_keysize(tfm);
+ crypto_skcipher_max_keysize(tfm);
}
if (crypt_stat->key_size == 0)
crypt_stat->key_size =
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index a3bdadf6221e..1b3ebe8593c0 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -227,6 +227,16 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+{
+ return alg->maxauthsize;
+}
+
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+{
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 5cd846defdd6..e115f9215ed5 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -47,7 +47,13 @@ struct crypto_instance {
struct crypto_alg alg;
struct crypto_template *tmpl;
- struct hlist_node list;
+
+ union {
+ /* Node in list of instances after registration. */
+ struct hlist_node list;
+ /* List of attached spawns before registration. */
+ struct crypto_spawn *spawns;
+ };
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -57,8 +63,6 @@ struct crypto_template {
struct hlist_head instances;
struct module *module;
- struct crypto_instance *(*alloc)(struct rtattr **tb);
- void (*free)(struct crypto_instance *inst);
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -67,9 +71,16 @@ struct crypto_template {
struct crypto_spawn {
struct list_head list;
struct crypto_alg *alg;
- struct crypto_instance *inst;
+ union {
+ /* Back pointer to instance after registration.*/
+ struct crypto_instance *inst;
+ /* Spawn list pointer prior to registration. */
+ struct crypto_spawn *next;
+ };
const struct crypto_type *frontend;
u32 mask;
+ bool dead;
+ bool registered;
};
struct crypto_queue {
@@ -95,45 +106,21 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
-int crypto_unregister_instance(struct crypto_instance *inst);
-
-int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst, u32 mask);
-int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst,
- const struct crypto_type *frontend);
-int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
- u32 type, u32 mask);
+void crypto_unregister_instance(struct crypto_instance *inst);
+int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
-static inline void crypto_set_spawn(struct crypto_spawn *spawn,
- struct crypto_instance *inst)
-{
- spawn->inst = inst;
-}
-
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
-struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
- const struct crypto_type *frontend,
- u32 type, u32 mask);
-
-static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
- u32 type, u32 mask)
-{
- return crypto_attr_alg2(rta, NULL, type, mask);
-}
-
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
-void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
- unsigned int head);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -200,13 +187,38 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst)
return inst->__ctx;
}
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
+
+static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+
+static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_spawn_cipher_alg(
+ struct crypto_cipher_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
static inline struct crypto_cipher *crypto_spawn_cipher(
- struct crypto_spawn *spawn)
+ struct crypto_cipher_spawn *spawn)
{
u32 type = CRYPTO_ALG_TYPE_CIPHER;
u32 mask = CRYPTO_ALG_TYPE_MASK;
- return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
+ return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
}
static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
@@ -221,12 +233,6 @@ static inline struct crypto_async_request *crypto_get_backlog(
container_of(queue->backlog, struct crypto_async_request, list);
}
-static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
- u32 type, u32 mask)
-{
- return crypto_attr_alg(tb[1], type, mask);
-}
-
static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
return (type ^ off) & mask & off;
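
A minimal sketch (not part of this series) of how a template's ->create() callback might use the reworked spawn API, where crypto_grab_cipher() records the owning instance at grab time and the old crypto_set_spawn() step disappears. The example_create() name, the bare kzalloc() sizing and the zero type/mask are illustrative assumptions only.

#include <crypto/algapi.h>
#include <linux/slab.h>

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_cipher_spawn *spawn;
        struct crypto_instance *inst;
        const char *cipher_name;
        int err;

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;
        spawn = crypto_instance_ctx(inst);

        /* The instance is passed to the grab helper; no crypto_set_spawn(). */
        err = crypto_grab_cipher(spawn, inst, cipher_name, 0, 0);
        if (err) {
                kfree(inst);
                return err;
        }

        /*
         * ... fill in inst->alg from crypto_spawn_cipher_alg(spawn) and
         * register with crypto_register_instance(tmpl, inst) ...
         */
        return 0;
}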
diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h
index c71f6ef47f0f..38f490cd50a8 100644
--- a/include/crypto/cast6.h
+++ b/include/crypto/cast6.h
@@ -15,11 +15,10 @@ struct cast6_ctx {
u8 Kr[12][4];
};
-int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
+int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen);
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
-void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
-void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src);
#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index fe7f73bad1e2..cee446c59497 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -169,6 +169,17 @@ struct shash_desc {
* @export: see struct ahash_alg
* @import: see struct ahash_alg
* @setkey: see struct ahash_alg
+ * @init_tfm: Initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation
+ * time, right after the transformation context was
+ * allocated. In case the cryptographic hardware has
+ * some special requirements which need to be handled
+ * by software, this function shall check for the precise
+ * requirement of the transformation and put any software
+ * fallbacks in place.
+ * @exit_tfm: Deinitialize the cryptographic transformation object.
+ * This is a counterpart to @init_tfm, used to remove
+ * various changes set in @init_tfm.
* @digestsize: see struct ahash_alg
* @statesize: see struct ahash_alg
* @descsize: Size of the operational state for the message digest. This state
@@ -189,6 +200,8 @@ struct shash_alg {
int (*import)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
+ int (*init_tfm)(struct crypto_shash *tfm);
+ void (*exit_tfm)(struct crypto_shash *tfm);
unsigned int descsize;
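
A minimal sketch of how a driver might use the new @init_tfm/@exit_tfm hooks to install a software fallback; the example_* names, the context layout and the "sha256-generic" fallback choice are assumptions for illustration, not code from this series.

#include <crypto/internal/hash.h>

struct example_hash_ctx {
        struct crypto_shash *fallback;  /* software path for corner cases */
};

static int example_init_tfm(struct crypto_shash *tfm)
{
        struct example_hash_ctx *ctx = crypto_shash_ctx(tfm);

        /* Called once, right after the transform context is allocated. */
        ctx->fallback = crypto_alloc_shash("sha256-generic", 0, 0);
        return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void example_exit_tfm(struct crypto_shash *tfm)
{
        struct example_hash_ctx *ctx = crypto_shash_ctx(tfm);

        crypto_free_shash(ctx->fallback);
}

static struct shash_alg example_alg = {
        /* .init/.update/.final/.digestsize/.base.cra_ctxsize etc. elided */
        .init_tfm       = example_init_tfm,
        .exit_tfm       = example_exit_tfm,
};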
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 9de57367afbb..cf478681b53e 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -68,10 +68,8 @@ int crypto_register_acomp(struct acomp_alg *alg);
* compression algorithm
*
* @alg: algorithm definition
- *
- * Return: zero on success; error code in case of error
*/
-int crypto_unregister_acomp(struct acomp_alg *alg);
+void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index c509ec30fc65..27b7b0224ea6 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -81,14 +81,9 @@ static inline struct aead_request *aead_request_cast(
return container_of(req, struct aead_request, base);
}
-static inline void crypto_set_aead_spawn(
- struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_aead(struct crypto_aead_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
{
@@ -113,16 +108,6 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
aead->reqsize = reqsize;
}
-static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
-{
- return alg->maxauthsize;
-}
-
-static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
-{
- return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
-}
-
static inline void aead_init_queue(struct aead_queue *queue,
unsigned int max_qlen)
{
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index d6c8a42789ad..8d3220c9ab77 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -78,15 +78,9 @@ static inline void *akcipher_instance_ctx(struct akcipher_instance *inst)
return crypto_instance_ctx(akcipher_crypto_instance(inst));
}
-static inline void crypto_set_akcipher_spawn(
- struct crypto_akcipher_spawn *spawn,
- struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline struct crypto_akcipher *crypto_spawn_akcipher(
struct crypto_akcipher_spawn *spawn)
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
index aa5d4a16aac5..b085dc1ac151 100644
--- a/include/crypto/internal/chacha.h
+++ b/include/crypto/internal/chacha.h
@@ -34,7 +34,7 @@ static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
return chacha_setkey(tfm, key, keysize, 20);
}
-static int inline chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize)
{
return chacha_setkey(tfm, key, keysize, 12);
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
index f62a2bb1866b..723fe5bf16da 100644
--- a/include/crypto/internal/des.h
+++ b/include/crypto/internal/des.h
@@ -35,10 +35,6 @@ static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key)
else
err = 0;
}
-
- if (err)
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
-
memzero_explicit(&tmp, sizeof(tmp));
return err;
}
@@ -95,14 +91,9 @@ bad:
static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm,
const u8 *key)
{
- int err;
-
- err = des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
- crypto_tfm_get_flags(tfm) &
- CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- if (err)
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
- return err;
+ return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
+ crypto_tfm_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
}
static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm,
@@ -120,20 +111,16 @@ static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm,
static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key,
int keylen)
{
- if (keylen != DES_KEY_SIZE) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != DES_KEY_SIZE)
return -EINVAL;
- }
return crypto_des_verify_key(crypto_aead_tfm(tfm), key);
}
static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key,
int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != DES3_EDE_KEY_SIZE)
return -EINVAL;
- }
return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key);
}
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 0108c0c7b2ed..229d37681a9d 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -21,7 +21,6 @@ struct aead_geniv_ctx {
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type, u32 mask);
-void aead_geniv_free(struct aead_instance *inst);
int aead_init_geniv(struct crypto_aead *tfm);
void aead_exit_geniv(struct crypto_aead *tfm);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index bfc9db7b100d..89f6f46ab2b8 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -30,11 +30,25 @@ struct crypto_hash_walk {
};
struct ahash_instance {
- struct ahash_alg alg;
+ void (*free)(struct ahash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct ahash_alg, halg.base)];
+ struct crypto_instance base;
+ } s;
+ struct ahash_alg alg;
+ };
};
struct shash_instance {
- struct shash_alg alg;
+ void (*free)(struct shash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct shash_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct shash_alg alg;
+ };
};
struct crypto_ahash_spawn {
@@ -45,8 +59,6 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
-extern const struct crypto_type crypto_ahash_type;
-
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
@@ -70,12 +82,11 @@ static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
}
int crypto_register_ahash(struct ahash_alg *alg);
-int crypto_unregister_ahash(struct ahash_alg *alg);
+void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
-void ahash_free_instance(struct crypto_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -85,37 +96,51 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
+{
+ return crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
-int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
- struct hash_alg_common *alg,
- struct crypto_instance *inst);
+int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+static inline struct hash_alg_common *crypto_spawn_ahash_alg(
+ struct crypto_ahash_spawn *spawn)
+{
+ return __crypto_hash_alg_common(spawn->base.alg);
+}
int crypto_register_shash(struct shash_alg *alg);
-int crypto_unregister_shash(struct shash_alg *alg);
+void crypto_unregister_shash(struct shash_alg *alg);
int crypto_register_shashes(struct shash_alg *algs, int count);
-int crypto_unregister_shashes(struct shash_alg *algs, int count);
+void crypto_unregister_shashes(struct shash_alg *algs, int count);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst);
-void shash_free_instance(struct crypto_instance *inst);
+void shash_free_singlespawn_instance(struct shash_instance *inst);
-int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
- struct shash_alg *alg,
- struct crypto_instance *inst);
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+static inline struct shash_alg *crypto_spawn_shash_alg(
+ struct crypto_shash_spawn *spawn)
+{
+ return __crypto_shash_alg(spawn->base.alg);
+}
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
@@ -143,13 +168,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
static inline struct crypto_instance *ahash_crypto_instance(
struct ahash_instance *inst)
{
- return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
+ return &inst->s.base;
}
static inline struct ahash_instance *ahash_instance(
struct crypto_instance *inst)
{
- return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
+ return container_of(inst, struct ahash_instance, s.base);
}
static inline void *ahash_instance_ctx(struct ahash_instance *inst)
@@ -157,17 +182,6 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst)
return crypto_instance_ctx(ahash_crypto_instance(inst));
}
-static inline unsigned int ahash_instance_headroom(void)
-{
- return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
-}
-
-static inline struct ahash_instance *ahash_alloc_instance(
- const char *name, struct crypto_alg *alg)
-{
- return crypto_alloc_instance(name, alg, ahash_instance_headroom());
-}
-
static inline void ahash_request_complete(struct ahash_request *req, int err)
{
req->base.complete(&req->base, err);
@@ -204,26 +218,24 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
static inline struct crypto_instance *shash_crypto_instance(
struct shash_instance *inst)
{
- return container_of(&inst->alg.base, struct crypto_instance, alg);
+ return &inst->s.base;
}
static inline struct shash_instance *shash_instance(
struct crypto_instance *inst)
{
- return container_of(__crypto_shash_alg(&inst->alg),
- struct shash_instance, alg);
+ return container_of(inst, struct shash_instance, s.base);
}
-static inline void *shash_instance_ctx(struct shash_instance *inst)
+static inline struct shash_instance *shash_alg_instance(
+ struct crypto_shash *shash)
{
- return crypto_instance_ctx(shash_crypto_instance(inst));
+ return shash_instance(crypto_tfm_alg_instance(&shash->base));
}
-static inline struct shash_instance *shash_alloc_instance(
- const char *name, struct crypto_alg *alg)
+static inline void *shash_instance_ctx(struct shash_instance *inst)
{
- return crypto_alloc_instance(name, alg,
- sizeof(struct shash_alg) - sizeof(*alg));
+ return crypto_instance_ctx(shash_crypto_instance(inst));
}
static inline struct crypto_shash *crypto_spawn_shash(
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 479b0cab2a1a..064e52ca5248 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -11,48 +11,23 @@
#include <crypto/poly1305.h>
/*
- * Poly1305 core functions. These implement the ε-almost-∆-universal hash
- * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
- * ("s key") at the end. They also only support block-aligned inputs.
+ * Poly1305 core functions. These only accept whole blocks; the caller must
+ * handle any needed block buffering and padding. 'hibit' must be 1 for any
+ * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is
+ * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise,
+ * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
*/
-void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
+
+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
static inline void poly1305_core_init(struct poly1305_state *state)
{
*state = (struct poly1305_state){};
}
void poly1305_core_blocks(struct poly1305_state *state,
- const struct poly1305_key *key, const void *src,
+ const struct poly1305_core_key *key, const void *src,
unsigned int nblocks, u32 hibit);
-void poly1305_core_emit(const struct poly1305_state *state, void *dst);
-
-/*
- * Poly1305 requires a unique key for each tag, which implies that we can't set
- * it on the tfm that gets accessed by multiple users simultaneously. Instead we
- * expect the key as the first 32 bytes in the update() call.
- */
-static inline
-unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (!dctx->sset) {
- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_setkey(dctx->r, src);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return srclen;
-}
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst);
#endif
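
A minimal sketch of the revised core interface, computing a MAC over a block-aligned buffer. It assumes the caller has already handled any partial final block (which would be padded and fed with hibit = 0); the helper name and the block-aligned @len restriction are assumptions for illustration.

#include <crypto/internal/poly1305.h>
#include <asm/unaligned.h>

static void example_poly1305_mac(const u8 raw_key[POLY1305_KEY_SIZE],
                                 const u8 *data, unsigned int len,
                                 u8 mac[POLY1305_DIGEST_SIZE])
{
        struct poly1305_core_key key;
        struct poly1305_state state;
        u32 nonce[4];
        int i;

        poly1305_core_setkey(&key, raw_key);            /* first 16 bytes: r */
        for (i = 0; i < 4; i++)                         /* last 16 bytes: s */
                nonce[i] = get_unaligned_le32(raw_key + 16 + 4 * i);

        poly1305_core_init(&state);
        poly1305_core_blocks(&state, &key, data,
                             len / POLY1305_BLOCK_SIZE, 1);

        /* Non-NULL nonce: emit the full MAC, not just the unkeyed hash. */
        poly1305_core_emit(&state, nonce, mac);
}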
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 6727ef0fc4d1..f834274c2493 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -112,10 +112,8 @@ int crypto_register_scomp(struct scomp_alg *alg);
* compression algorithm
*
* @alg: algorithm definition
- *
- * Return: zero on success; error code in case of error
*/
-int crypto_unregister_scomp(struct scomp_alg *alg);
+void crypto_unregister_scomp(struct scomp_alg *alg);
int crypto_register_scomps(struct scomp_alg *algs, int count);
void crypto_unregister_scomps(struct scomp_alg *algs, int count);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 921c409fe1b1..10226c12c5df 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -88,14 +88,9 @@ static inline void skcipher_request_complete(struct skcipher_request *req, int e
req->base.complete(&req->base, err);
}
-static inline void crypto_set_skcipher_spawn(
- struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
@@ -140,8 +135,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req);
-int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
- bool atomic);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
@@ -214,9 +207,17 @@ skcipher_cipher_simple(struct crypto_skcipher *tfm)
return ctx->cipher;
}
-struct skcipher_instance *
-skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
- struct crypto_alg **cipher_alg_ret);
+
+struct skcipher_instance *skcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct crypto_alg *skcipher_ialg_simple(
+ struct skcipher_instance *inst)
+{
+ struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
+
+ return crypto_spawn_cipher_alg(spawn);
+}
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
index 53c04423c582..306925fea190 100644
--- a/include/crypto/nhpoly1305.h
+++ b/include/crypto/nhpoly1305.h
@@ -7,7 +7,7 @@
#define _NHPOLY1305_H
#include <crypto/hash.h>
-#include <crypto/poly1305.h>
+#include <crypto/internal/poly1305.h>
/* NH parameterization: */
@@ -33,7 +33,7 @@
#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
struct nhpoly1305_key {
- struct poly1305_key poly_key;
+ struct poly1305_core_key poly_key;
u32 nh_key[NH_KEY_WORDS];
};
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 74c6e1cd73ee..f1f67fc749cf 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -13,12 +13,29 @@
#define POLY1305_KEY_SIZE 32
#define POLY1305_DIGEST_SIZE 16
+/* The poly1305_key and poly1305_state types are mostly opaque and
+ * implementation-defined. Limbs might be in base 2^64, base 2^26, or
+ * something else entirely. The union type provided keeps these 64-bit
+ * aligned for the case in which this is implemented using 64x64 multiplies.
+ */
+
struct poly1305_key {
- u32 r[5]; /* key, base 2^26 */
+ union {
+ u32 r[5];
+ u64 r64[3];
+ };
+};
+
+struct poly1305_core_key {
+ struct poly1305_key key;
+ struct poly1305_key precomputed_s;
};
struct poly1305_state {
- u32 h[5]; /* accumulator, base 2^26 */
+ union {
+ u32 h[5];
+ u64 h64[3];
+ };
};
struct poly1305_desc_ctx {
@@ -35,7 +52,10 @@ struct poly1305_desc_ctx {
/* accumulator */
struct poly1305_state h;
/* key */
- struct poly1305_key r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE];
+ union {
+ struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE];
+ struct poly1305_core_key core_r;
+ };
};
void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h
index 7dd780c5d058..75c7eaa20853 100644
--- a/include/crypto/serpent.h
+++ b/include/crypto/serpent.h
@@ -22,7 +22,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen);
int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src);
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index b4655d91661f..141e7690f9c3 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -35,14 +35,7 @@ struct skcipher_request {
};
struct crypto_skcipher {
- int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct skcipher_request *req);
- int (*decrypt)(struct skcipher_request *req);
-
- unsigned int ivsize;
unsigned int reqsize;
- unsigned int keysize;
struct crypto_tfm base;
};
@@ -255,7 +248,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
- return tfm->ivsize;
+ return crypto_skcipher_alg(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
@@ -366,11 +359,8 @@ static inline void crypto_sync_skcipher_clear_flags(
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return tfm->setkey(tfm, key, keylen);
-}
+int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen);
static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -378,10 +368,16 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
-static inline unsigned int crypto_skcipher_default_keysize(
+static inline unsigned int crypto_skcipher_min_keysize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg(tfm)->min_keysize;
+}
+
+static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
- return tfm->keysize;
+ return crypto_skcipher_alg(tfm)->max_keysize;
}
/**
diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h
index 2e2c09673d88..f6b307a58554 100644
--- a/include/crypto/twofish.h
+++ b/include/crypto/twofish.h
@@ -19,7 +19,7 @@ struct twofish_ctx {
};
int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
- unsigned int key_len, u32 *flags);
+ unsigned int key_len);
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
#endif
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 75fd96ff976b..0f8dba69feb4 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -8,28 +8,19 @@
#define XTS_BLOCK_SIZE 16
-#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
-
static inline int xts_check_key(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
- u32 *flags = &tfm->crt_flags;
-
/*
* key consists of keys of equal size concatenated, therefore
* the length must be even.
*/
- if (keylen % 2) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (keylen % 2)
return -EINVAL;
- }
/* ensure that the AES and tweak key are not identical */
- if (fips_enabled &&
- !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
return -EINVAL;
- }
return 0;
}
@@ -41,18 +32,14 @@ static inline int xts_verify_key(struct crypto_skcipher *tfm,
* key consists of keys of equal size concatenated, therefore
* the length must be even.
*/
- if (keylen % 2) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen % 2)
return -EINVAL;
- }
/* ensure that the AES and tweak key are not identical */
if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
- !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ !crypto_memneq(key, key + (keylen / 2), keylen / 2))
return -EINVAL;
- }
return 0;
}
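
A minimal sketch of an XTS setkey handler under the new error convention, where the helper simply returns -EINVAL rather than setting CRYPTO_TFM_RES_* flags; example_xts_setkey() is a hypothetical driver callback, not code from this series.

#include <crypto/xts.h>
#include <crypto/skcipher.h>

static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        int err;

        /* Rejects odd-length keys and (in FIPS mode) identical half-keys. */
        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        /* ... program the data and tweak half-keys into the cipher ... */
        return 0;
}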
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e51ee772b9f5..def48a583670 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -59,6 +59,7 @@ enum cpuhp_state {
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
+ CPUHP_PADATA_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 23365a9d062e..763863dbc079 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -107,16 +107,9 @@
#define CRYPTO_TFM_NEED_KEY 0x00000001
#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
-
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
/*
* Miscellaneous stuff.
@@ -570,7 +563,7 @@ static inline int crypto_wait_req(int err, struct crypto_wait *wait)
reinit_completion(&wait->completion);
err = wait->err;
break;
- };
+ }
return err;
}
@@ -584,9 +577,9 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
-int crypto_unregister_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
/*
* Algorithm query interface.
@@ -599,34 +592,10 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
* crypto_free_*(), as well as the various helpers below.
*/
-struct cipher_tfm {
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
-
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define crt_cipher crt_u.cipher
-#define crt_compress crt_u.compress
-
struct crypto_tfm {
u32 crt_flags;
- union {
- struct cipher_tfm cipher;
- struct compress_tfm compress;
- } crt_u;
-
void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
@@ -763,12 +732,6 @@ static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
return (struct crypto_cipher *)tfm;
}
-static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return __crypto_cipher_cast(tfm);
-}
-
/**
* crypto_alloc_cipher() - allocate single block cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -826,11 +789,6 @@ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
-static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->crt_cipher;
-}
-
/**
* crypto_cipher_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -884,12 +842,8 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
- key, keylen);
-}
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen);
/**
* crypto_cipher_encrypt_one() - encrypt one block of plaintext
@@ -900,12 +854,8 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
* Invoke the encryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
-static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
/**
* crypto_cipher_decrypt_one() - decrypt one block of ciphertext
@@ -916,25 +866,14 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
* Invoke the decryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
-static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
}
-static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
-{
- BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
- CRYPTO_ALG_TYPE_MASK);
- return __crypto_comp_cast(tfm);
-}
-
static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
u32 type, u32 mask)
{
@@ -969,26 +908,13 @@ static inline const char *crypto_comp_name(struct crypto_comp *tfm)
return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
-static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
-{
- return &crypto_comp_tfm(tfm)->crt_compress;
-}
+int crypto_comp_compress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
-static inline int crypto_comp_compress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
-
-static inline int crypto_comp_decompress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
+int crypto_comp_decompress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
#endif /* _LINUX_CRYPTO_H */
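
A minimal sketch of the single-block cipher API, which is now backed by out-of-line crypto_cipher_setkey()/crypto_cipher_encrypt_one() rather than the removed cit_* ops; the "aes" algorithm name, 16-byte sizes and abbreviated error handling are assumptions for illustration.

#include <linux/crypto.h>

static int example_one_block(const u8 key[16], const u8 in[16], u8 out[16])
{
        struct crypto_cipher *tfm;
        int err;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_cipher_setkey(tfm, key, 16);
        if (!err)
                crypto_cipher_encrypt_one(tfm, out, in);

        crypto_free_cipher(tfm);
        return err;
}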
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 23717eeaad23..a0d8b41850b2 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -9,17 +9,17 @@
#ifndef PADATA_H
#define PADATA_H
+#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/notifier.h>
#include <linux/kobject.h>
#define PADATA_CPU_SERIAL 0x01
#define PADATA_CPU_PARALLEL 0x02
/**
- * struct padata_priv - Embedded to the users data structure.
+ * struct padata_priv - Represents one job
*
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
@@ -42,7 +42,7 @@ struct padata_priv {
};
/**
- * struct padata_list
+ * struct padata_list - one per work type per CPU
*
* @list: List head.
* @lock: List lock.
@@ -70,9 +70,6 @@ struct padata_serial_queue {
*
* @parallel: List to wait for parallelization.
* @reorder: List to wait for reordering after parallel processing.
- * @serial: List to wait for serialization after reordering.
- * @pwork: work struct for parallelization.
- * @swork: work struct for serialization.
* @work: work struct for parallelization.
* @num_obj: Number of objects that are processed by this cpu.
*/
@@ -98,12 +95,11 @@ struct padata_cpumask {
* struct parallel_data - Internal control structure, covers everything
* that depends on the cpumask in use.
*
- * @pinst: padata instance.
+ * @ps: padata_shell object.
* @pqueue: percpu padata queues used for parallelization.
 * @squeue: percpu padata queues used for serialization.
- * @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
- * @max_seq_nr: Maximal used sequence number.
+ * @seq_nr: Sequence number of the parallelized data object.
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
@@ -111,30 +107,44 @@ struct padata_cpumask {
* @lock: Reorder lock.
*/
struct parallel_data {
- struct padata_instance *pinst;
+ struct padata_shell *ps;
struct padata_parallel_queue __percpu *pqueue;
struct padata_serial_queue __percpu *squeue;
- atomic_t reorder_objects;
atomic_t refcnt;
atomic_t seq_nr;
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
struct work_struct reorder_work;
- spinlock_t lock ____cacheline_aligned;
+ spinlock_t ____cacheline_aligned lock;
+};
+
+/**
+ * struct padata_shell - Wrapper around struct parallel_data; its
+ * purpose is to allow the underlying control structure to be replaced
+ * on the fly using RCU.
+ *
+ * @pinst: padata instance.
+ * @pd: Actual parallel_data structure which may be substituted on the fly.
+ * @opd: Pointer to old pd to be freed by padata_replace.
+ * @list: List entry in padata_instance list.
+ */
+struct padata_shell {
+ struct padata_instance *pinst;
+ struct parallel_data __rcu *pd;
+ struct parallel_data *opd;
+ struct list_head list;
};
/**
* struct padata_instance - The overall control structure.
*
- * @cpu_notifier: cpu hotplug notifier.
+ * @node: Used by CPU hotplug.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
- * @pd: The internal control structure.
+ * @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
- * @cpumask_change_notifier: Notifiers chain for user-defined notify
- * callbacks that will be called when either @pcpu or @cbcpu
- * or both cpumasks change.
+ * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
@@ -143,9 +153,9 @@ struct padata_instance {
struct hlist_node node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
- struct parallel_data *pd;
+ struct list_head pslist;
struct padata_cpumask cpumask;
- struct blocking_notifier_head cpumask_change_notifier;
+ struct padata_cpumask rcpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -156,15 +166,13 @@ struct padata_instance {
extern struct padata_instance *padata_alloc_possible(const char *name);
extern void padata_free(struct padata_instance *pinst);
-extern int padata_do_parallel(struct padata_instance *pinst,
+extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+extern void padata_free_shell(struct padata_shell *ps);
+extern int padata_do_parallel(struct padata_shell *ps,
struct padata_priv *padata, int *cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
extern int padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
-extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
-extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
#endif
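
A minimal sketch of the reworked submission path, where each user attaches its own padata_shell (via padata_alloc_shell()) to a shared instance and queues jobs against the shell; the example_* types and callbacks are hypothetical.

#include <linux/padata.h>

struct example_job {
        struct padata_priv padata;      /* embedded job descriptor */
        /* ... user payload ... */
};

static void example_parallel(struct padata_priv *padata)
{
        /* Heavy per-object work runs here, possibly on another CPU. */
        padata_do_serial(padata);       /* hand the object back for ordering */
}

static void example_serial(struct padata_priv *padata)
{
        /* Objects arrive here in the order they were submitted. */
}

static int example_submit(struct padata_shell *ps, struct example_job *job,
                          int cb_cpu)
{
        job->padata.parallel = example_parallel;
        job->padata.serial = example_serial;

        /* cb_cpu should be in the instance's serial cpumask. */
        return padata_do_parallel(ps, &job->padata, &cb_cpu);
}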
diff --git a/include/linux/platform_data/crypto-atmel.h b/include/linux/platform_data/crypto-atmel.h
deleted file mode 100644
index 0471aaf6999b..000000000000
--- a/include/linux/platform_data/crypto-atmel.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_CRYPTO_ATMEL_H
-#define __LINUX_CRYPTO_ATMEL_H
-
-#include <linux/platform_data/dma-atmel.h>
-
-/**
- * struct crypto_dma_data - DMA data for AES/TDES/SHA
- */
-struct crypto_dma_data {
- struct at_dma_slave txdata;
- struct at_dma_slave rxdata;
-};
-
-/**
- * struct crypto_platform_data - board-specific AES/TDES/SHA configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- */
-struct crypto_platform_data {
- struct crypto_dma_data *dma_slave;
-};
-
-#endif /* __LINUX_CRYPTO_ATMEL_H */
diff --git a/include/linux/psp-tee.h b/include/linux/psp-tee.h
new file mode 100644
index 000000000000..cb0c95d6d76b
--- /dev/null
+++ b/include/linux/psp-tee.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef __PSP_TEE_H_
+#define __PSP_TEE_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+/* This file defines the Trusted Execution Environment (TEE) interface commands
+ * and the API exported by AMD Secure Processor driver to communicate with
+ * AMD-TEE Trusted OS.
+ */
+
+/**
+ * enum tee_cmd_id - TEE Interface Command IDs
+ * @TEE_CMD_ID_LOAD_TA: Load Trusted Application (TA) binary into
+ * TEE environment
+ * @TEE_CMD_ID_UNLOAD_TA: Unload TA binary from TEE environment
+ * @TEE_CMD_ID_OPEN_SESSION: Open session with loaded TA
+ * @TEE_CMD_ID_CLOSE_SESSION: Close session with loaded TA
+ * @TEE_CMD_ID_INVOKE_CMD: Invoke a command with loaded TA
+ * @TEE_CMD_ID_MAP_SHARED_MEM: Map shared memory
+ * @TEE_CMD_ID_UNMAP_SHARED_MEM: Unmap shared memory
+ */
+enum tee_cmd_id {
+ TEE_CMD_ID_LOAD_TA = 1,
+ TEE_CMD_ID_UNLOAD_TA,
+ TEE_CMD_ID_OPEN_SESSION,
+ TEE_CMD_ID_CLOSE_SESSION,
+ TEE_CMD_ID_INVOKE_CMD,
+ TEE_CMD_ID_MAP_SHARED_MEM,
+ TEE_CMD_ID_UNMAP_SHARED_MEM,
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+/**
+ * psp_tee_process_cmd() - Process command in Trusted Execution Environment
+ * @cmd_id: TEE command ID (&enum tee_cmd_id)
+ * @buf: Command buffer for TEE processing. On success, is updated
+ * with the response
+ * @len: Length of command buffer in bytes
+ * @status: On success, holds the TEE command execution status
+ *
+ * This function submits a command to the Trusted OS for processing in the
+ * TEE environment and waits for a response or until the command times out.
+ *
+ * Returns:
+ * 0 if TEE successfully processed the command
+ * -%ENODEV if PSP device not available
+ * -%EINVAL if invalid input
+ * -%ETIMEDOUT if TEE command timed out
+ * -%EBUSY if PSP device is not responsive
+ */
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status);
+
+/**
+ * psp_check_tee_status() - Checks whether there is a TEE which a driver can
+ * talk to.
+ *
+ * This function can be used by the AMD-TEE driver to query if there is a TEE
+ * which it can communicate.
+ *
+ * Returns:
+ * 0 if the device has TEE
+ * -%ENODEV if there is no TEE available
+ */
+int psp_check_tee_status(void);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf,
+ size_t len, u32 *status)
+{
+ return -ENODEV;
+}
+
+static inline int psp_check_tee_status(void)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+#endif /* __PSP_TEE_H_ */
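
A minimal sketch of how a client (such as the AMD-TEE driver) might use this interface: probe for a TEE, then submit a command buffer and check both the transport return code and the TEE-side status. struct example_cmd and the chosen command ID are placeholders, not the real command layouts.

#include <linux/psp-tee.h>

struct example_cmd {
        u32 param;
        u32 result;
};

static int example_submit_tee_cmd(void)
{
        struct example_cmd cmd = { .param = 1 };
        u32 status = 0;
        int ret;

        if (psp_check_tee_status())
                return -ENODEV;         /* no TEE to talk to */

        ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, &cmd, sizeof(cmd),
                                  &status);
        if (ret)
                return ret;             /* transport-level failure */

        /* On success, @status carries the TEE command execution result. */
        return status ? -EIO : 0;
}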
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 4b9eb064d7e7..6596f3a09e54 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -56,6 +56,7 @@
* TEE Implementation ID
*/
#define TEE_IMPL_ID_OPTEE 1
+#define TEE_IMPL_ID_AMDTEE 2
/*
* OP-TEE specific capabilities
diff --git a/kernel/padata.c b/kernel/padata.c
index c3fec1413295..72777c10bb9c 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -2,7 +2,7 @@
/*
* padata.c - generic interface to process data streams in parallel
*
- * See Documentation/padata.txt for an api documentation.
+ * See Documentation/core-api/padata.rst for more information.
*
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
@@ -35,6 +35,8 @@
#define MAX_OBJ_NUM 1000
+static void padata_free_pd(struct parallel_data *pd);
+
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
int cpu, target_cpu;
@@ -87,7 +89,7 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
/**
* padata_do_parallel - padata parallelization function
*
- * @pinst: padata instance
+ * @ps: padata shell
* @padata: object to be parallelized
* @cb_cpu: pointer to the CPU that the serialization callback function should
* run on. If it's not in the serial cpumask of @pinst
@@ -97,17 +99,20 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
* The parallelization callback function will run with BHs off.
* Note: Every object which is parallelized by padata_do_parallel
* must be seen by padata_do_serial.
+ *
+ * Return: 0 on success or else negative error code.
*/
-int padata_do_parallel(struct padata_instance *pinst,
+int padata_do_parallel(struct padata_shell *ps,
struct padata_priv *padata, int *cb_cpu)
{
+ struct padata_instance *pinst = ps->pinst;
int i, cpu, cpu_index, target_cpu, err;
struct padata_parallel_queue *queue;
struct parallel_data *pd;
rcu_read_lock_bh();
- pd = rcu_dereference_bh(pinst->pd);
+ pd = rcu_dereference_bh(ps->pd);
err = -EINVAL;
if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
@@ -160,14 +165,12 @@ EXPORT_SYMBOL(padata_do_parallel);
/*
* padata_find_next - Find the next object that needs serialization.
*
- * Return values are:
- *
- * A pointer to the control struct of the next object that needs
- * serialization, if present in one of the percpu reorder queues.
- *
- * NULL, if the next object that needs serialization will
- * be parallel processed by another cpu and is not yet present in
- * the cpu's reorder queue.
+ * Return:
+ * * A pointer to the control struct of the next object that needs
+ * serialization, if present in one of the percpu reorder queues.
+ * * NULL, if the next object that needs serialization will
+ * be parallel processed by another cpu and is not yet present in
+ * the cpu's reorder queue.
*/
static struct padata_priv *padata_find_next(struct parallel_data *pd,
bool remove_object)
@@ -199,7 +202,6 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
if (remove_object) {
list_del_init(&padata->list);
- atomic_dec(&pd->reorder_objects);
++pd->processed;
pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
}
@@ -210,10 +212,10 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
static void padata_reorder(struct parallel_data *pd)
{
+ struct padata_instance *pinst = pd->ps->pinst;
int cb_cpu;
struct padata_priv *padata;
struct padata_serial_queue *squeue;
- struct padata_instance *pinst = pd->pinst;
struct padata_parallel_queue *next_queue;
/*
@@ -283,6 +285,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
struct padata_serial_queue *squeue;
struct parallel_data *pd;
LIST_HEAD(local_list);
+ int cnt;
local_bh_disable();
squeue = container_of(serial_work, struct padata_serial_queue, work);
@@ -292,6 +295,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_replace_init(&squeue->serial.list, &local_list);
spin_unlock(&squeue->serial.lock);
+ cnt = 0;
+
while (!list_empty(&local_list)) {
struct padata_priv *padata;
@@ -301,9 +306,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_del_init(&padata->list);
padata->serial(padata);
- atomic_dec(&pd->refcnt);
+ cnt++;
}
local_bh_enable();
+
+ if (atomic_sub_and_test(cnt, &pd->refcnt))
+ padata_free_pd(pd);
}
/**
@@ -327,7 +335,6 @@ void padata_do_serial(struct padata_priv *padata)
if (cur->seq_nr < padata->seq_nr)
break;
list_add(&padata->list, &cur->list);
- atomic_inc(&pd->reorder_objects);
spin_unlock(&pqueue->reorder.lock);
/*
@@ -341,36 +348,39 @@ void padata_do_serial(struct padata_priv *padata)
}
EXPORT_SYMBOL(padata_do_serial);
-static int padata_setup_cpumasks(struct parallel_data *pd,
- const struct cpumask *pcpumask,
- const struct cpumask *cbcpumask)
+static int padata_setup_cpumasks(struct padata_instance *pinst)
{
struct workqueue_attrs *attrs;
+ int err;
+
+ attrs = alloc_workqueue_attrs();
+ if (!attrs)
+ return -ENOMEM;
+
+ /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
+ cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
+ err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
+ free_workqueue_attrs(attrs);
+
+ return err;
+}
+
+static int pd_setup_cpumasks(struct parallel_data *pd,
+ const struct cpumask *pcpumask,
+ const struct cpumask *cbcpumask)
+{
int err = -ENOMEM;
if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
goto out;
- cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
-
if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
goto free_pcpu_mask;
- cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
- attrs = alloc_workqueue_attrs();
- if (!attrs)
- goto free_cbcpu_mask;
-
- /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
- cpumask_copy(attrs->cpumask, pd->cpumask.pcpu);
- err = apply_workqueue_attrs(pd->pinst->parallel_wq, attrs);
- free_workqueue_attrs(attrs);
- if (err < 0)
- goto free_cbcpu_mask;
+ cpumask_copy(pd->cpumask.pcpu, pcpumask);
+ cpumask_copy(pd->cpumask.cbcpu, cbcpumask);
return 0;
-free_cbcpu_mask:
- free_cpumask_var(pd->cpumask.cbcpu);
free_pcpu_mask:
free_cpumask_var(pd->cpumask.pcpu);
out:
@@ -414,12 +424,16 @@ static void padata_init_pqueues(struct parallel_data *pd)
}
/* Allocate and initialize the internal cpumask dependend resources. */
-static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
- const struct cpumask *pcpumask,
- const struct cpumask *cbcpumask)
+static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
+ struct padata_instance *pinst = ps->pinst;
+ const struct cpumask *cbcpumask;
+ const struct cpumask *pcpumask;
struct parallel_data *pd;
+ cbcpumask = pinst->rcpumask.cbcpu;
+ pcpumask = pinst->rcpumask.pcpu;
+
pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
if (!pd)
goto err;
@@ -432,15 +446,14 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
if (!pd->squeue)
goto err_free_pqueue;
- pd->pinst = pinst;
- if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
+ pd->ps = ps;
+ if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
goto err_free_squeue;
padata_init_pqueues(pd);
padata_init_squeues(pd);
atomic_set(&pd->seq_nr, -1);
- atomic_set(&pd->reorder_objects, 0);
- atomic_set(&pd->refcnt, 0);
+ atomic_set(&pd->refcnt, 1);
spin_lock_init(&pd->lock);
pd->cpu = cpumask_first(pd->cpumask.pcpu);
INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
@@ -466,29 +479,6 @@ static void padata_free_pd(struct parallel_data *pd)
kfree(pd);
}
-/* Flush all objects out of the padata queues. */
-static void padata_flush_queues(struct parallel_data *pd)
-{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct padata_serial_queue *squeue;
-
- for_each_cpu(cpu, pd->cpumask.pcpu) {
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
- flush_work(&pqueue->work);
- }
-
- if (atomic_read(&pd->reorder_objects))
- padata_reorder(pd);
-
- for_each_cpu(cpu, pd->cpumask.cbcpu) {
- squeue = per_cpu_ptr(pd->squeue, cpu);
- flush_work(&squeue->work);
- }
-
- BUG_ON(atomic_read(&pd->refcnt) != 0);
-}
-
static void __padata_start(struct padata_instance *pinst)
{
pinst->flags |= PADATA_INIT;
@@ -502,72 +492,52 @@ static void __padata_stop(struct padata_instance *pinst)
pinst->flags &= ~PADATA_INIT;
synchronize_rcu();
-
- get_online_cpus();
- padata_flush_queues(pinst->pd);
- put_online_cpus();
}
/* Replace the internal control structure with a new one. */
-static void padata_replace(struct padata_instance *pinst,
- struct parallel_data *pd_new)
+static int padata_replace_one(struct padata_shell *ps)
+{
+ struct parallel_data *pd_new;
+
+ pd_new = padata_alloc_pd(ps);
+ if (!pd_new)
+ return -ENOMEM;
+
+ ps->opd = rcu_dereference_protected(ps->pd, 1);
+ rcu_assign_pointer(ps->pd, pd_new);
+
+ return 0;
+}
+
+static int padata_replace(struct padata_instance *pinst)
{
- struct parallel_data *pd_old = pinst->pd;
- int notification_mask = 0;
+ struct padata_shell *ps;
+ int err;
pinst->flags |= PADATA_RESET;
- rcu_assign_pointer(pinst->pd, pd_new);
+ cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
+ cpu_online_mask);
- synchronize_rcu();
+ cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
+ cpu_online_mask);
- if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
- notification_mask |= PADATA_CPU_PARALLEL;
- if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
- notification_mask |= PADATA_CPU_SERIAL;
+ list_for_each_entry(ps, &pinst->pslist, list) {
+ err = padata_replace_one(ps);
+ if (err)
+ break;
+ }
- padata_flush_queues(pd_old);
- padata_free_pd(pd_old);
+ synchronize_rcu();
- if (notification_mask)
- blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
- notification_mask,
- &pd_new->cpumask);
+ list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
+ if (atomic_dec_and_test(&ps->opd->refcnt))
+ padata_free_pd(ps->opd);
pinst->flags &= ~PADATA_RESET;
-}
-
-/**
- * padata_register_cpumask_notifier - Registers a notifier that will be called
- * if either pcpu or cbcpu or both cpumasks change.
- *
- * @pinst: A poineter to padata instance
- * @nblock: A pointer to notifier block.
- */
-int padata_register_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock)
-{
- return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
- nblock);
-}
-EXPORT_SYMBOL(padata_register_cpumask_notifier);
-/**
- * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
- * registered earlier using padata_register_cpumask_notifier
- *
- * @pinst: A pointer to data instance.
- * @nlock: A pointer to notifier block.
- */
-int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock)
-{
- return blocking_notifier_chain_unregister(
- &pinst->cpumask_change_notifier,
- nblock);
+ return err;
}
-EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
-
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
@@ -587,7 +557,7 @@ static int __padata_set_cpumasks(struct padata_instance *pinst,
cpumask_var_t cbcpumask)
{
int valid;
- struct parallel_data *pd;
+ int err;
valid = padata_validate_cpumask(pinst, pcpumask);
if (!valid) {
@@ -600,29 +570,26 @@ static int __padata_set_cpumasks(struct padata_instance *pinst,
__padata_stop(pinst);
out_replace:
- pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
- if (!pd)
- return -ENOMEM;
-
cpumask_copy(pinst->cpumask.pcpu, pcpumask);
cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
- padata_replace(pinst, pd);
+ err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
if (valid)
__padata_start(pinst);
- return 0;
+ return err;
}
/**
- * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value
- * equivalent to @cpumask.
- *
+ * padata_set_cpumask - Sets specified by @cpumask_type cpumask to the value
+ * equivalent to @cpumask.
* @pinst: padata instance
* @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
* to parallel and serial cpumasks respectively.
* @cpumask: the cpumask to use
+ *
+ * Return: 0 on success or negative error code
*/
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask)
@@ -630,8 +597,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
struct cpumask *serial_mask, *parallel_mask;
int err = -EINVAL;
- mutex_lock(&pinst->lock);
get_online_cpus();
+ mutex_lock(&pinst->lock);
switch (cpumask_type) {
case PADATA_CPU_PARALLEL:
@@ -649,8 +616,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
out:
- put_online_cpus();
mutex_unlock(&pinst->lock);
+ put_online_cpus();
return err;
}
@@ -660,6 +627,8 @@ EXPORT_SYMBOL(padata_set_cpumask);
* padata_start - start the parallel processing
*
* @pinst: padata instance to start
+ *
+ * Return: 0 on success or negative error code
*/
int padata_start(struct padata_instance *pinst)
{
@@ -695,82 +664,33 @@ EXPORT_SYMBOL(padata_stop);
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
- struct parallel_data *pd;
+ int err = 0;
if (cpumask_test_cpu(cpu, cpu_online_mask)) {
- pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
- pinst->cpumask.cbcpu);
- if (!pd)
- return -ENOMEM;
-
- padata_replace(pinst, pd);
+ err = padata_replace(pinst);
if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
__padata_start(pinst);
}
- return 0;
+ return err;
}
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
- struct parallel_data *pd = NULL;
-
- if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+ int err = 0;
+ if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
!padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
__padata_stop(pinst);
- pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
- pinst->cpumask.cbcpu);
- if (!pd)
- return -ENOMEM;
-
- padata_replace(pinst, pd);
-
- cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
- cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
+ err = padata_replace(pinst);
}
- return 0;
-}
-
- /**
- * padata_remove_cpu - remove a cpu from the one or both(serial and parallel)
- * padata cpumasks.
- *
- * @pinst: padata instance
- * @cpu: cpu to remove
- * @mask: bitmask specifying from which cpumask @cpu should be removed
- * The @mask may be any combination of the following flags:
- * PADATA_CPU_SERIAL - serial cpumask
- * PADATA_CPU_PARALLEL - parallel cpumask
- */
-int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
-{
- int err;
-
- if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
- return -EINVAL;
-
- mutex_lock(&pinst->lock);
-
- get_online_cpus();
- if (mask & PADATA_CPU_SERIAL)
- cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
- if (mask & PADATA_CPU_PARALLEL)
- cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);
-
- err = __padata_remove_cpu(pinst, cpu);
- put_online_cpus();
-
- mutex_unlock(&pinst->lock);
-
return err;
}
-EXPORT_SYMBOL(padata_remove_cpu);
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
@@ -793,7 +713,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
return ret;
}
-static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
+static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
struct padata_instance *pinst;
int ret;
@@ -814,11 +734,15 @@ static enum cpuhp_state hp_online;
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
+ cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif
+ WARN_ON(!list_empty(&pinst->pslist));
+
padata_stop(pinst);
- padata_free_pd(pinst->pd);
+ free_cpumask_var(pinst->rcpumask.cbcpu);
+ free_cpumask_var(pinst->rcpumask.pcpu);
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
destroy_workqueue(pinst->serial_wq);
@@ -959,13 +883,14 @@ static struct kobj_type padata_attr_type = {
* @name: used to identify the instance
* @pcpumask: cpumask that will be used for padata parallelization
* @cbcpumask: cpumask that will be used for padata serialization
+ *
+ * Return: new instance on success, NULL on error
*/
static struct padata_instance *padata_alloc(const char *name,
const struct cpumask *pcpumask,
const struct cpumask *cbcpumask)
{
struct padata_instance *pinst;
- struct parallel_data *pd = NULL;
pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
if (!pinst)
@@ -993,29 +918,40 @@ static struct padata_instance *padata_alloc(const char *name,
!padata_validate_cpumask(pinst, cbcpumask))
goto err_free_masks;
- pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
- if (!pd)
+ if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
goto err_free_masks;
+ if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
+ goto err_free_rcpumask_pcpu;
- rcu_assign_pointer(pinst->pd, pd);
+ INIT_LIST_HEAD(&pinst->pslist);
cpumask_copy(pinst->cpumask.pcpu, pcpumask);
cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
+ cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
+ cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);
+
+ if (padata_setup_cpumasks(pinst))
+ goto err_free_rcpumask_cbcpu;
pinst->flags = 0;
- BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
kobject_init(&pinst->kobj, &padata_attr_type);
mutex_init(&pinst->lock);
#ifdef CONFIG_HOTPLUG_CPU
cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
+ cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
+ &pinst->node);
#endif
put_online_cpus();
return pinst;
+err_free_rcpumask_cbcpu:
+ free_cpumask_var(pinst->rcpumask.cbcpu);
+err_free_rcpumask_pcpu:
+ free_cpumask_var(pinst->rcpumask.pcpu);
err_free_masks:
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
@@ -1036,6 +972,8 @@ err:
* parallel workers.
*
* @name: used to identify the instance
+ *
+ * Return: new instance on success, NULL on error
*/
struct padata_instance *padata_alloc_possible(const char *name)
{
@@ -1046,7 +984,7 @@ EXPORT_SYMBOL(padata_alloc_possible);
/**
* padata_free - free a padata instance
*
- * @padata_inst: padata instance to free
+ * @pinst: padata instance to free
*/
void padata_free(struct padata_instance *pinst)
{
@@ -1054,6 +992,63 @@ void padata_free(struct padata_instance *pinst)
}
EXPORT_SYMBOL(padata_free);
+/**
+ * padata_alloc_shell - Allocate and initialize padata shell.
+ *
+ * @pinst: Parent padata_instance object.
+ *
+ * Return: new shell on success, NULL on error
+ */
+struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
+{
+ struct parallel_data *pd;
+ struct padata_shell *ps;
+
+ ps = kzalloc(sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ goto out;
+
+ ps->pinst = pinst;
+
+ get_online_cpus();
+ pd = padata_alloc_pd(ps);
+ put_online_cpus();
+
+ if (!pd)
+ goto out_free_ps;
+
+ mutex_lock(&pinst->lock);
+ RCU_INIT_POINTER(ps->pd, pd);
+ list_add(&ps->list, &pinst->pslist);
+ mutex_unlock(&pinst->lock);
+
+ return ps;
+
+out_free_ps:
+ kfree(ps);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL(padata_alloc_shell);
+
+/**
+ * padata_free_shell - free a padata shell
+ *
+ * @ps: padata shell to free
+ */
+void padata_free_shell(struct padata_shell *ps)
+{
+ struct padata_instance *pinst = ps->pinst;
+
+ mutex_lock(&pinst->lock);
+ list_del(&ps->list);
+ padata_free_pd(rcu_dereference_protected(ps->pd, 1));
+ mutex_unlock(&pinst->lock);
+
+ kfree(ps);
+}
+EXPORT_SYMBOL(padata_free_shell);
+
#ifdef CONFIG_HOTPLUG_CPU
static __init int padata_driver_init(void)
@@ -1061,17 +1056,24 @@ static __init int padata_driver_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
- padata_cpu_online,
- padata_cpu_prep_down);
+ padata_cpu_online, NULL);
if (ret < 0)
return ret;
hp_online = ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
+ NULL, padata_cpu_dead);
+ if (ret < 0) {
+ cpuhp_remove_multi_state(hp_online);
+ return ret;
+ }
return 0;
}
module_init(padata_driver_init);
static __exit void padata_driver_exit(void)
{
+ cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
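
For orientation, a minimal usage sketch of the shell lifecycle the padata.c hunks above introduce, built only from the calls visible in this diff (padata_alloc_possible(), padata_alloc_shell(), padata_start()/padata_stop(), padata_free_shell(), padata_free()); the module name and error handling below are hypothetical and not part of the patch:

#include <linux/module.h>
#include <linux/padata.h>

static struct padata_instance *pinst;
static struct padata_shell *ps;

static int __init pd_example_init(void)
{
	int err;

	/* One instance owns the cpumasks and workqueues. */
	pinst = padata_alloc_possible("pd_example");
	if (!pinst)
		return -ENOMEM;

	/* A shell is the handle that jobs are submitted and serialized against. */
	ps = padata_alloc_shell(pinst);
	if (!ps) {
		padata_free(pinst);
		return -ENOMEM;
	}

	err = padata_start(pinst);
	if (err) {
		padata_free_shell(ps);
		padata_free(pinst);
	}
	return err;
}

static void __exit pd_example_exit(void)
{
	padata_stop(pinst);
	padata_free_shell(ps);
	padata_free(pinst);
}

module_init(pd_example_init);
module_exit(pd_example_exit);
MODULE_LICENSE("GPL");

As the hunks suggest, several shells can share one instance's cpumasks and workqueues while each shell carries its own parallel_data, which is what lets the old flush/notifier machinery above be removed in favour of plain refcounting.
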
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 0b2c4fce26d9..14c032de276e 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -90,7 +90,7 @@ config CRYPTO_LIB_DES
config CRYPTO_LIB_POLY1305_RSIZE
int
default 2 if MIPS
- default 4 if X86_64
+ default 11 if X86_64
default 9 if ARM || ARM64
default 1
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 34a701ab8b92..3a435629d9ce 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -19,16 +19,21 @@ libblake2s-y += blake2s.o
obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
libchacha20poly1305-y += chacha20poly1305.o
-obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519.o
-libcurve25519-y := curve25519-fiat32.o
-libcurve25519-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o
+obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o
+libcurve25519-generic-y := curve25519-fiat32.o
+libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o
+libcurve25519-generic-y += curve25519-generic.o
+
+obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o
libcurve25519-y += curve25519.o
obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
libdes-y := des.o
obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o
-libpoly1305-y := poly1305.o
+libpoly1305-y := poly1305-donna32.o
+libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
+libpoly1305-y += poly1305.o
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
@@ -36,4 +41,5 @@ libsha256-y := sha256.o
ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
libblake2s-y += blake2s-selftest.o
libchacha20poly1305-y += chacha20poly1305-selftest.o
+libcurve25519-y += curve25519-selftest.o
endif
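
The Makefile hunk above splits the generic Poly1305 core into poly1305-donna32.o/poly1305-donna64.o with poly1305.o acting as glue; callers continue to use the library interface from <crypto/poly1305.h>. A minimal one-shot sketch, assuming the usual poly1305_init()/poly1305_update()/poly1305_final() helpers and size constants (none of which are shown in this diff):

#include <linux/types.h>
#include <crypto/poly1305.h>

/* Illustrative one-shot MAC over a buffer; not part of this patch. */
static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *data, unsigned int len,
				 u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx ctx;

	poly1305_init(&ctx, key);	/* load the 32-byte one-time key */
	poly1305_update(&ctx, data, len);
	poly1305_final(&ctx, mac);	/* emit the 16-byte tag */
}
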
diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c
index 465de46dbdef..c391a91364e9 100644
--- a/lib/crypto/chacha20poly1305-selftest.c
+++ b/lib/crypto/chacha20poly1305-selftest.c
@@ -4,6 +4,7 @@
*/
#include <crypto/chacha20poly1305.h>
+#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <asm/unaligned.h>
@@ -1926,6 +1927,1104 @@ static const u8 enc_key012[] __initconst = {
0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
};
+/* wycheproof - rfc7539 */
+static const u8 enc_input013[] __initconst = {
+ 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61,
+ 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c,
+ 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39,
+ 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63,
+ 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66,
+ 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20,
+ 0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75,
+ 0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73,
+ 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f,
+ 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69,
+ 0x74, 0x2e
+};
+static const u8 enc_output013[] __initconst = {
+ 0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb,
+ 0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2,
+ 0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe,
+ 0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6,
+ 0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12,
+ 0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b,
+ 0x1a, 0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29,
+ 0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36,
+ 0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c,
+ 0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58,
+ 0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94,
+ 0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc,
+ 0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d,
+ 0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 0x4b,
+ 0x61, 0x16, 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09,
+ 0xe2, 0x6a, 0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60,
+ 0x06, 0x91
+};
+static const u8 enc_assoc013[] __initconst = {
+ 0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7
+};
+static const u8 enc_nonce013[] __initconst = {
+ 0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43,
+ 0x44, 0x45, 0x46, 0x47
+};
+static const u8 enc_key013[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input014[] __initconst = { };
+static const u8 enc_output014[] __initconst = {
+ 0x76, 0xac, 0xb3, 0x42, 0xcf, 0x31, 0x66, 0xa5,
+ 0xb6, 0x3c, 0x0c, 0x0e, 0xa1, 0x38, 0x3c, 0x8d
+};
+static const u8 enc_assoc014[] __initconst = { };
+static const u8 enc_nonce014[] __initconst = {
+ 0x4d, 0xa5, 0xbf, 0x8d, 0xfd, 0x58, 0x52, 0xc1,
+ 0xea, 0x12, 0x37, 0x9d
+};
+static const u8 enc_key014[] __initconst = {
+ 0x80, 0xba, 0x31, 0x92, 0xc8, 0x03, 0xce, 0x96,
+ 0x5e, 0xa3, 0x71, 0xd5, 0xff, 0x07, 0x3c, 0xf0,
+ 0xf4, 0x3b, 0x6a, 0x2a, 0xb5, 0x76, 0xb2, 0x08,
+ 0x42, 0x6e, 0x11, 0x40, 0x9c, 0x09, 0xb9, 0xb0
+};
+
+/* wycheproof - misc */
+static const u8 enc_input015[] __initconst = { };
+static const u8 enc_output015[] __initconst = {
+ 0x90, 0x6f, 0xa6, 0x28, 0x4b, 0x52, 0xf8, 0x7b,
+ 0x73, 0x59, 0xcb, 0xaa, 0x75, 0x63, 0xc7, 0x09
+};
+static const u8 enc_assoc015[] __initconst = {
+ 0xbd, 0x50, 0x67, 0x64, 0xf2, 0xd2, 0xc4, 0x10
+};
+static const u8 enc_nonce015[] __initconst = {
+ 0xa9, 0x2e, 0xf0, 0xac, 0x99, 0x1d, 0xd5, 0x16,
+ 0xa3, 0xc6, 0xf6, 0x89
+};
+static const u8 enc_key015[] __initconst = {
+ 0x7a, 0x4c, 0xd7, 0x59, 0x17, 0x2e, 0x02, 0xeb,
+ 0x20, 0x4d, 0xb2, 0xc3, 0xf5, 0xc7, 0x46, 0x22,
+ 0x7d, 0xf5, 0x84, 0xfc, 0x13, 0x45, 0x19, 0x63,
+ 0x91, 0xdb, 0xb9, 0x57, 0x7a, 0x25, 0x07, 0x42
+};
+
+/* wycheproof - misc */
+static const u8 enc_input016[] __initconst = {
+ 0x2a
+};
+static const u8 enc_output016[] __initconst = {
+ 0x3a, 0xca, 0xc2, 0x7d, 0xec, 0x09, 0x68, 0x80,
+ 0x1e, 0x9f, 0x6e, 0xde, 0xd6, 0x9d, 0x80, 0x75,
+ 0x22
+};
+static const u8 enc_assoc016[] __initconst = { };
+static const u8 enc_nonce016[] __initconst = {
+ 0x99, 0xe2, 0x3e, 0xc4, 0x89, 0x85, 0xbc, 0xcd,
+ 0xee, 0xab, 0x60, 0xf1
+};
+static const u8 enc_key016[] __initconst = {
+ 0xcc, 0x56, 0xb6, 0x80, 0x55, 0x2e, 0xb7, 0x50,
+ 0x08, 0xf5, 0x48, 0x4b, 0x4c, 0xb8, 0x03, 0xfa,
+ 0x50, 0x63, 0xeb, 0xd6, 0xea, 0xb9, 0x1f, 0x6a,
+ 0xb6, 0xae, 0xf4, 0x91, 0x6a, 0x76, 0x62, 0x73
+};
+
+/* wycheproof - misc */
+static const u8 enc_input017[] __initconst = {
+ 0x51
+};
+static const u8 enc_output017[] __initconst = {
+ 0xc4, 0x16, 0x83, 0x10, 0xca, 0x45, 0xb1, 0xf7,
+ 0xc6, 0x6c, 0xad, 0x4e, 0x99, 0xe4, 0x3f, 0x72,
+ 0xb9
+};
+static const u8 enc_assoc017[] __initconst = {
+ 0x91, 0xca, 0x6c, 0x59, 0x2c, 0xbc, 0xca, 0x53
+};
+static const u8 enc_nonce017[] __initconst = {
+ 0xab, 0x0d, 0xca, 0x71, 0x6e, 0xe0, 0x51, 0xd2,
+ 0x78, 0x2f, 0x44, 0x03
+};
+static const u8 enc_key017[] __initconst = {
+ 0x46, 0xf0, 0x25, 0x49, 0x65, 0xf7, 0x69, 0xd5,
+ 0x2b, 0xdb, 0x4a, 0x70, 0xb4, 0x43, 0x19, 0x9f,
+ 0x8e, 0xf2, 0x07, 0x52, 0x0d, 0x12, 0x20, 0xc5,
+ 0x5e, 0x4b, 0x70, 0xf0, 0xfd, 0xa6, 0x20, 0xee
+};
+
+/* wycheproof - misc */
+static const u8 enc_input018[] __initconst = {
+ 0x5c, 0x60
+};
+static const u8 enc_output018[] __initconst = {
+ 0x4d, 0x13, 0x91, 0xe8, 0xb6, 0x1e, 0xfb, 0x39,
+ 0xc1, 0x22, 0x19, 0x54, 0x53, 0x07, 0x7b, 0x22,
+ 0xe5, 0xe2
+};
+static const u8 enc_assoc018[] __initconst = { };
+static const u8 enc_nonce018[] __initconst = {
+ 0x46, 0x1a, 0xf1, 0x22, 0xe9, 0xf2, 0xe0, 0x34,
+ 0x7e, 0x03, 0xf2, 0xdb
+};
+static const u8 enc_key018[] __initconst = {
+ 0x2f, 0x7f, 0x7e, 0x4f, 0x59, 0x2b, 0xb3, 0x89,
+ 0x19, 0x49, 0x89, 0x74, 0x35, 0x07, 0xbf, 0x3e,
+ 0xe9, 0xcb, 0xde, 0x17, 0x86, 0xb6, 0x69, 0x5f,
+ 0xe6, 0xc0, 0x25, 0xfd, 0x9b, 0xa4, 0xc1, 0x00
+};
+
+/* wycheproof - misc */
+static const u8 enc_input019[] __initconst = {
+ 0xdd, 0xf2
+};
+static const u8 enc_output019[] __initconst = {
+ 0xb6, 0x0d, 0xea, 0xd0, 0xfd, 0x46, 0x97, 0xec,
+ 0x2e, 0x55, 0x58, 0x23, 0x77, 0x19, 0xd0, 0x24,
+ 0x37, 0xa2
+};
+static const u8 enc_assoc019[] __initconst = {
+ 0x88, 0x36, 0x4f, 0xc8, 0x06, 0x05, 0x18, 0xbf
+};
+static const u8 enc_nonce019[] __initconst = {
+ 0x61, 0x54, 0x6b, 0xa5, 0xf1, 0x72, 0x05, 0x90,
+ 0xb6, 0x04, 0x0a, 0xc6
+};
+static const u8 enc_key019[] __initconst = {
+ 0xc8, 0x83, 0x3d, 0xce, 0x5e, 0xa9, 0xf2, 0x48,
+ 0xaa, 0x20, 0x30, 0xea, 0xcf, 0xe7, 0x2b, 0xff,
+ 0xe6, 0x9a, 0x62, 0x0c, 0xaf, 0x79, 0x33, 0x44,
+ 0xe5, 0x71, 0x8f, 0xe0, 0xd7, 0xab, 0x1a, 0x58
+};
+
+/* wycheproof - misc */
+static const u8 enc_input020[] __initconst = {
+ 0xab, 0x85, 0xe9, 0xc1, 0x57, 0x17, 0x31
+};
+static const u8 enc_output020[] __initconst = {
+ 0x5d, 0xfe, 0x34, 0x40, 0xdb, 0xb3, 0xc3, 0xed,
+ 0x7a, 0x43, 0x4e, 0x26, 0x02, 0xd3, 0x94, 0x28,
+ 0x1e, 0x0a, 0xfa, 0x9f, 0xb7, 0xaa, 0x42
+};
+static const u8 enc_assoc020[] __initconst = { };
+static const u8 enc_nonce020[] __initconst = {
+ 0x3c, 0x4e, 0x65, 0x4d, 0x66, 0x3f, 0xa4, 0x59,
+ 0x6d, 0xc5, 0x5b, 0xb7
+};
+static const u8 enc_key020[] __initconst = {
+ 0x55, 0x56, 0x81, 0x58, 0xd3, 0xa6, 0x48, 0x3f,
+ 0x1f, 0x70, 0x21, 0xea, 0xb6, 0x9b, 0x70, 0x3f,
+ 0x61, 0x42, 0x51, 0xca, 0xdc, 0x1a, 0xf5, 0xd3,
+ 0x4a, 0x37, 0x4f, 0xdb, 0xfc, 0x5a, 0xda, 0xc7
+};
+
+/* wycheproof - misc */
+static const u8 enc_input021[] __initconst = {
+ 0x4e, 0xe5, 0xcd, 0xa2, 0x0d, 0x42, 0x90
+};
+static const u8 enc_output021[] __initconst = {
+ 0x4b, 0xd4, 0x72, 0x12, 0x94, 0x1c, 0xe3, 0x18,
+ 0x5f, 0x14, 0x08, 0xee, 0x7f, 0xbf, 0x18, 0xf5,
+ 0xab, 0xad, 0x6e, 0x22, 0x53, 0xa1, 0xba
+};
+static const u8 enc_assoc021[] __initconst = {
+ 0x84, 0xe4, 0x6b, 0xe8, 0xc0, 0x91, 0x90, 0x53
+};
+static const u8 enc_nonce021[] __initconst = {
+ 0x58, 0x38, 0x93, 0x75, 0xc6, 0x9e, 0xe3, 0x98,
+ 0xde, 0x94, 0x83, 0x96
+};
+static const u8 enc_key021[] __initconst = {
+ 0xe3, 0xc0, 0x9e, 0x7f, 0xab, 0x1a, 0xef, 0xb5,
+ 0x16, 0xda, 0x6a, 0x33, 0x02, 0x2a, 0x1d, 0xd4,
+ 0xeb, 0x27, 0x2c, 0x80, 0xd5, 0x40, 0xc5, 0xda,
+ 0x52, 0xa7, 0x30, 0xf3, 0x4d, 0x84, 0x0d, 0x7f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input022[] __initconst = {
+ 0xbe, 0x33, 0x08, 0xf7, 0x2a, 0x2c, 0x6a, 0xed
+};
+static const u8 enc_output022[] __initconst = {
+ 0x8e, 0x94, 0x39, 0xa5, 0x6e, 0xee, 0xc8, 0x17,
+ 0xfb, 0xe8, 0xa6, 0xed, 0x8f, 0xab, 0xb1, 0x93,
+ 0x75, 0x39, 0xdd, 0x6c, 0x00, 0xe9, 0x00, 0x21
+};
+static const u8 enc_assoc022[] __initconst = { };
+static const u8 enc_nonce022[] __initconst = {
+ 0x4f, 0x07, 0xaf, 0xed, 0xfd, 0xc3, 0xb6, 0xc2,
+ 0x36, 0x18, 0x23, 0xd3
+};
+static const u8 enc_key022[] __initconst = {
+ 0x51, 0xe4, 0xbf, 0x2b, 0xad, 0x92, 0xb7, 0xaf,
+ 0xf1, 0xa4, 0xbc, 0x05, 0x55, 0x0b, 0xa8, 0x1d,
+ 0xf4, 0xb9, 0x6f, 0xab, 0xf4, 0x1c, 0x12, 0xc7,
+ 0xb0, 0x0e, 0x60, 0xe4, 0x8d, 0xb7, 0xe1, 0x52
+};
+
+/* wycheproof - misc */
+static const u8 enc_input023[] __initconst = {
+ 0xa4, 0xc9, 0xc2, 0x80, 0x1b, 0x71, 0xf7, 0xdf
+};
+static const u8 enc_output023[] __initconst = {
+ 0xb9, 0xb9, 0x10, 0x43, 0x3a, 0xf0, 0x52, 0xb0,
+ 0x45, 0x30, 0xf5, 0x1a, 0xee, 0xe0, 0x24, 0xe0,
+ 0xa4, 0x45, 0xa6, 0x32, 0x8f, 0xa6, 0x7a, 0x18
+};
+static const u8 enc_assoc023[] __initconst = {
+ 0x66, 0xc0, 0xae, 0x70, 0x07, 0x6c, 0xb1, 0x4d
+};
+static const u8 enc_nonce023[] __initconst = {
+ 0xb4, 0xea, 0x66, 0x6e, 0xe1, 0x19, 0x56, 0x33,
+ 0x66, 0x48, 0x4a, 0x78
+};
+static const u8 enc_key023[] __initconst = {
+ 0x11, 0x31, 0xc1, 0x41, 0x85, 0x77, 0xa0, 0x54,
+ 0xde, 0x7a, 0x4a, 0xc5, 0x51, 0x95, 0x0f, 0x1a,
+ 0x05, 0x3f, 0x9a, 0xe4, 0x6e, 0x5b, 0x75, 0xfe,
+ 0x4a, 0xbd, 0x56, 0x08, 0xd7, 0xcd, 0xda, 0xdd
+};
+
+/* wycheproof - misc */
+static const u8 enc_input024[] __initconst = {
+ 0x42, 0xba, 0xae, 0x59, 0x78, 0xfe, 0xaf, 0x5c,
+ 0x36, 0x8d, 0x14, 0xe0
+};
+static const u8 enc_output024[] __initconst = {
+ 0xff, 0x7d, 0xc2, 0x03, 0xb2, 0x6c, 0x46, 0x7a,
+ 0x6b, 0x50, 0xdb, 0x33, 0x57, 0x8c, 0x0f, 0x27,
+ 0x58, 0xc2, 0xe1, 0x4e, 0x36, 0xd4, 0xfc, 0x10,
+ 0x6d, 0xcb, 0x29, 0xb4
+};
+static const u8 enc_assoc024[] __initconst = { };
+static const u8 enc_nonce024[] __initconst = {
+ 0x9a, 0x59, 0xfc, 0xe2, 0x6d, 0xf0, 0x00, 0x5e,
+ 0x07, 0x53, 0x86, 0x56
+};
+static const u8 enc_key024[] __initconst = {
+ 0x99, 0xb6, 0x2b, 0xd5, 0xaf, 0xbe, 0x3f, 0xb0,
+ 0x15, 0xbd, 0xe9, 0x3f, 0x0a, 0xbf, 0x48, 0x39,
+ 0x57, 0xa1, 0xc3, 0xeb, 0x3c, 0xa5, 0x9c, 0xb5,
+ 0x0b, 0x39, 0xf7, 0xf8, 0xa9, 0xcc, 0x51, 0xbe
+};
+
+/* wycheproof - misc */
+static const u8 enc_input025[] __initconst = {
+ 0xfd, 0xc8, 0x5b, 0x94, 0xa4, 0xb2, 0xa6, 0xb7,
+ 0x59, 0xb1, 0xa0, 0xda
+};
+static const u8 enc_output025[] __initconst = {
+ 0x9f, 0x88, 0x16, 0xde, 0x09, 0x94, 0xe9, 0x38,
+ 0xd9, 0xe5, 0x3f, 0x95, 0xd0, 0x86, 0xfc, 0x6c,
+ 0x9d, 0x8f, 0xa9, 0x15, 0xfd, 0x84, 0x23, 0xa7,
+ 0xcf, 0x05, 0x07, 0x2f
+};
+static const u8 enc_assoc025[] __initconst = {
+ 0xa5, 0x06, 0xe1, 0xa5, 0xc6, 0x90, 0x93, 0xf9
+};
+static const u8 enc_nonce025[] __initconst = {
+ 0x58, 0xdb, 0xd4, 0xad, 0x2c, 0x4a, 0xd3, 0x5d,
+ 0xd9, 0x06, 0xe9, 0xce
+};
+static const u8 enc_key025[] __initconst = {
+ 0x85, 0xf3, 0x5b, 0x62, 0x82, 0xcf, 0xf4, 0x40,
+ 0xbc, 0x10, 0x20, 0xc8, 0x13, 0x6f, 0xf2, 0x70,
+ 0x31, 0x11, 0x0f, 0xa6, 0x3e, 0xc1, 0x6f, 0x1e,
+ 0x82, 0x51, 0x18, 0xb0, 0x06, 0xb9, 0x12, 0x57
+};
+
+/* wycheproof - misc */
+static const u8 enc_input026[] __initconst = {
+ 0x51, 0xf8, 0xc1, 0xf7, 0x31, 0xea, 0x14, 0xac,
+ 0xdb, 0x21, 0x0a, 0x6d, 0x97, 0x3e, 0x07
+};
+static const u8 enc_output026[] __initconst = {
+ 0x0b, 0x29, 0x63, 0x8e, 0x1f, 0xbd, 0xd6, 0xdf,
+ 0x53, 0x97, 0x0b, 0xe2, 0x21, 0x00, 0x42, 0x2a,
+ 0x91, 0x34, 0x08, 0x7d, 0x67, 0xa4, 0x6e, 0x79,
+ 0x17, 0x8d, 0x0a, 0x93, 0xf5, 0xe1, 0xd2
+};
+static const u8 enc_assoc026[] __initconst = { };
+static const u8 enc_nonce026[] __initconst = {
+ 0x68, 0xab, 0x7f, 0xdb, 0xf6, 0x19, 0x01, 0xda,
+ 0xd4, 0x61, 0xd2, 0x3c
+};
+static const u8 enc_key026[] __initconst = {
+ 0x67, 0x11, 0x96, 0x27, 0xbd, 0x98, 0x8e, 0xda,
+ 0x90, 0x62, 0x19, 0xe0, 0x8c, 0x0d, 0x0d, 0x77,
+ 0x9a, 0x07, 0xd2, 0x08, 0xce, 0x8a, 0x4f, 0xe0,
+ 0x70, 0x9a, 0xf7, 0x55, 0xee, 0xec, 0x6d, 0xcb
+};
+
+/* wycheproof - misc */
+static const u8 enc_input027[] __initconst = {
+ 0x97, 0x46, 0x9d, 0xa6, 0x67, 0xd6, 0x11, 0x0f,
+ 0x9c, 0xbd, 0xa1, 0xd1, 0xa2, 0x06, 0x73
+};
+static const u8 enc_output027[] __initconst = {
+ 0x32, 0xdb, 0x66, 0xc4, 0xa3, 0x81, 0x9d, 0x81,
+ 0x55, 0x74, 0x55, 0xe5, 0x98, 0x0f, 0xed, 0xfe,
+ 0xae, 0x30, 0xde, 0xc9, 0x4e, 0x6a, 0xd3, 0xa9,
+ 0xee, 0xa0, 0x6a, 0x0d, 0x70, 0x39, 0x17
+};
+static const u8 enc_assoc027[] __initconst = {
+ 0x64, 0x53, 0xa5, 0x33, 0x84, 0x63, 0x22, 0x12
+};
+static const u8 enc_nonce027[] __initconst = {
+ 0xd9, 0x5b, 0x32, 0x43, 0xaf, 0xae, 0xf7, 0x14,
+ 0xc5, 0x03, 0x5b, 0x6a
+};
+static const u8 enc_key027[] __initconst = {
+ 0xe6, 0xf1, 0x11, 0x8d, 0x41, 0xe4, 0xb4, 0x3f,
+ 0xb5, 0x82, 0x21, 0xb7, 0xed, 0x79, 0x67, 0x38,
+ 0x34, 0xe0, 0xd8, 0xac, 0x5c, 0x4f, 0xa6, 0x0b,
+ 0xbc, 0x8b, 0xc4, 0x89, 0x3a, 0x58, 0x89, 0x4d
+};
+
+/* wycheproof - misc */
+static const u8 enc_input028[] __initconst = {
+ 0x54, 0x9b, 0x36, 0x5a, 0xf9, 0x13, 0xf3, 0xb0,
+ 0x81, 0x13, 0x1c, 0xcb, 0x6b, 0x82, 0x55, 0x88
+};
+static const u8 enc_output028[] __initconst = {
+ 0xe9, 0x11, 0x0e, 0x9f, 0x56, 0xab, 0x3c, 0xa4,
+ 0x83, 0x50, 0x0c, 0xea, 0xba, 0xb6, 0x7a, 0x13,
+ 0x83, 0x6c, 0xca, 0xbf, 0x15, 0xa6, 0xa2, 0x2a,
+ 0x51, 0xc1, 0x07, 0x1c, 0xfa, 0x68, 0xfa, 0x0c
+};
+static const u8 enc_assoc028[] __initconst = { };
+static const u8 enc_nonce028[] __initconst = {
+ 0x2f, 0xcb, 0x1b, 0x38, 0xa9, 0x9e, 0x71, 0xb8,
+ 0x47, 0x40, 0xad, 0x9b
+};
+static const u8 enc_key028[] __initconst = {
+ 0x59, 0xd4, 0xea, 0xfb, 0x4d, 0xe0, 0xcf, 0xc7,
+ 0xd3, 0xdb, 0x99, 0xa8, 0xf5, 0x4b, 0x15, 0xd7,
+ 0xb3, 0x9f, 0x0a, 0xcc, 0x8d, 0xa6, 0x97, 0x63,
+ 0xb0, 0x19, 0xc1, 0x69, 0x9f, 0x87, 0x67, 0x4a
+};
+
+/* wycheproof - misc */
+static const u8 enc_input029[] __initconst = {
+ 0x55, 0xa4, 0x65, 0x64, 0x4f, 0x5b, 0x65, 0x09,
+ 0x28, 0xcb, 0xee, 0x7c, 0x06, 0x32, 0x14, 0xd6
+};
+static const u8 enc_output029[] __initconst = {
+ 0xe4, 0xb1, 0x13, 0xcb, 0x77, 0x59, 0x45, 0xf3,
+ 0xd3, 0xa8, 0xae, 0x9e, 0xc1, 0x41, 0xc0, 0x0c,
+ 0x7c, 0x43, 0xf1, 0x6c, 0xe0, 0x96, 0xd0, 0xdc,
+ 0x27, 0xc9, 0x58, 0x49, 0xdc, 0x38, 0x3b, 0x7d
+};
+static const u8 enc_assoc029[] __initconst = {
+ 0x03, 0x45, 0x85, 0x62, 0x1a, 0xf8, 0xd7, 0xff
+};
+static const u8 enc_nonce029[] __initconst = {
+ 0x11, 0x8a, 0x69, 0x64, 0xc2, 0xd3, 0xe3, 0x80,
+ 0x07, 0x1f, 0x52, 0x66
+};
+static const u8 enc_key029[] __initconst = {
+ 0xb9, 0x07, 0xa4, 0x50, 0x75, 0x51, 0x3f, 0xe8,
+ 0xa8, 0x01, 0x9e, 0xde, 0xe3, 0xf2, 0x59, 0x14,
+ 0x87, 0xb2, 0xa0, 0x30, 0xb0, 0x3c, 0x6e, 0x1d,
+ 0x77, 0x1c, 0x86, 0x25, 0x71, 0xd2, 0xea, 0x1e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input030[] __initconst = {
+ 0x3f, 0xf1, 0x51, 0x4b, 0x1c, 0x50, 0x39, 0x15,
+ 0x91, 0x8f, 0x0c, 0x0c, 0x31, 0x09, 0x4a, 0x6e,
+ 0x1f
+};
+static const u8 enc_output030[] __initconst = {
+ 0x02, 0xcc, 0x3a, 0xcb, 0x5e, 0xe1, 0xfc, 0xdd,
+ 0x12, 0xa0, 0x3b, 0xb8, 0x57, 0x97, 0x64, 0x74,
+ 0xd3, 0xd8, 0x3b, 0x74, 0x63, 0xa2, 0xc3, 0x80,
+ 0x0f, 0xe9, 0x58, 0xc2, 0x8e, 0xaa, 0x29, 0x08,
+ 0x13
+};
+static const u8 enc_assoc030[] __initconst = { };
+static const u8 enc_nonce030[] __initconst = {
+ 0x45, 0xaa, 0xa3, 0xe5, 0xd1, 0x6d, 0x2d, 0x42,
+ 0xdc, 0x03, 0x44, 0x5d
+};
+static const u8 enc_key030[] __initconst = {
+ 0x3b, 0x24, 0x58, 0xd8, 0x17, 0x6e, 0x16, 0x21,
+ 0xc0, 0xcc, 0x24, 0xc0, 0xc0, 0xe2, 0x4c, 0x1e,
+ 0x80, 0xd7, 0x2f, 0x7e, 0xe9, 0x14, 0x9a, 0x4b,
+ 0x16, 0x61, 0x76, 0x62, 0x96, 0x16, 0xd0, 0x11
+};
+
+/* wycheproof - misc */
+static const u8 enc_input031[] __initconst = {
+ 0x63, 0x85, 0x8c, 0xa3, 0xe2, 0xce, 0x69, 0x88,
+ 0x7b, 0x57, 0x8a, 0x3c, 0x16, 0x7b, 0x42, 0x1c,
+ 0x9c
+};
+static const u8 enc_output031[] __initconst = {
+ 0x35, 0x76, 0x64, 0x88, 0xd2, 0xbc, 0x7c, 0x2b,
+ 0x8d, 0x17, 0xcb, 0xbb, 0x9a, 0xbf, 0xad, 0x9e,
+ 0x6d, 0x1f, 0x39, 0x1e, 0x65, 0x7b, 0x27, 0x38,
+ 0xdd, 0xa0, 0x84, 0x48, 0xcb, 0xa2, 0x81, 0x1c,
+ 0xeb
+};
+static const u8 enc_assoc031[] __initconst = {
+ 0x9a, 0xaf, 0x29, 0x9e, 0xee, 0xa7, 0x8f, 0x79
+};
+static const u8 enc_nonce031[] __initconst = {
+ 0xf0, 0x38, 0x4f, 0xb8, 0x76, 0x12, 0x14, 0x10,
+ 0x63, 0x3d, 0x99, 0x3d
+};
+static const u8 enc_key031[] __initconst = {
+ 0xf6, 0x0c, 0x6a, 0x1b, 0x62, 0x57, 0x25, 0xf7,
+ 0x6c, 0x70, 0x37, 0xb4, 0x8f, 0xe3, 0x57, 0x7f,
+ 0xa7, 0xf7, 0xb8, 0x7b, 0x1b, 0xd5, 0xa9, 0x82,
+ 0x17, 0x6d, 0x18, 0x23, 0x06, 0xff, 0xb8, 0x70
+};
+
+/* wycheproof - misc */
+static const u8 enc_input032[] __initconst = {
+ 0x10, 0xf1, 0xec, 0xf9, 0xc6, 0x05, 0x84, 0x66,
+ 0x5d, 0x9a, 0xe5, 0xef, 0xe2, 0x79, 0xe7, 0xf7,
+ 0x37, 0x7e, 0xea, 0x69, 0x16, 0xd2, 0xb1, 0x11
+};
+static const u8 enc_output032[] __initconst = {
+ 0x42, 0xf2, 0x6c, 0x56, 0xcb, 0x4b, 0xe2, 0x1d,
+ 0x9d, 0x8d, 0x0c, 0x80, 0xfc, 0x99, 0xdd, 0xe0,
+ 0x0d, 0x75, 0xf3, 0x80, 0x74, 0xbf, 0xe7, 0x64,
+ 0x54, 0xaa, 0x7e, 0x13, 0xd4, 0x8f, 0xff, 0x7d,
+ 0x75, 0x57, 0x03, 0x94, 0x57, 0x04, 0x0a, 0x3a
+};
+static const u8 enc_assoc032[] __initconst = { };
+static const u8 enc_nonce032[] __initconst = {
+ 0xe6, 0xb1, 0xad, 0xf2, 0xfd, 0x58, 0xa8, 0x76,
+ 0x2c, 0x65, 0xf3, 0x1b
+};
+static const u8 enc_key032[] __initconst = {
+ 0x02, 0x12, 0xa8, 0xde, 0x50, 0x07, 0xed, 0x87,
+ 0xb3, 0x3f, 0x1a, 0x70, 0x90, 0xb6, 0x11, 0x4f,
+ 0x9e, 0x08, 0xce, 0xfd, 0x96, 0x07, 0xf2, 0xc2,
+ 0x76, 0xbd, 0xcf, 0xdb, 0xc5, 0xce, 0x9c, 0xd7
+};
+
+/* wycheproof - misc */
+static const u8 enc_input033[] __initconst = {
+ 0x92, 0x22, 0xf9, 0x01, 0x8e, 0x54, 0xfd, 0x6d,
+ 0xe1, 0x20, 0x08, 0x06, 0xa9, 0xee, 0x8e, 0x4c,
+ 0xc9, 0x04, 0xd2, 0x9f, 0x25, 0xcb, 0xa1, 0x93
+};
+static const u8 enc_output033[] __initconst = {
+ 0x12, 0x30, 0x32, 0x43, 0x7b, 0x4b, 0xfd, 0x69,
+ 0x20, 0xe8, 0xf7, 0xe7, 0xe0, 0x08, 0x7a, 0xe4,
+ 0x88, 0x9e, 0xbe, 0x7a, 0x0a, 0xd0, 0xe9, 0x00,
+ 0x3c, 0xf6, 0x8f, 0x17, 0x95, 0x50, 0xda, 0x63,
+ 0xd3, 0xb9, 0x6c, 0x2d, 0x55, 0x41, 0x18, 0x65
+};
+static const u8 enc_assoc033[] __initconst = {
+ 0x3e, 0x8b, 0xc5, 0xad, 0xe1, 0x82, 0xff, 0x08
+};
+static const u8 enc_nonce033[] __initconst = {
+ 0x6b, 0x28, 0x2e, 0xbe, 0xcc, 0x54, 0x1b, 0xcd,
+ 0x78, 0x34, 0xed, 0x55
+};
+static const u8 enc_key033[] __initconst = {
+ 0xc5, 0xbc, 0x09, 0x56, 0x56, 0x46, 0xe7, 0xed,
+ 0xda, 0x95, 0x4f, 0x1f, 0x73, 0x92, 0x23, 0xda,
+ 0xda, 0x20, 0xb9, 0x5c, 0x44, 0xab, 0x03, 0x3d,
+ 0x0f, 0xae, 0x4b, 0x02, 0x83, 0xd1, 0x8b, 0xe3
+};
+
+/* wycheproof - misc */
+static const u8 enc_input034[] __initconst = {
+ 0xb0, 0x53, 0x99, 0x92, 0x86, 0xa2, 0x82, 0x4f,
+ 0x42, 0xcc, 0x8c, 0x20, 0x3a, 0xb2, 0x4e, 0x2c,
+ 0x97, 0xa6, 0x85, 0xad, 0xcc, 0x2a, 0xd3, 0x26,
+ 0x62, 0x55, 0x8e, 0x55, 0xa5, 0xc7, 0x29
+};
+static const u8 enc_output034[] __initconst = {
+ 0x45, 0xc7, 0xd6, 0xb5, 0x3a, 0xca, 0xd4, 0xab,
+ 0xb6, 0x88, 0x76, 0xa6, 0xe9, 0x6a, 0x48, 0xfb,
+ 0x59, 0x52, 0x4d, 0x2c, 0x92, 0xc9, 0xd8, 0xa1,
+ 0x89, 0xc9, 0xfd, 0x2d, 0xb9, 0x17, 0x46, 0x56,
+ 0x6d, 0x3c, 0xa1, 0x0e, 0x31, 0x1b, 0x69, 0x5f,
+ 0x3e, 0xae, 0x15, 0x51, 0x65, 0x24, 0x93
+};
+static const u8 enc_assoc034[] __initconst = { };
+static const u8 enc_nonce034[] __initconst = {
+ 0x04, 0xa9, 0xbe, 0x03, 0x50, 0x8a, 0x5f, 0x31,
+ 0x37, 0x1a, 0x6f, 0xd2
+};
+static const u8 enc_key034[] __initconst = {
+ 0x2e, 0xb5, 0x1c, 0x46, 0x9a, 0xa8, 0xeb, 0x9e,
+ 0x6c, 0x54, 0xa8, 0x34, 0x9b, 0xae, 0x50, 0xa2,
+ 0x0f, 0x0e, 0x38, 0x27, 0x11, 0xbb, 0xa1, 0x15,
+ 0x2c, 0x42, 0x4f, 0x03, 0xb6, 0x67, 0x1d, 0x71
+};
+
+/* wycheproof - misc */
+static const u8 enc_input035[] __initconst = {
+ 0xf4, 0x52, 0x06, 0xab, 0xc2, 0x55, 0x52, 0xb2,
+ 0xab, 0xc9, 0xab, 0x7f, 0xa2, 0x43, 0x03, 0x5f,
+ 0xed, 0xaa, 0xdd, 0xc3, 0xb2, 0x29, 0x39, 0x56,
+ 0xf1, 0xea, 0x6e, 0x71, 0x56, 0xe7, 0xeb
+};
+static const u8 enc_output035[] __initconst = {
+ 0x46, 0xa8, 0x0c, 0x41, 0x87, 0x02, 0x47, 0x20,
+ 0x08, 0x46, 0x27, 0x58, 0x00, 0x80, 0xdd, 0xe5,
+ 0xa3, 0xf4, 0xa1, 0x10, 0x93, 0xa7, 0x07, 0x6e,
+ 0xd6, 0xf3, 0xd3, 0x26, 0xbc, 0x7b, 0x70, 0x53,
+ 0x4d, 0x4a, 0xa2, 0x83, 0x5a, 0x52, 0xe7, 0x2d,
+ 0x14, 0xdf, 0x0e, 0x4f, 0x47, 0xf2, 0x5f
+};
+static const u8 enc_assoc035[] __initconst = {
+ 0x37, 0x46, 0x18, 0xa0, 0x6e, 0xa9, 0x8a, 0x48
+};
+static const u8 enc_nonce035[] __initconst = {
+ 0x47, 0x0a, 0x33, 0x9e, 0xcb, 0x32, 0x19, 0xb8,
+ 0xb8, 0x1a, 0x1f, 0x8b
+};
+static const u8 enc_key035[] __initconst = {
+ 0x7f, 0x5b, 0x74, 0xc0, 0x7e, 0xd1, 0xb4, 0x0f,
+ 0xd1, 0x43, 0x58, 0xfe, 0x2f, 0xf2, 0xa7, 0x40,
+ 0xc1, 0x16, 0xc7, 0x70, 0x65, 0x10, 0xe6, 0xa4,
+ 0x37, 0xf1, 0x9e, 0xa4, 0x99, 0x11, 0xce, 0xc4
+};
+
+/* wycheproof - misc */
+static const u8 enc_input036[] __initconst = {
+ 0xb9, 0xc5, 0x54, 0xcb, 0xc3, 0x6a, 0xc1, 0x8a,
+ 0xe8, 0x97, 0xdf, 0x7b, 0xee, 0xca, 0xc1, 0xdb,
+ 0xeb, 0x4e, 0xaf, 0xa1, 0x56, 0xbb, 0x60, 0xce,
+ 0x2e, 0x5d, 0x48, 0xf0, 0x57, 0x15, 0xe6, 0x78
+};
+static const u8 enc_output036[] __initconst = {
+ 0xea, 0x29, 0xaf, 0xa4, 0x9d, 0x36, 0xe8, 0x76,
+ 0x0f, 0x5f, 0xe1, 0x97, 0x23, 0xb9, 0x81, 0x1e,
+ 0xd5, 0xd5, 0x19, 0x93, 0x4a, 0x44, 0x0f, 0x50,
+ 0x81, 0xac, 0x43, 0x0b, 0x95, 0x3b, 0x0e, 0x21,
+ 0x22, 0x25, 0x41, 0xaf, 0x46, 0xb8, 0x65, 0x33,
+ 0xc6, 0xb6, 0x8d, 0x2f, 0xf1, 0x08, 0xa7, 0xea
+};
+static const u8 enc_assoc036[] __initconst = { };
+static const u8 enc_nonce036[] __initconst = {
+ 0x72, 0xcf, 0xd9, 0x0e, 0xf3, 0x02, 0x6c, 0xa2,
+ 0x2b, 0x7e, 0x6e, 0x6a
+};
+static const u8 enc_key036[] __initconst = {
+ 0xe1, 0x73, 0x1d, 0x58, 0x54, 0xe1, 0xb7, 0x0c,
+ 0xb3, 0xff, 0xe8, 0xb7, 0x86, 0xa2, 0xb3, 0xeb,
+ 0xf0, 0x99, 0x43, 0x70, 0x95, 0x47, 0x57, 0xb9,
+ 0xdc, 0x8c, 0x7b, 0xc5, 0x35, 0x46, 0x34, 0xa3
+};
+
+/* wycheproof - misc */
+static const u8 enc_input037[] __initconst = {
+ 0x6b, 0x26, 0x04, 0x99, 0x6c, 0xd3, 0x0c, 0x14,
+ 0xa1, 0x3a, 0x52, 0x57, 0xed, 0x6c, 0xff, 0xd3,
+ 0xbc, 0x5e, 0x29, 0xd6, 0xb9, 0x7e, 0xb1, 0x79,
+ 0x9e, 0xb3, 0x35, 0xe2, 0x81, 0xea, 0x45, 0x1e
+};
+static const u8 enc_output037[] __initconst = {
+ 0x6d, 0xad, 0x63, 0x78, 0x97, 0x54, 0x4d, 0x8b,
+ 0xf6, 0xbe, 0x95, 0x07, 0xed, 0x4d, 0x1b, 0xb2,
+ 0xe9, 0x54, 0xbc, 0x42, 0x7e, 0x5d, 0xe7, 0x29,
+ 0xda, 0xf5, 0x07, 0x62, 0x84, 0x6f, 0xf2, 0xf4,
+ 0x7b, 0x99, 0x7d, 0x93, 0xc9, 0x82, 0x18, 0x9d,
+ 0x70, 0x95, 0xdc, 0x79, 0x4c, 0x74, 0x62, 0x32
+};
+static const u8 enc_assoc037[] __initconst = {
+ 0x23, 0x33, 0xe5, 0xce, 0x0f, 0x93, 0xb0, 0x59
+};
+static const u8 enc_nonce037[] __initconst = {
+ 0x26, 0x28, 0x80, 0xd4, 0x75, 0xf3, 0xda, 0xc5,
+ 0x34, 0x0d, 0xd1, 0xb8
+};
+static const u8 enc_key037[] __initconst = {
+ 0x27, 0xd8, 0x60, 0x63, 0x1b, 0x04, 0x85, 0xa4,
+ 0x10, 0x70, 0x2f, 0xea, 0x61, 0xbc, 0x87, 0x3f,
+ 0x34, 0x42, 0x26, 0x0c, 0xad, 0xed, 0x4a, 0xbd,
+ 0xe2, 0x5b, 0x78, 0x6a, 0x2d, 0x97, 0xf1, 0x45
+};
+
+/* wycheproof - misc */
+static const u8 enc_input038[] __initconst = {
+ 0x97, 0x3d, 0x0c, 0x75, 0x38, 0x26, 0xba, 0xe4,
+ 0x66, 0xcf, 0x9a, 0xbb, 0x34, 0x93, 0x15, 0x2e,
+ 0x9d, 0xe7, 0x81, 0x9e, 0x2b, 0xd0, 0xc7, 0x11,
+ 0x71, 0x34, 0x6b, 0x4d, 0x2c, 0xeb, 0xf8, 0x04,
+ 0x1a, 0xa3, 0xce, 0xdc, 0x0d, 0xfd, 0x7b, 0x46,
+ 0x7e, 0x26, 0x22, 0x8b, 0xc8, 0x6c, 0x9a
+};
+static const u8 enc_output038[] __initconst = {
+ 0xfb, 0xa7, 0x8a, 0xe4, 0xf9, 0xd8, 0x08, 0xa6,
+ 0x2e, 0x3d, 0xa4, 0x0b, 0xe2, 0xcb, 0x77, 0x00,
+ 0xc3, 0x61, 0x3d, 0x9e, 0xb2, 0xc5, 0x29, 0xc6,
+ 0x52, 0xe7, 0x6a, 0x43, 0x2c, 0x65, 0x8d, 0x27,
+ 0x09, 0x5f, 0x0e, 0xb8, 0xf9, 0x40, 0xc3, 0x24,
+ 0x98, 0x1e, 0xa9, 0x35, 0xe5, 0x07, 0xf9, 0x8f,
+ 0x04, 0x69, 0x56, 0xdb, 0x3a, 0x51, 0x29, 0x08,
+ 0xbd, 0x7a, 0xfc, 0x8f, 0x2a, 0xb0, 0xa9
+};
+static const u8 enc_assoc038[] __initconst = { };
+static const u8 enc_nonce038[] __initconst = {
+ 0xe7, 0x4a, 0x51, 0x5e, 0x7e, 0x21, 0x02, 0xb9,
+ 0x0b, 0xef, 0x55, 0xd2
+};
+static const u8 enc_key038[] __initconst = {
+ 0xcf, 0x0d, 0x40, 0xa4, 0x64, 0x4e, 0x5f, 0x51,
+ 0x81, 0x51, 0x65, 0xd5, 0x30, 0x1b, 0x22, 0x63,
+ 0x1f, 0x45, 0x44, 0xc4, 0x9a, 0x18, 0x78, 0xe3,
+ 0xa0, 0xa5, 0xe8, 0xe1, 0xaa, 0xe0, 0xf2, 0x64
+};
+
+/* wycheproof - misc */
+static const u8 enc_input039[] __initconst = {
+ 0xa9, 0x89, 0x95, 0x50, 0x4d, 0xf1, 0x6f, 0x74,
+ 0x8b, 0xfb, 0x77, 0x85, 0xff, 0x91, 0xee, 0xb3,
+ 0xb6, 0x60, 0xea, 0x9e, 0xd3, 0x45, 0x0c, 0x3d,
+ 0x5e, 0x7b, 0x0e, 0x79, 0xef, 0x65, 0x36, 0x59,
+ 0xa9, 0x97, 0x8d, 0x75, 0x54, 0x2e, 0xf9, 0x1c,
+ 0x45, 0x67, 0x62, 0x21, 0x56, 0x40, 0xb9
+};
+static const u8 enc_output039[] __initconst = {
+ 0xa1, 0xff, 0xed, 0x80, 0x76, 0x18, 0x29, 0xec,
+ 0xce, 0x24, 0x2e, 0x0e, 0x88, 0xb1, 0x38, 0x04,
+ 0x90, 0x16, 0xbc, 0xa0, 0x18, 0xda, 0x2b, 0x6e,
+ 0x19, 0x98, 0x6b, 0x3e, 0x31, 0x8c, 0xae, 0x8d,
+ 0x80, 0x61, 0x98, 0xfb, 0x4c, 0x52, 0x7c, 0xc3,
+ 0x93, 0x50, 0xeb, 0xdd, 0xea, 0xc5, 0x73, 0xc4,
+ 0xcb, 0xf0, 0xbe, 0xfd, 0xa0, 0xb7, 0x02, 0x42,
+ 0xc6, 0x40, 0xd7, 0xcd, 0x02, 0xd7, 0xa3
+};
+static const u8 enc_assoc039[] __initconst = {
+ 0xb3, 0xe4, 0x06, 0x46, 0x83, 0xb0, 0x2d, 0x84
+};
+static const u8 enc_nonce039[] __initconst = {
+ 0xd4, 0xd8, 0x07, 0x34, 0x16, 0x83, 0x82, 0x5b,
+ 0x31, 0xcd, 0x4d, 0x95
+};
+static const u8 enc_key039[] __initconst = {
+ 0x6c, 0xbf, 0xd7, 0x1c, 0x64, 0x5d, 0x18, 0x4c,
+ 0xf5, 0xd2, 0x3c, 0x40, 0x2b, 0xdb, 0x0d, 0x25,
+ 0xec, 0x54, 0x89, 0x8c, 0x8a, 0x02, 0x73, 0xd4,
+ 0x2e, 0xb5, 0xbe, 0x10, 0x9f, 0xdc, 0xb2, 0xac
+};
+
+/* wycheproof - misc */
+static const u8 enc_input040[] __initconst = {
+ 0xd0, 0x96, 0x80, 0x31, 0x81, 0xbe, 0xef, 0x9e,
+ 0x00, 0x8f, 0xf8, 0x5d, 0x5d, 0xdc, 0x38, 0xdd,
+ 0xac, 0xf0, 0xf0, 0x9e, 0xe5, 0xf7, 0xe0, 0x7f,
+ 0x1e, 0x40, 0x79, 0xcb, 0x64, 0xd0, 0xdc, 0x8f,
+ 0x5e, 0x67, 0x11, 0xcd, 0x49, 0x21, 0xa7, 0x88,
+ 0x7d, 0xe7, 0x6e, 0x26, 0x78, 0xfd, 0xc6, 0x76,
+ 0x18, 0xf1, 0x18, 0x55, 0x86, 0xbf, 0xea, 0x9d,
+ 0x4c, 0x68, 0x5d, 0x50, 0xe4, 0xbb, 0x9a, 0x82
+};
+static const u8 enc_output040[] __initconst = {
+ 0x9a, 0x4e, 0xf2, 0x2b, 0x18, 0x16, 0x77, 0xb5,
+ 0x75, 0x5c, 0x08, 0xf7, 0x47, 0xc0, 0xf8, 0xd8,
+ 0xe8, 0xd4, 0xc1, 0x8a, 0x9c, 0xc2, 0x40, 0x5c,
+ 0x12, 0xbb, 0x51, 0xbb, 0x18, 0x72, 0xc8, 0xe8,
+ 0xb8, 0x77, 0x67, 0x8b, 0xec, 0x44, 0x2c, 0xfc,
+ 0xbb, 0x0f, 0xf4, 0x64, 0xa6, 0x4b, 0x74, 0x33,
+ 0x2c, 0xf0, 0x72, 0x89, 0x8c, 0x7e, 0x0e, 0xdd,
+ 0xf6, 0x23, 0x2e, 0xa6, 0xe2, 0x7e, 0xfe, 0x50,
+ 0x9f, 0xf3, 0x42, 0x7a, 0x0f, 0x32, 0xfa, 0x56,
+ 0x6d, 0x9c, 0xa0, 0xa7, 0x8a, 0xef, 0xc0, 0x13
+};
+static const u8 enc_assoc040[] __initconst = { };
+static const u8 enc_nonce040[] __initconst = {
+ 0xd6, 0x10, 0x40, 0xa3, 0x13, 0xed, 0x49, 0x28,
+ 0x23, 0xcc, 0x06, 0x5b
+};
+static const u8 enc_key040[] __initconst = {
+ 0x5b, 0x1d, 0x10, 0x35, 0xc0, 0xb1, 0x7e, 0xe0,
+ 0xb0, 0x44, 0x47, 0x67, 0xf8, 0x0a, 0x25, 0xb8,
+ 0xc1, 0xb7, 0x41, 0xf4, 0xb5, 0x0a, 0x4d, 0x30,
+ 0x52, 0x22, 0x6b, 0xaa, 0x1c, 0x6f, 0xb7, 0x01
+};
+
+/* wycheproof - misc */
+static const u8 enc_input041[] __initconst = {
+ 0x94, 0xee, 0x16, 0x6d, 0x6d, 0x6e, 0xcf, 0x88,
+ 0x32, 0x43, 0x71, 0x36, 0xb4, 0xae, 0x80, 0x5d,
+ 0x42, 0x88, 0x64, 0x35, 0x95, 0x86, 0xd9, 0x19,
+ 0x3a, 0x25, 0x01, 0x62, 0x93, 0xed, 0xba, 0x44,
+ 0x3c, 0x58, 0xe0, 0x7e, 0x7b, 0x71, 0x95, 0xec,
+ 0x5b, 0xd8, 0x45, 0x82, 0xa9, 0xd5, 0x6c, 0x8d,
+ 0x4a, 0x10, 0x8c, 0x7d, 0x7c, 0xe3, 0x4e, 0x6c,
+ 0x6f, 0x8e, 0xa1, 0xbe, 0xc0, 0x56, 0x73, 0x17
+};
+static const u8 enc_output041[] __initconst = {
+ 0x5f, 0xbb, 0xde, 0xcc, 0x34, 0xbe, 0x20, 0x16,
+ 0x14, 0xf6, 0x36, 0x03, 0x1e, 0xeb, 0x42, 0xf1,
+ 0xca, 0xce, 0x3c, 0x79, 0xa1, 0x2c, 0xff, 0xd8,
+ 0x71, 0xee, 0x8e, 0x73, 0x82, 0x0c, 0x82, 0x97,
+ 0x49, 0xf1, 0xab, 0xb4, 0x29, 0x43, 0x67, 0x84,
+ 0x9f, 0xb6, 0xc2, 0xaa, 0x56, 0xbd, 0xa8, 0xa3,
+ 0x07, 0x8f, 0x72, 0x3d, 0x7c, 0x1c, 0x85, 0x20,
+ 0x24, 0xb0, 0x17, 0xb5, 0x89, 0x73, 0xfb, 0x1e,
+ 0x09, 0x26, 0x3d, 0xa7, 0xb4, 0xcb, 0x92, 0x14,
+ 0x52, 0xf9, 0x7d, 0xca, 0x40, 0xf5, 0x80, 0xec
+};
+static const u8 enc_assoc041[] __initconst = {
+ 0x71, 0x93, 0xf6, 0x23, 0x66, 0x33, 0x21, 0xa2
+};
+static const u8 enc_nonce041[] __initconst = {
+ 0xd3, 0x1c, 0x21, 0xab, 0xa1, 0x75, 0xb7, 0x0d,
+ 0xe4, 0xeb, 0xb1, 0x9c
+};
+static const u8 enc_key041[] __initconst = {
+ 0x97, 0xd6, 0x35, 0xc4, 0xf4, 0x75, 0x74, 0xd9,
+ 0x99, 0x8a, 0x90, 0x87, 0x5d, 0xa1, 0xd3, 0xa2,
+ 0x84, 0xb7, 0x55, 0xb2, 0xd3, 0x92, 0x97, 0xa5,
+ 0x72, 0x52, 0x35, 0x19, 0x0e, 0x10, 0xa9, 0x7e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input042[] __initconst = {
+ 0xb4, 0x29, 0xeb, 0x80, 0xfb, 0x8f, 0xe8, 0xba,
+ 0xed, 0xa0, 0xc8, 0x5b, 0x9c, 0x33, 0x34, 0x58,
+ 0xe7, 0xc2, 0x99, 0x2e, 0x55, 0x84, 0x75, 0x06,
+ 0x9d, 0x12, 0xd4, 0x5c, 0x22, 0x21, 0x75, 0x64,
+ 0x12, 0x15, 0x88, 0x03, 0x22, 0x97, 0xef, 0xf5,
+ 0x67, 0x83, 0x74, 0x2a, 0x5f, 0xc2, 0x2d, 0x74,
+ 0x10, 0xff, 0xb2, 0x9d, 0x66, 0x09, 0x86, 0x61,
+ 0xd7, 0x6f, 0x12, 0x6c, 0x3c, 0x27, 0x68, 0x9e,
+ 0x43, 0xb3, 0x72, 0x67, 0xca, 0xc5, 0xa3, 0xa6,
+ 0xd3, 0xab, 0x49, 0xe3, 0x91, 0xda, 0x29, 0xcd,
+ 0x30, 0x54, 0xa5, 0x69, 0x2e, 0x28, 0x07, 0xe4,
+ 0xc3, 0xea, 0x46, 0xc8, 0x76, 0x1d, 0x50, 0xf5,
+ 0x92
+};
+static const u8 enc_output042[] __initconst = {
+ 0xd0, 0x10, 0x2f, 0x6c, 0x25, 0x8b, 0xf4, 0x97,
+ 0x42, 0xce, 0xc3, 0x4c, 0xf2, 0xd0, 0xfe, 0xdf,
+ 0x23, 0xd1, 0x05, 0xfb, 0x4c, 0x84, 0xcf, 0x98,
+ 0x51, 0x5e, 0x1b, 0xc9, 0xa6, 0x4f, 0x8a, 0xd5,
+ 0xbe, 0x8f, 0x07, 0x21, 0xbd, 0xe5, 0x06, 0x45,
+ 0xd0, 0x00, 0x83, 0xc3, 0xa2, 0x63, 0xa3, 0x10,
+ 0x53, 0xb7, 0x60, 0x24, 0x5f, 0x52, 0xae, 0x28,
+ 0x66, 0xa5, 0xec, 0x83, 0xb1, 0x9f, 0x61, 0xbe,
+ 0x1d, 0x30, 0xd5, 0xc5, 0xd9, 0xfe, 0xcc, 0x4c,
+ 0xbb, 0xe0, 0x8f, 0xd3, 0x85, 0x81, 0x3a, 0x2a,
+ 0xa3, 0x9a, 0x00, 0xff, 0x9c, 0x10, 0xf7, 0xf2,
+ 0x37, 0x02, 0xad, 0xd1, 0xe4, 0xb2, 0xff, 0xa3,
+ 0x1c, 0x41, 0x86, 0x5f, 0xc7, 0x1d, 0xe1, 0x2b,
+ 0x19, 0x61, 0x21, 0x27, 0xce, 0x49, 0x99, 0x3b,
+ 0xb0
+};
+static const u8 enc_assoc042[] __initconst = { };
+static const u8 enc_nonce042[] __initconst = {
+ 0x17, 0xc8, 0x6a, 0x8a, 0xbb, 0xb7, 0xe0, 0x03,
+ 0xac, 0xde, 0x27, 0x99
+};
+static const u8 enc_key042[] __initconst = {
+ 0xfe, 0x6e, 0x55, 0xbd, 0xae, 0xd1, 0xf7, 0x28,
+ 0x4c, 0xa5, 0xfc, 0x0f, 0x8c, 0x5f, 0x2b, 0x8d,
+ 0xf5, 0x6d, 0xc0, 0xf4, 0x9e, 0x8c, 0xa6, 0x6a,
+ 0x41, 0x99, 0x5e, 0x78, 0x33, 0x51, 0xf9, 0x01
+};
+
+/* wycheproof - misc */
+static const u8 enc_input043[] __initconst = {
+ 0xce, 0xb5, 0x34, 0xce, 0x50, 0xdc, 0x23, 0xff,
+ 0x63, 0x8a, 0xce, 0x3e, 0xf6, 0x3a, 0xb2, 0xcc,
+ 0x29, 0x73, 0xee, 0xad, 0xa8, 0x07, 0x85, 0xfc,
+ 0x16, 0x5d, 0x06, 0xc2, 0xf5, 0x10, 0x0f, 0xf5,
+ 0xe8, 0xab, 0x28, 0x82, 0xc4, 0x75, 0xaf, 0xcd,
+ 0x05, 0xcc, 0xd4, 0x9f, 0x2e, 0x7d, 0x8f, 0x55,
+ 0xef, 0x3a, 0x72, 0xe3, 0xdc, 0x51, 0xd6, 0x85,
+ 0x2b, 0x8e, 0x6b, 0x9e, 0x7a, 0xec, 0xe5, 0x7b,
+ 0xe6, 0x55, 0x6b, 0x0b, 0x6d, 0x94, 0x13, 0xe3,
+ 0x3f, 0xc5, 0xfc, 0x24, 0xa9, 0xa2, 0x05, 0xad,
+ 0x59, 0x57, 0x4b, 0xb3, 0x9d, 0x94, 0x4a, 0x92,
+ 0xdc, 0x47, 0x97, 0x0d, 0x84, 0xa6, 0xad, 0x31,
+ 0x76
+};
+static const u8 enc_output043[] __initconst = {
+ 0x75, 0x45, 0x39, 0x1b, 0x51, 0xde, 0x01, 0xd5,
+ 0xc5, 0x3d, 0xfa, 0xca, 0x77, 0x79, 0x09, 0x06,
+ 0x3e, 0x58, 0xed, 0xee, 0x4b, 0xb1, 0x22, 0x7e,
+ 0x71, 0x10, 0xac, 0x4d, 0x26, 0x20, 0xc2, 0xae,
+ 0xc2, 0xf8, 0x48, 0xf5, 0x6d, 0xee, 0xb0, 0x37,
+ 0xa8, 0xdc, 0xed, 0x75, 0xaf, 0xa8, 0xa6, 0xc8,
+ 0x90, 0xe2, 0xde, 0xe4, 0x2f, 0x95, 0x0b, 0xb3,
+ 0x3d, 0x9e, 0x24, 0x24, 0xd0, 0x8a, 0x50, 0x5d,
+ 0x89, 0x95, 0x63, 0x97, 0x3e, 0xd3, 0x88, 0x70,
+ 0xf3, 0xde, 0x6e, 0xe2, 0xad, 0xc7, 0xfe, 0x07,
+ 0x2c, 0x36, 0x6c, 0x14, 0xe2, 0xcf, 0x7c, 0xa6,
+ 0x2f, 0xb3, 0xd3, 0x6b, 0xee, 0x11, 0x68, 0x54,
+ 0x61, 0xb7, 0x0d, 0x44, 0xef, 0x8c, 0x66, 0xc5,
+ 0xc7, 0xbb, 0xf1, 0x0d, 0xca, 0xdd, 0x7f, 0xac,
+ 0xf6
+};
+static const u8 enc_assoc043[] __initconst = {
+ 0xa1, 0x1c, 0x40, 0xb6, 0x03, 0x76, 0x73, 0x30
+};
+static const u8 enc_nonce043[] __initconst = {
+ 0x46, 0x36, 0x2f, 0x45, 0xd6, 0x37, 0x9e, 0x63,
+ 0xe5, 0x22, 0x94, 0x60
+};
+static const u8 enc_key043[] __initconst = {
+ 0xaa, 0xbc, 0x06, 0x34, 0x74, 0xe6, 0x5c, 0x4c,
+ 0x3e, 0x9b, 0xdc, 0x48, 0x0d, 0xea, 0x97, 0xb4,
+ 0x51, 0x10, 0xc8, 0x61, 0x88, 0x46, 0xff, 0x6b,
+ 0x15, 0xbd, 0xd2, 0xa4, 0xa5, 0x68, 0x2c, 0x4e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input044[] __initconst = {
+ 0xe5, 0xcc, 0xaa, 0x44, 0x1b, 0xc8, 0x14, 0x68,
+ 0x8f, 0x8f, 0x6e, 0x8f, 0x28, 0xb5, 0x00, 0xb2
+};
+static const u8 enc_output044[] __initconst = {
+ 0x7e, 0x72, 0xf5, 0xa1, 0x85, 0xaf, 0x16, 0xa6,
+ 0x11, 0x92, 0x1b, 0x43, 0x8f, 0x74, 0x9f, 0x0b,
+ 0x12, 0x42, 0xc6, 0x70, 0x73, 0x23, 0x34, 0x02,
+ 0x9a, 0xdf, 0xe1, 0xc5, 0x00, 0x16, 0x51, 0xe4
+};
+static const u8 enc_assoc044[] __initconst = {
+ 0x02
+};
+static const u8 enc_nonce044[] __initconst = {
+ 0x87, 0x34, 0x5f, 0x10, 0x55, 0xfd, 0x9e, 0x21,
+ 0x02, 0xd5, 0x06, 0x56
+};
+static const u8 enc_key044[] __initconst = {
+ 0x7d, 0x00, 0xb4, 0x80, 0x95, 0xad, 0xfa, 0x32,
+ 0x72, 0x05, 0x06, 0x07, 0xb2, 0x64, 0x18, 0x50,
+ 0x02, 0xba, 0x99, 0x95, 0x7c, 0x49, 0x8b, 0xe0,
+ 0x22, 0x77, 0x0f, 0x2c, 0xe2, 0xf3, 0x14, 0x3c
+};
+
+/* wycheproof - misc */
+static const u8 enc_input045[] __initconst = {
+ 0x02, 0xcd, 0xe1, 0x68, 0xfb, 0xa3, 0xf5, 0x44,
+ 0xbb, 0xd0, 0x33, 0x2f, 0x7a, 0xde, 0xad, 0xa8
+};
+static const u8 enc_output045[] __initconst = {
+ 0x85, 0xf2, 0x9a, 0x71, 0x95, 0x57, 0xcd, 0xd1,
+ 0x4d, 0x1f, 0x8f, 0xff, 0xab, 0x6d, 0x9e, 0x60,
+ 0x73, 0x2c, 0xa3, 0x2b, 0xec, 0xd5, 0x15, 0xa1,
+ 0xed, 0x35, 0x3f, 0x54, 0x2e, 0x99, 0x98, 0x58
+};
+static const u8 enc_assoc045[] __initconst = {
+ 0xb6, 0x48
+};
+static const u8 enc_nonce045[] __initconst = {
+ 0x87, 0xa3, 0x16, 0x3e, 0xc0, 0x59, 0x8a, 0xd9,
+ 0x5b, 0x3a, 0xa7, 0x13
+};
+static const u8 enc_key045[] __initconst = {
+ 0x64, 0x32, 0x71, 0x7f, 0x1d, 0xb8, 0x5e, 0x41,
+ 0xac, 0x78, 0x36, 0xbc, 0xe2, 0x51, 0x85, 0xa0,
+ 0x80, 0xd5, 0x76, 0x2b, 0x9e, 0x2b, 0x18, 0x44,
+ 0x4b, 0x6e, 0xc7, 0x2c, 0x3b, 0xd8, 0xe4, 0xdc
+};
+
+/* wycheproof - misc */
+static const u8 enc_input046[] __initconst = {
+ 0x16, 0xdd, 0xd2, 0x3f, 0xf5, 0x3f, 0x3d, 0x23,
+ 0xc0, 0x63, 0x34, 0x48, 0x70, 0x40, 0xeb, 0x47
+};
+static const u8 enc_output046[] __initconst = {
+ 0xc1, 0xb2, 0x95, 0x93, 0x6d, 0x56, 0xfa, 0xda,
+ 0xc0, 0x3e, 0x5f, 0x74, 0x2b, 0xff, 0x73, 0xa1,
+ 0x39, 0xc4, 0x57, 0xdb, 0xab, 0x66, 0x38, 0x2b,
+ 0xab, 0xb3, 0xb5, 0x58, 0x00, 0xcd, 0xa5, 0xb8
+};
+static const u8 enc_assoc046[] __initconst = {
+ 0xbd, 0x4c, 0xd0, 0x2f, 0xc7, 0x50, 0x2b, 0xbd,
+ 0xbd, 0xf6, 0xc9, 0xa3, 0xcb, 0xe8, 0xf0
+};
+static const u8 enc_nonce046[] __initconst = {
+ 0x6f, 0x57, 0x3a, 0xa8, 0x6b, 0xaa, 0x49, 0x2b,
+ 0xa4, 0x65, 0x96, 0xdf
+};
+static const u8 enc_key046[] __initconst = {
+ 0x8e, 0x34, 0xcf, 0x73, 0xd2, 0x45, 0xa1, 0x08,
+ 0x2a, 0x92, 0x0b, 0x86, 0x36, 0x4e, 0xb8, 0x96,
+ 0xc4, 0x94, 0x64, 0x67, 0xbc, 0xb3, 0xd5, 0x89,
+ 0x29, 0xfc, 0xb3, 0x66, 0x90, 0xe6, 0x39, 0x4f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input047[] __initconst = {
+ 0x62, 0x3b, 0x78, 0x50, 0xc3, 0x21, 0xe2, 0xcf,
+ 0x0c, 0x6f, 0xbc, 0xc8, 0xdf, 0xd1, 0xaf, 0xf2
+};
+static const u8 enc_output047[] __initconst = {
+ 0xc8, 0x4c, 0x9b, 0xb7, 0xc6, 0x1c, 0x1b, 0xcb,
+ 0x17, 0x77, 0x2a, 0x1c, 0x50, 0x0c, 0x50, 0x95,
+ 0xdb, 0xad, 0xf7, 0xa5, 0x13, 0x8c, 0xa0, 0x34,
+ 0x59, 0xa2, 0xcd, 0x65, 0x83, 0x1e, 0x09, 0x2f
+};
+static const u8 enc_assoc047[] __initconst = {
+ 0x89, 0xcc, 0xe9, 0xfb, 0x47, 0x44, 0x1d, 0x07,
+ 0xe0, 0x24, 0x5a, 0x66, 0xfe, 0x8b, 0x77, 0x8b
+};
+static const u8 enc_nonce047[] __initconst = {
+ 0x1a, 0x65, 0x18, 0xf0, 0x2e, 0xde, 0x1d, 0xa6,
+ 0x80, 0x92, 0x66, 0xd9
+};
+static const u8 enc_key047[] __initconst = {
+ 0xcb, 0x55, 0x75, 0xf5, 0xc7, 0xc4, 0x5c, 0x91,
+ 0xcf, 0x32, 0x0b, 0x13, 0x9f, 0xb5, 0x94, 0x23,
+ 0x75, 0x60, 0xd0, 0xa3, 0xe6, 0xf8, 0x65, 0xa6,
+ 0x7d, 0x4f, 0x63, 0x3f, 0x2c, 0x08, 0xf0, 0x16
+};
+
+/* wycheproof - misc */
+static const u8 enc_input048[] __initconst = {
+ 0x87, 0xb3, 0xa4, 0xd7, 0xb2, 0x6d, 0x8d, 0x32,
+ 0x03, 0xa0, 0xde, 0x1d, 0x64, 0xef, 0x82, 0xe3
+};
+static const u8 enc_output048[] __initconst = {
+ 0x94, 0xbc, 0x80, 0x62, 0x1e, 0xd1, 0xe7, 0x1b,
+ 0x1f, 0xd2, 0xb5, 0xc3, 0xa1, 0x5e, 0x35, 0x68,
+ 0x33, 0x35, 0x11, 0x86, 0x17, 0x96, 0x97, 0x84,
+ 0x01, 0x59, 0x8b, 0x96, 0x37, 0x22, 0xf5, 0xb3
+};
+static const u8 enc_assoc048[] __initconst = {
+ 0xd1, 0x9f, 0x2d, 0x98, 0x90, 0x95, 0xf7, 0xab,
+ 0x03, 0xa5, 0xfd, 0xe8, 0x44, 0x16, 0xe0, 0x0c,
+ 0x0e
+};
+static const u8 enc_nonce048[] __initconst = {
+ 0x56, 0x4d, 0xee, 0x49, 0xab, 0x00, 0xd2, 0x40,
+ 0xfc, 0x10, 0x68, 0xc3
+};
+static const u8 enc_key048[] __initconst = {
+ 0xa5, 0x56, 0x9e, 0x72, 0x9a, 0x69, 0xb2, 0x4b,
+ 0xa6, 0xe0, 0xff, 0x15, 0xc4, 0x62, 0x78, 0x97,
+ 0x43, 0x68, 0x24, 0xc9, 0x41, 0xe9, 0xd0, 0x0b,
+ 0x2e, 0x93, 0xfd, 0xdc, 0x4b, 0xa7, 0x76, 0x57
+};
+
+/* wycheproof - misc */
+static const u8 enc_input049[] __initconst = {
+ 0xe6, 0x01, 0xb3, 0x85, 0x57, 0x79, 0x7d, 0xa2,
+ 0xf8, 0xa4, 0x10, 0x6a, 0x08, 0x9d, 0x1d, 0xa6
+};
+static const u8 enc_output049[] __initconst = {
+ 0x29, 0x9b, 0x5d, 0x3f, 0x3d, 0x03, 0xc0, 0x87,
+ 0x20, 0x9a, 0x16, 0xe2, 0x85, 0x14, 0x31, 0x11,
+ 0x4b, 0x45, 0x4e, 0xd1, 0x98, 0xde, 0x11, 0x7e,
+ 0x83, 0xec, 0x49, 0xfa, 0x8d, 0x85, 0x08, 0xd6
+};
+static const u8 enc_assoc049[] __initconst = {
+ 0x5e, 0x64, 0x70, 0xfa, 0xcd, 0x99, 0xc1, 0xd8,
+ 0x1e, 0x37, 0xcd, 0x44, 0x01, 0x5f, 0xe1, 0x94,
+ 0x80, 0xa2, 0xa4, 0xd3, 0x35, 0x2a, 0x4f, 0xf5,
+ 0x60, 0xc0, 0x64, 0x0f, 0xdb, 0xda
+};
+static const u8 enc_nonce049[] __initconst = {
+ 0xdf, 0x87, 0x13, 0xe8, 0x7e, 0xc3, 0xdb, 0xcf,
+ 0xad, 0x14, 0xd5, 0x3e
+};
+static const u8 enc_key049[] __initconst = {
+ 0x56, 0x20, 0x74, 0x65, 0xb4, 0xe4, 0x8e, 0x6d,
+ 0x04, 0x63, 0x0f, 0x4a, 0x42, 0xf3, 0x5c, 0xfc,
+ 0x16, 0x3a, 0xb2, 0x89, 0xc2, 0x2a, 0x2b, 0x47,
+ 0x84, 0xf6, 0xf9, 0x29, 0x03, 0x30, 0xbe, 0xe0
+};
+
+/* wycheproof - misc */
+static const u8 enc_input050[] __initconst = {
+ 0xdc, 0x9e, 0x9e, 0xaf, 0x11, 0xe3, 0x14, 0x18,
+ 0x2d, 0xf6, 0xa4, 0xeb, 0xa1, 0x7a, 0xec, 0x9c
+};
+static const u8 enc_output050[] __initconst = {
+ 0x60, 0x5b, 0xbf, 0x90, 0xae, 0xb9, 0x74, 0xf6,
+ 0x60, 0x2b, 0xc7, 0x78, 0x05, 0x6f, 0x0d, 0xca,
+ 0x38, 0xea, 0x23, 0xd9, 0x90, 0x54, 0xb4, 0x6b,
+ 0x42, 0xff, 0xe0, 0x04, 0x12, 0x9d, 0x22, 0x04
+};
+static const u8 enc_assoc050[] __initconst = {
+ 0xba, 0x44, 0x6f, 0x6f, 0x9a, 0x0c, 0xed, 0x22,
+ 0x45, 0x0f, 0xeb, 0x10, 0x73, 0x7d, 0x90, 0x07,
+ 0xfd, 0x69, 0xab, 0xc1, 0x9b, 0x1d, 0x4d, 0x90,
+ 0x49, 0xa5, 0x55, 0x1e, 0x86, 0xec, 0x2b, 0x37
+};
+static const u8 enc_nonce050[] __initconst = {
+ 0x8d, 0xf4, 0xb1, 0x5a, 0x88, 0x8c, 0x33, 0x28,
+ 0x6a, 0x7b, 0x76, 0x51
+};
+static const u8 enc_key050[] __initconst = {
+ 0x39, 0x37, 0x98, 0x6a, 0xf8, 0x6d, 0xaf, 0xc1,
+ 0xba, 0x0c, 0x46, 0x72, 0xd8, 0xab, 0xc4, 0x6c,
+ 0x20, 0x70, 0x62, 0x68, 0x2d, 0x9c, 0x26, 0x4a,
+ 0xb0, 0x6d, 0x6c, 0x58, 0x07, 0x20, 0x51, 0x30
+};
+
+/* wycheproof - misc */
+static const u8 enc_input051[] __initconst = {
+ 0x81, 0xce, 0x84, 0xed, 0xe9, 0xb3, 0x58, 0x59,
+ 0xcc, 0x8c, 0x49, 0xa8, 0xf6, 0xbe, 0x7d, 0xc6
+};
+static const u8 enc_output051[] __initconst = {
+ 0x7b, 0x7c, 0xe0, 0xd8, 0x24, 0x80, 0x9a, 0x70,
+ 0xde, 0x32, 0x56, 0x2c, 0xcf, 0x2c, 0x2b, 0xbd,
+ 0x15, 0xd4, 0x4a, 0x00, 0xce, 0x0d, 0x19, 0xb4,
+ 0x23, 0x1f, 0x92, 0x1e, 0x22, 0xbc, 0x0a, 0x43
+};
+static const u8 enc_assoc051[] __initconst = {
+ 0xd4, 0x1a, 0x82, 0x8d, 0x5e, 0x71, 0x82, 0x92,
+ 0x47, 0x02, 0x19, 0x05, 0x40, 0x2e, 0xa2, 0x57,
+ 0xdc, 0xcb, 0xc3, 0xb8, 0x0f, 0xcd, 0x56, 0x75,
+ 0x05, 0x6b, 0x68, 0xbb, 0x59, 0xe6, 0x2e, 0x88,
+ 0x73
+};
+static const u8 enc_nonce051[] __initconst = {
+ 0xbe, 0x40, 0xe5, 0xf1, 0xa1, 0x18, 0x17, 0xa0,
+ 0xa8, 0xfa, 0x89, 0x49
+};
+static const u8 enc_key051[] __initconst = {
+ 0x36, 0x37, 0x2a, 0xbc, 0xdb, 0x78, 0xe0, 0x27,
+ 0x96, 0x46, 0xac, 0x3d, 0x17, 0x6b, 0x96, 0x74,
+ 0xe9, 0x15, 0x4e, 0xec, 0xf0, 0xd5, 0x46, 0x9c,
+ 0x65, 0x1e, 0xc7, 0xe1, 0x6b, 0x4c, 0x11, 0x99
+};
+
+/* wycheproof - misc */
+static const u8 enc_input052[] __initconst = {
+ 0xa6, 0x67, 0x47, 0xc8, 0x9e, 0x85, 0x7a, 0xf3,
+ 0xa1, 0x8e, 0x2c, 0x79, 0x50, 0x00, 0x87, 0xed
+};
+static const u8 enc_output052[] __initconst = {
+ 0xca, 0x82, 0xbf, 0xf3, 0xe2, 0xf3, 0x10, 0xcc,
+ 0xc9, 0x76, 0x67, 0x2c, 0x44, 0x15, 0xe6, 0x9b,
+ 0x57, 0x63, 0x8c, 0x62, 0xa5, 0xd8, 0x5d, 0xed,
+ 0x77, 0x4f, 0x91, 0x3c, 0x81, 0x3e, 0xa0, 0x32
+};
+static const u8 enc_assoc052[] __initconst = {
+ 0x3f, 0x2d, 0xd4, 0x9b, 0xbf, 0x09, 0xd6, 0x9a,
+ 0x78, 0xa3, 0xd8, 0x0e, 0xa2, 0x56, 0x66, 0x14,
+ 0xfc, 0x37, 0x94, 0x74, 0x19, 0x6c, 0x1a, 0xae,
+ 0x84, 0x58, 0x3d, 0xa7, 0x3d, 0x7f, 0xf8, 0x5c,
+ 0x6f, 0x42, 0xca, 0x42, 0x05, 0x6a, 0x97, 0x92,
+ 0xcc, 0x1b, 0x9f, 0xb3, 0xc7, 0xd2, 0x61
+};
+static const u8 enc_nonce052[] __initconst = {
+ 0x84, 0xc8, 0x7d, 0xae, 0x4e, 0xee, 0x27, 0x73,
+ 0x0e, 0xc3, 0x5d, 0x12
+};
+static const u8 enc_key052[] __initconst = {
+ 0x9f, 0x14, 0x79, 0xed, 0x09, 0x7d, 0x7f, 0xe5,
+ 0x29, 0xc1, 0x1f, 0x2f, 0x5a, 0xdd, 0x9a, 0xaf,
+ 0xf4, 0xa1, 0xca, 0x0b, 0x68, 0x99, 0x7a, 0x2c,
+ 0xb7, 0xf7, 0x97, 0x49, 0xbd, 0x90, 0xaa, 0xf4
+};
+
/* wycheproof - misc */
static const u8 enc_input053[] __initconst = {
0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
@@ -2760,6 +3859,126 @@ static const u8 enc_key073[] __initconst = {
};
/* wycheproof - checking for int overflows */
+static const u8 enc_input074[] __initconst = {
+ 0xd4, 0x50, 0x0b, 0xf0, 0x09, 0x49, 0x35, 0x51,
+ 0xc3, 0x80, 0xad, 0xf5, 0x2c, 0x57, 0x3a, 0x69,
+ 0xdf, 0x7e, 0x8b, 0x76, 0x24, 0x63, 0x33, 0x0f,
+ 0xac, 0xc1, 0x6a, 0x57, 0x26, 0xbe, 0x71, 0x90,
+ 0xc6, 0x3c, 0x5a, 0x1c, 0x92, 0x65, 0x84, 0xa0,
+ 0x96, 0x75, 0x68, 0x28, 0xdc, 0xdc, 0x64, 0xac,
+ 0xdf, 0x96, 0x3d, 0x93, 0x1b, 0xf1, 0xda, 0xe2,
+ 0x38, 0xf3, 0xf1, 0x57, 0x22, 0x4a, 0xc4, 0xb5,
+ 0x42, 0xd7, 0x85, 0xb0, 0xdd, 0x84, 0xdb, 0x6b,
+ 0xe3, 0xbc, 0x5a, 0x36, 0x63, 0xe8, 0x41, 0x49,
+ 0xff, 0xbe, 0xd0, 0x9e, 0x54, 0xf7, 0x8f, 0x16,
+ 0xa8, 0x22, 0x3b, 0x24, 0xcb, 0x01, 0x9f, 0x58,
+ 0xb2, 0x1b, 0x0e, 0x55, 0x1e, 0x7a, 0xa0, 0x73,
+ 0x27, 0x62, 0x95, 0x51, 0x37, 0x6c, 0xcb, 0xc3,
+ 0x93, 0x76, 0x71, 0xa0, 0x62, 0x9b, 0xd9, 0x5c,
+ 0x99, 0x15, 0xc7, 0x85, 0x55, 0x77, 0x1e, 0x7a
+};
+static const u8 enc_output074[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x30, 0x0d, 0x8d, 0xa5, 0x6c, 0x21, 0x85,
+ 0x75, 0x52, 0x79, 0x55, 0x3c, 0x4c, 0x82, 0xca
+};
+static const u8 enc_assoc074[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce074[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x00, 0x02, 0x50, 0x6e
+};
+static const u8 enc_key074[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input075[] __initconst = {
+ 0x7d, 0xe8, 0x7f, 0x67, 0x29, 0x94, 0x52, 0x75,
+ 0xd0, 0x65, 0x5d, 0xa4, 0xc7, 0xfd, 0xe4, 0x56,
+ 0x9e, 0x16, 0xf1, 0x11, 0xb5, 0xeb, 0x26, 0xc2,
+ 0x2d, 0x85, 0x9e, 0x3f, 0xf8, 0x22, 0xec, 0xed,
+ 0x3a, 0x6d, 0xd9, 0xa6, 0x0f, 0x22, 0x95, 0x7f,
+ 0x7b, 0x7c, 0x85, 0x7e, 0x88, 0x22, 0xeb, 0x9f,
+ 0xe0, 0xb8, 0xd7, 0x02, 0x21, 0x41, 0xf2, 0xd0,
+ 0xb4, 0x8f, 0x4b, 0x56, 0x12, 0xd3, 0x22, 0xa8,
+ 0x8d, 0xd0, 0xfe, 0x0b, 0x4d, 0x91, 0x79, 0x32,
+ 0x4f, 0x7c, 0x6c, 0x9e, 0x99, 0x0e, 0xfb, 0xd8,
+ 0x0e, 0x5e, 0xd6, 0x77, 0x58, 0x26, 0x49, 0x8b,
+ 0x1e, 0xfe, 0x0f, 0x71, 0xa0, 0xf3, 0xec, 0x5b,
+ 0x29, 0xcb, 0x28, 0xc2, 0x54, 0x0a, 0x7d, 0xcd,
+ 0x51, 0xb7, 0xda, 0xae, 0xe0, 0xff, 0x4a, 0x7f,
+ 0x3a, 0xc1, 0xee, 0x54, 0xc2, 0x9e, 0xe4, 0xc1,
+ 0x70, 0xde, 0x40, 0x8f, 0x66, 0x69, 0x21, 0x94
+};
+static const u8 enc_output075[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc5, 0x78, 0xe2, 0xaa, 0x44, 0xd3, 0x09, 0xb7,
+ 0xb6, 0xa5, 0x19, 0x3b, 0xdc, 0x61, 0x18, 0xf5
+};
+static const u8 enc_assoc075[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce075[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x00, 0x03, 0x18, 0xa5
+};
+static const u8 enc_key075[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
static const u8 enc_input076[] __initconst = {
0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85,
0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02,
@@ -3349,6 +4568,286 @@ static const u8 enc_key085[] __initconst = {
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
+/* wycheproof - special case tag */
+static const u8 enc_input086[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output086[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const u8 enc_assoc086[] __initconst = {
+ 0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa6, 0x90, 0x2f, 0xcb, 0xc8, 0x83, 0xbb, 0xc1,
+ 0x80, 0xb2, 0x56, 0xae, 0x34, 0xad, 0x7f, 0x00
+};
+static const u8 enc_nonce086[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key086[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input087[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output087[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc087[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x24, 0x7e, 0x50, 0x64, 0x2a, 0x1c, 0x0a, 0x2f,
+ 0x8f, 0x77, 0x21, 0x96, 0x09, 0xdb, 0xa9, 0x58
+};
+static const u8 enc_nonce087[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key087[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input088[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output088[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_assoc088[] __initconst = {
+ 0x7c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd9, 0xe7, 0x2c, 0x06, 0x4a, 0xc8, 0x96, 0x1f,
+ 0x3f, 0xa5, 0x85, 0xe0, 0xe2, 0xab, 0xd6, 0x00
+};
+static const u8 enc_nonce088[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key088[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input089[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output089[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_assoc089[] __initconst = {
+ 0x65, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x95, 0xaf, 0x0f, 0x4d, 0x0b, 0x68, 0x6e, 0xae,
+ 0xcc, 0xca, 0x43, 0x07, 0xd5, 0x96, 0xf5, 0x02
+};
+static const u8 enc_nonce089[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key089[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input090[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output090[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_assoc090[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x85, 0x40, 0xb4, 0x64, 0x35, 0x77, 0x07, 0xbe,
+ 0x3a, 0x39, 0xd5, 0x5c, 0x34, 0xf8, 0xbc, 0xb3
+};
+static const u8 enc_nonce090[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key090[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input091[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output091[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc091[] __initconst = {
+ 0x4f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0x23, 0xd9, 0x90, 0xb8, 0x98, 0xd8, 0x30,
+ 0xd2, 0x12, 0xaf, 0x23, 0x83, 0x33, 0x07, 0x01
+};
+static const u8 enc_nonce091[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key091[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input092[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output092[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc092[] __initconst = {
+ 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x5f, 0x16, 0xd0, 0x9f, 0x17, 0x78, 0x72, 0x11,
+ 0xb7, 0xd4, 0x84, 0xe0, 0x24, 0xf8, 0x97, 0x01
+};
+static const u8 enc_nonce092[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key092[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input093[] __initconst = {
0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d,
@@ -4455,6 +5954,86 @@ chacha20poly1305_enc_vectors[] __initconst = {
sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) },
{ enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012,
sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) },
+ { enc_input013, enc_output013, enc_assoc013, enc_nonce013, enc_key013,
+ sizeof(enc_input013), sizeof(enc_assoc013), sizeof(enc_nonce013) },
+ { enc_input014, enc_output014, enc_assoc014, enc_nonce014, enc_key014,
+ sizeof(enc_input014), sizeof(enc_assoc014), sizeof(enc_nonce014) },
+ { enc_input015, enc_output015, enc_assoc015, enc_nonce015, enc_key015,
+ sizeof(enc_input015), sizeof(enc_assoc015), sizeof(enc_nonce015) },
+ { enc_input016, enc_output016, enc_assoc016, enc_nonce016, enc_key016,
+ sizeof(enc_input016), sizeof(enc_assoc016), sizeof(enc_nonce016) },
+ { enc_input017, enc_output017, enc_assoc017, enc_nonce017, enc_key017,
+ sizeof(enc_input017), sizeof(enc_assoc017), sizeof(enc_nonce017) },
+ { enc_input018, enc_output018, enc_assoc018, enc_nonce018, enc_key018,
+ sizeof(enc_input018), sizeof(enc_assoc018), sizeof(enc_nonce018) },
+ { enc_input019, enc_output019, enc_assoc019, enc_nonce019, enc_key019,
+ sizeof(enc_input019), sizeof(enc_assoc019), sizeof(enc_nonce019) },
+ { enc_input020, enc_output020, enc_assoc020, enc_nonce020, enc_key020,
+ sizeof(enc_input020), sizeof(enc_assoc020), sizeof(enc_nonce020) },
+ { enc_input021, enc_output021, enc_assoc021, enc_nonce021, enc_key021,
+ sizeof(enc_input021), sizeof(enc_assoc021), sizeof(enc_nonce021) },
+ { enc_input022, enc_output022, enc_assoc022, enc_nonce022, enc_key022,
+ sizeof(enc_input022), sizeof(enc_assoc022), sizeof(enc_nonce022) },
+ { enc_input023, enc_output023, enc_assoc023, enc_nonce023, enc_key023,
+ sizeof(enc_input023), sizeof(enc_assoc023), sizeof(enc_nonce023) },
+ { enc_input024, enc_output024, enc_assoc024, enc_nonce024, enc_key024,
+ sizeof(enc_input024), sizeof(enc_assoc024), sizeof(enc_nonce024) },
+ { enc_input025, enc_output025, enc_assoc025, enc_nonce025, enc_key025,
+ sizeof(enc_input025), sizeof(enc_assoc025), sizeof(enc_nonce025) },
+ { enc_input026, enc_output026, enc_assoc026, enc_nonce026, enc_key026,
+ sizeof(enc_input026), sizeof(enc_assoc026), sizeof(enc_nonce026) },
+ { enc_input027, enc_output027, enc_assoc027, enc_nonce027, enc_key027,
+ sizeof(enc_input027), sizeof(enc_assoc027), sizeof(enc_nonce027) },
+ { enc_input028, enc_output028, enc_assoc028, enc_nonce028, enc_key028,
+ sizeof(enc_input028), sizeof(enc_assoc028), sizeof(enc_nonce028) },
+ { enc_input029, enc_output029, enc_assoc029, enc_nonce029, enc_key029,
+ sizeof(enc_input029), sizeof(enc_assoc029), sizeof(enc_nonce029) },
+ { enc_input030, enc_output030, enc_assoc030, enc_nonce030, enc_key030,
+ sizeof(enc_input030), sizeof(enc_assoc030), sizeof(enc_nonce030) },
+ { enc_input031, enc_output031, enc_assoc031, enc_nonce031, enc_key031,
+ sizeof(enc_input031), sizeof(enc_assoc031), sizeof(enc_nonce031) },
+ { enc_input032, enc_output032, enc_assoc032, enc_nonce032, enc_key032,
+ sizeof(enc_input032), sizeof(enc_assoc032), sizeof(enc_nonce032) },
+ { enc_input033, enc_output033, enc_assoc033, enc_nonce033, enc_key033,
+ sizeof(enc_input033), sizeof(enc_assoc033), sizeof(enc_nonce033) },
+ { enc_input034, enc_output034, enc_assoc034, enc_nonce034, enc_key034,
+ sizeof(enc_input034), sizeof(enc_assoc034), sizeof(enc_nonce034) },
+ { enc_input035, enc_output035, enc_assoc035, enc_nonce035, enc_key035,
+ sizeof(enc_input035), sizeof(enc_assoc035), sizeof(enc_nonce035) },
+ { enc_input036, enc_output036, enc_assoc036, enc_nonce036, enc_key036,
+ sizeof(enc_input036), sizeof(enc_assoc036), sizeof(enc_nonce036) },
+ { enc_input037, enc_output037, enc_assoc037, enc_nonce037, enc_key037,
+ sizeof(enc_input037), sizeof(enc_assoc037), sizeof(enc_nonce037) },
+ { enc_input038, enc_output038, enc_assoc038, enc_nonce038, enc_key038,
+ sizeof(enc_input038), sizeof(enc_assoc038), sizeof(enc_nonce038) },
+ { enc_input039, enc_output039, enc_assoc039, enc_nonce039, enc_key039,
+ sizeof(enc_input039), sizeof(enc_assoc039), sizeof(enc_nonce039) },
+ { enc_input040, enc_output040, enc_assoc040, enc_nonce040, enc_key040,
+ sizeof(enc_input040), sizeof(enc_assoc040), sizeof(enc_nonce040) },
+ { enc_input041, enc_output041, enc_assoc041, enc_nonce041, enc_key041,
+ sizeof(enc_input041), sizeof(enc_assoc041), sizeof(enc_nonce041) },
+ { enc_input042, enc_output042, enc_assoc042, enc_nonce042, enc_key042,
+ sizeof(enc_input042), sizeof(enc_assoc042), sizeof(enc_nonce042) },
+ { enc_input043, enc_output043, enc_assoc043, enc_nonce043, enc_key043,
+ sizeof(enc_input043), sizeof(enc_assoc043), sizeof(enc_nonce043) },
+ { enc_input044, enc_output044, enc_assoc044, enc_nonce044, enc_key044,
+ sizeof(enc_input044), sizeof(enc_assoc044), sizeof(enc_nonce044) },
+ { enc_input045, enc_output045, enc_assoc045, enc_nonce045, enc_key045,
+ sizeof(enc_input045), sizeof(enc_assoc045), sizeof(enc_nonce045) },
+ { enc_input046, enc_output046, enc_assoc046, enc_nonce046, enc_key046,
+ sizeof(enc_input046), sizeof(enc_assoc046), sizeof(enc_nonce046) },
+ { enc_input047, enc_output047, enc_assoc047, enc_nonce047, enc_key047,
+ sizeof(enc_input047), sizeof(enc_assoc047), sizeof(enc_nonce047) },
+ { enc_input048, enc_output048, enc_assoc048, enc_nonce048, enc_key048,
+ sizeof(enc_input048), sizeof(enc_assoc048), sizeof(enc_nonce048) },
+ { enc_input049, enc_output049, enc_assoc049, enc_nonce049, enc_key049,
+ sizeof(enc_input049), sizeof(enc_assoc049), sizeof(enc_nonce049) },
+ { enc_input050, enc_output050, enc_assoc050, enc_nonce050, enc_key050,
+ sizeof(enc_input050), sizeof(enc_assoc050), sizeof(enc_nonce050) },
+ { enc_input051, enc_output051, enc_assoc051, enc_nonce051, enc_key051,
+ sizeof(enc_input051), sizeof(enc_assoc051), sizeof(enc_nonce051) },
+ { enc_input052, enc_output052, enc_assoc052, enc_nonce052, enc_key052,
+ sizeof(enc_input052), sizeof(enc_assoc052), sizeof(enc_nonce052) },
{ enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053,
sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) },
{ enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054,
@@ -4497,6 +6076,10 @@ chacha20poly1305_enc_vectors[] __initconst = {
sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) },
{ enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073,
sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) },
+ { enc_input074, enc_output074, enc_assoc074, enc_nonce074, enc_key074,
+ sizeof(enc_input074), sizeof(enc_assoc074), sizeof(enc_nonce074) },
+ { enc_input075, enc_output075, enc_assoc075, enc_nonce075, enc_key075,
+ sizeof(enc_input075), sizeof(enc_assoc075), sizeof(enc_nonce075) },
{ enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076,
sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) },
{ enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077,
@@ -4517,6 +6100,20 @@ chacha20poly1305_enc_vectors[] __initconst = {
sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) },
{ enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085,
sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) },
+ { enc_input086, enc_output086, enc_assoc086, enc_nonce086, enc_key086,
+ sizeof(enc_input086), sizeof(enc_assoc086), sizeof(enc_nonce086) },
+ { enc_input087, enc_output087, enc_assoc087, enc_nonce087, enc_key087,
+ sizeof(enc_input087), sizeof(enc_assoc087), sizeof(enc_nonce087) },
+ { enc_input088, enc_output088, enc_assoc088, enc_nonce088, enc_key088,
+ sizeof(enc_input088), sizeof(enc_assoc088), sizeof(enc_nonce088) },
+ { enc_input089, enc_output089, enc_assoc089, enc_nonce089, enc_key089,
+ sizeof(enc_input089), sizeof(enc_assoc089), sizeof(enc_nonce089) },
+ { enc_input090, enc_output090, enc_assoc090, enc_nonce090, enc_key090,
+ sizeof(enc_input090), sizeof(enc_assoc090), sizeof(enc_nonce090) },
+ { enc_input091, enc_output091, enc_assoc091, enc_nonce091, enc_key091,
+ sizeof(enc_input091), sizeof(enc_assoc091), sizeof(enc_nonce091) },
+ { enc_input092, enc_output092, enc_assoc092, enc_nonce092, enc_key092,
+ sizeof(enc_input092), sizeof(enc_assoc092), sizeof(enc_nonce092) },
{ enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093,
sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) },
{ enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094,
@@ -7224,6 +8821,43 @@ xchacha20poly1305_dec_vectors[] __initconst = {
sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) }
};
+/* This is for the selftests only, since it is only useful for testing the
+ * underlying primitives and their interactions.
+ */
+static void __init
+chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[12],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ u32 chacha20_state[CHACHA_STATE_WORDS];
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ __le64 lens[2];
+ } b = {{ 0 }};
+ u8 bottom_row[16] = { 0 };
+ u32 le_key[8];
+ int i;
+
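+	/* Bottom row of the ChaCha state: 32-bit block counter (zero), then the 96-bit nonce. */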
+ memcpy(&bottom_row[4], nonce, 12);
+ for (i = 0; i < 8; ++i)
+ le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i);
+ chacha_init(chacha20_state, le_key, bottom_row);
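+	/* The first 32 bytes of keystream become the one-time Poly1305 key. */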
+ chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+ poly1305_update(&poly1305_state, ad, ad_len);
+ poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);
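+	/* Encrypt the plaintext, MAC the ciphertext and the AAD/ciphertext lengths, then append the tag. */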
+ chacha20_crypt(chacha20_state, dst, src, src_len);
+ poly1305_update(&poly1305_state, dst, src_len);
+ poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf);
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+ poly1305_final(&poly1305_state, dst + src_len);
+}
+
static void __init
chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
@@ -7233,6 +8867,9 @@ chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len,
if (nonce_len == 8)
chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len,
get_unaligned_le64(nonce), key);
+ else if (nonce_len == 12)
+ chacha20poly1305_encrypt_bignonce(dst, src, src_len, ad,
+ ad_len, nonce, key);
else
BUG();
}
@@ -7248,14 +8885,14 @@ decryption_success(bool func_ret, bool expect_failure, int memcmp_result)
bool __init chacha20poly1305_selftest(void)
{
enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 };
- size_t i;
- u8 *computed_output = NULL, *heap_src = NULL;
- struct scatterlist sg_src;
+ size_t i, j, k, total_len;
+ u8 *computed_output = NULL, *input = NULL;
bool success = true, ret;
+ struct scatterlist sg_src[3];
- heap_src = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
- if (!heap_src || !computed_output) {
+ input = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
+ if (!computed_output || !input) {
pr_err("chacha20poly1305 self-test malloc: FAIL\n");
success = false;
goto out;
@@ -7284,17 +8921,17 @@ bool __init chacha20poly1305_selftest(void)
for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
if (chacha20poly1305_enc_vectors[i].nlen != 8)
continue;
- memcpy(heap_src, chacha20poly1305_enc_vectors[i].input,
+ memcpy(computed_output, chacha20poly1305_enc_vectors[i].input,
chacha20poly1305_enc_vectors[i].ilen);
- sg_init_one(&sg_src, heap_src,
+ sg_init_one(sg_src, computed_output,
chacha20poly1305_enc_vectors[i].ilen + POLY1305_DIGEST_SIZE);
- chacha20poly1305_encrypt_sg_inplace(&sg_src,
+ ret = chacha20poly1305_encrypt_sg_inplace(sg_src,
chacha20poly1305_enc_vectors[i].ilen,
chacha20poly1305_enc_vectors[i].assoc,
chacha20poly1305_enc_vectors[i].alen,
get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce),
chacha20poly1305_enc_vectors[i].key);
- if (memcmp(heap_src,
+ if (!ret || memcmp(computed_output,
chacha20poly1305_enc_vectors[i].output,
chacha20poly1305_enc_vectors[i].ilen +
POLY1305_DIGEST_SIZE)) {
@@ -7326,11 +8963,11 @@ bool __init chacha20poly1305_selftest(void)
}
for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
- memcpy(heap_src, chacha20poly1305_dec_vectors[i].input,
+ memcpy(computed_output, chacha20poly1305_dec_vectors[i].input,
chacha20poly1305_dec_vectors[i].ilen);
- sg_init_one(&sg_src, heap_src,
+ sg_init_one(sg_src, computed_output,
chacha20poly1305_dec_vectors[i].ilen);
- ret = chacha20poly1305_decrypt_sg_inplace(&sg_src,
+ ret = chacha20poly1305_decrypt_sg_inplace(sg_src,
chacha20poly1305_dec_vectors[i].ilen,
chacha20poly1305_dec_vectors[i].assoc,
chacha20poly1305_dec_vectors[i].alen,
@@ -7338,7 +8975,7 @@ bool __init chacha20poly1305_selftest(void)
chacha20poly1305_dec_vectors[i].key);
if (!decryption_success(ret,
chacha20poly1305_dec_vectors[i].failure,
- memcmp(heap_src, chacha20poly1305_dec_vectors[i].output,
+ memcmp(computed_output, chacha20poly1305_dec_vectors[i].output,
chacha20poly1305_dec_vectors[i].ilen -
POLY1305_DIGEST_SIZE))) {
pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n",
@@ -7365,6 +9002,7 @@ bool __init chacha20poly1305_selftest(void)
success = false;
}
}
+
for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) {
memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
ret = xchacha20poly1305_decrypt(computed_output,
@@ -7386,8 +9024,54 @@ bool __init chacha20poly1305_selftest(void)
}
}
+ for (total_len = POLY1305_DIGEST_SIZE; IS_ENABLED(DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST)
+ && total_len <= 1 << 10; ++total_len) {
+ for (i = 0; i <= total_len; ++i) {
+ for (j = i; j <= total_len; ++j) {
+ sg_init_table(sg_src, 3);
+ sg_set_buf(&sg_src[0], input, i);
+ sg_set_buf(&sg_src[1], input + i, j - i);
+ sg_set_buf(&sg_src[2], input + j, total_len - j);
+ memset(computed_output, 0, total_len);
+ memset(input, 0, total_len);
+
+ if (!chacha20poly1305_encrypt_sg_inplace(sg_src,
+ total_len - POLY1305_DIGEST_SIZE, NULL, 0,
+ 0, enc_key001))
+ goto chunkfail;
+ chacha20poly1305_encrypt(computed_output,
+ computed_output,
+ total_len - POLY1305_DIGEST_SIZE, NULL, 0, 0,
+ enc_key001);
+ if (memcmp(computed_output, input, total_len))
+ goto chunkfail;
+ if (!chacha20poly1305_decrypt(computed_output,
+ input, total_len, NULL, 0, 0, enc_key001))
+ goto chunkfail;
+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
+ if (computed_output[k])
+ goto chunkfail;
+ }
+ if (!chacha20poly1305_decrypt_sg_inplace(sg_src,
+ total_len, NULL, 0, 0, enc_key001))
+ goto chunkfail;
+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
+ if (input[k])
+ goto chunkfail;
+ }
+ continue;
+
+ chunkfail:
+ pr_err("chacha20poly1305 chunked self-test %zu/%zu/%zu: FAIL\n",
+ total_len, i, j);
+ success = false;
+ }
+
+ }
+ }
+
out:
- kfree(heap_src);
kfree(computed_output);
+ kfree(input);
return success;
}
diff --git a/lib/crypto/curve25519-generic.c b/lib/crypto/curve25519-generic.c
new file mode 100644
index 000000000000..de7c99172fa2
--- /dev/null
+++ b/lib/crypto/curve25519-generic.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the Curve25519 ECDH algorithm, using either
+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers,
+ * depending on what is supported by the target compiler.
+ *
+ * Information: https://cr.yp.to/ecdh.html
+ */
+
+#include <crypto/curve25519.h>
+#include <linux/module.h>
+
+const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
+const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
+
+EXPORT_SYMBOL(curve25519_null_point);
+EXPORT_SYMBOL(curve25519_base_point);
+EXPORT_SYMBOL(curve25519_generic);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Curve25519 scalar multiplication");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/curve25519-selftest.c b/lib/crypto/curve25519-selftest.c
new file mode 100644
index 000000000000..c85e85381e78
--- /dev/null
+++ b/lib/crypto/curve25519-selftest.c
@@ -0,0 +1,1321 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/curve25519.h>
+
+struct curve25519_test_vector {
+ u8 private[CURVE25519_KEY_SIZE];
+ u8 public[CURVE25519_KEY_SIZE];
+ u8 result[CURVE25519_KEY_SIZE];
+ bool valid;
+};
+static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = {
+ {
+ .private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d,
+ 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45,
+ 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a,
+ 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a },
+ .public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4,
+ 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37,
+ 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d,
+ 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f },
+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .valid = true
+ },
+ {
+ .private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b,
+ 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6,
+ 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd,
+ 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb },
+ .public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54,
+ 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a,
+ 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4,
+ 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a },
+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .valid = true
+ },
+ {
+ .private = { 1 },
+ .public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64,
+ 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d,
+ 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98,
+ 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f },
+ .valid = true
+ },
+ {
+ .private = { 1 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f,
+ 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d,
+ 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3,
+ 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 },
+ .valid = true
+ },
+ {
+ .private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 },
+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .valid = true
+ },
+ {
+ .private = { 1, 2, 3, 4 },
+ .public = { 0 },
+ .result = { 0 },
+ .valid = false
+ },
+ {
+ .private = { 2, 4, 6, 8 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 },
+ .result = { 0 },
+ .valid = false
+ },
+ {
+ .private = { 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f },
+ .result = { 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2,
+ 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57,
+ 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05,
+ 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 },
+ .valid = true
+ },
+ {
+ .private = { 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 },
+ .result = { 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d,
+ 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12,
+ 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99,
+ 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c },
+ .valid = true
+ },
+ /* wycheproof - normal case */
+ {
+ .private = { 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda,
+ 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66,
+ 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3,
+ 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba },
+ .public = { 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5,
+ 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9,
+ 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e,
+ 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a },
+ .result = { 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5,
+ 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38,
+ 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e,
+ 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4,
+ 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5,
+ 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49,
+ 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 },
+ .public = { 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5,
+ 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8,
+ 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3,
+ 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 },
+ .result = { 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff,
+ 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d,
+ 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe,
+ 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9,
+ 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39,
+ 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5,
+ 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 },
+ .public = { 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f,
+ 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b,
+ 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c,
+ 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 },
+ .result = { 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53,
+ 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57,
+ 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0,
+ 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc,
+ 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d,
+ 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67,
+ 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c },
+ .public = { 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97,
+ 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f,
+ 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45,
+ 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a },
+ .result = { 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93,
+ 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2,
+ 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44,
+ 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1,
+ 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95,
+ 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99,
+ 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d },
+ .public = { 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27,
+ 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07,
+ 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae,
+ 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c },
+ .result = { 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73,
+ 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2,
+ 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f,
+ 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9,
+ 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd,
+ 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b,
+ 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 },
+ .public = { 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5,
+ 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52,
+ 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8,
+ 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 },
+ .result = { 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86,
+ 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4,
+ 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6,
+ 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 },
+ .valid = true
+ },
+ /* wycheproof - public key = 0 */
+ {
+ .private = { 0x20, 0x74, 0x94, 0x03, 0x8f, 0x2b, 0xb8, 0x11,
+ 0xd4, 0x78, 0x05, 0xbc, 0xdf, 0x04, 0xa2, 0xac,
+ 0x58, 0x5a, 0xda, 0x7f, 0x2f, 0x23, 0x38, 0x9b,
+ 0xfd, 0x46, 0x58, 0xf9, 0xdd, 0xd4, 0xde, 0xbc },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key = 1 */
+ {
+ .private = { 0x20, 0x2e, 0x89, 0x72, 0xb6, 0x1c, 0x7e, 0x61,
+ 0x93, 0x0e, 0xb9, 0x45, 0x0b, 0x50, 0x70, 0xea,
+ 0xe1, 0xc6, 0x70, 0x47, 0x56, 0x85, 0x54, 0x1f,
+ 0x04, 0x76, 0x21, 0x7e, 0x48, 0x18, 0xcf, 0xab },
+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04,
+ 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77,
+ 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90,
+ 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 },
+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97,
+ 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9,
+ 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7,
+ 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36,
+ 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd,
+ 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c,
+ 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 },
+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e,
+ 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b,
+ 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e,
+ 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed,
+ 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e,
+ 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd,
+ 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 },
+ .public = { 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 },
+ .result = { 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f,
+ 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1,
+ 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10,
+ 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3,
+ 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d,
+ 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00,
+ 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 },
+ .public = { 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f },
+ .result = { 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8,
+ 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4,
+ 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70,
+ 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3,
+ 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a,
+ 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e,
+ 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57,
+ 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c,
+ 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59,
+ 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f,
+ 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42,
+ 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9,
+ 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 },
+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c,
+ 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5,
+ 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65,
+ 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6,
+ 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4,
+ 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8,
+ 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe },
+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7,
+ 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca,
+ 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f,
+ 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa,
+ 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3,
+ 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52,
+ 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3,
+ 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e,
+ 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75,
+ 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26,
+ 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea,
+ 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00,
+ 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .result = { 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8,
+ 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32,
+ 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87,
+ 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c,
+ 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6,
+ 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb,
+ 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 },
+ .public = { 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff,
+ 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff,
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f },
+ .result = { 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85,
+ 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f,
+ 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0,
+ 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38,
+ 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b,
+ 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c,
+ 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .result = { 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b,
+ 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81,
+ 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3,
+ 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d,
+ 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42,
+ 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98,
+ 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f },
+ .result = { 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c,
+ 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9,
+ 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89,
+ 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29,
+ 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6,
+ 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c,
+ 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f },
+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75,
+ 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89,
+ 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c,
+ 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f },
+ .valid = true
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x10, 0x25, 0x5c, 0x92, 0x30, 0xa9, 0x7a, 0x30,
+ 0xa4, 0x58, 0xca, 0x28, 0x4a, 0x62, 0x96, 0x69,
+ 0x29, 0x3a, 0x31, 0x89, 0x0c, 0xda, 0x9d, 0x14,
+ 0x7f, 0xeb, 0xc7, 0xd1, 0xe2, 0x2d, 0x6b, 0xb1 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x78, 0xf1, 0xe8, 0xed, 0xf1, 0x44, 0x81, 0xb3,
+ 0x89, 0x44, 0x8d, 0xac, 0x8f, 0x59, 0xc7, 0x0b,
+ 0x03, 0x8e, 0x7c, 0xf9, 0x2e, 0xf2, 0xc7, 0xef,
+ 0xf5, 0x7a, 0x72, 0x46, 0x6e, 0x11, 0x52, 0x96 },
+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xa0, 0xa0, 0x5a, 0x3e, 0x8f, 0x9f, 0x44, 0x20,
+ 0x4d, 0x5f, 0x80, 0x59, 0xa9, 0x4a, 0xc7, 0xdf,
+ 0xc3, 0x9a, 0x49, 0xac, 0x01, 0x6d, 0xd7, 0x43,
+ 0xdb, 0xfa, 0x43, 0xc5, 0xd6, 0x71, 0xfd, 0x88 },
+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xd0, 0xdb, 0xb3, 0xed, 0x19, 0x06, 0x66, 0x3f,
+ 0x15, 0x42, 0x0a, 0xf3, 0x1f, 0x4e, 0xaf, 0x65,
+ 0x09, 0xd9, 0xa9, 0x94, 0x97, 0x23, 0x50, 0x06,
+ 0x05, 0xad, 0x7c, 0x1c, 0x6e, 0x74, 0x50, 0xa9 },
+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xc0, 0xb1, 0xd0, 0xeb, 0x22, 0xb2, 0x44, 0xfe,
+ 0x32, 0x91, 0x14, 0x00, 0x72, 0xcd, 0xd9, 0xd9,
+ 0x89, 0xb5, 0xf0, 0xec, 0xd9, 0x6c, 0x10, 0x0f,
+ 0xeb, 0x5b, 0xca, 0x24, 0x1c, 0x1d, 0x9f, 0x8f },
+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x48, 0x0b, 0xf4, 0x5f, 0x59, 0x49, 0x42, 0xa8,
+ 0xbc, 0x0f, 0x33, 0x53, 0xc6, 0xe8, 0xb8, 0x85,
+ 0x3d, 0x77, 0xf3, 0x51, 0xf1, 0xc2, 0xca, 0x6c,
+ 0x2d, 0x1a, 0xbf, 0x8a, 0x00, 0xb4, 0x22, 0x9c },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x30, 0xf9, 0x93, 0xfc, 0xf8, 0x51, 0x4f, 0xc8,
+ 0x9b, 0xd8, 0xdb, 0x14, 0xcd, 0x43, 0xba, 0x0d,
+ 0x4b, 0x25, 0x30, 0xe7, 0x3c, 0x42, 0x76, 0xa0,
+ 0x5e, 0x1b, 0x14, 0x5d, 0x42, 0x0c, 0xed, 0xb4 },
+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xc0, 0x49, 0x74, 0xb7, 0x58, 0x38, 0x0e, 0x2a,
+ 0x5b, 0x5d, 0xf6, 0xeb, 0x09, 0xbb, 0x2f, 0x6b,
+ 0x34, 0x34, 0xf9, 0x82, 0x72, 0x2a, 0x8e, 0x67,
+ 0x6d, 0x3d, 0xa2, 0x51, 0xd1, 0xb3, 0xde, 0x83 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x50, 0x2a, 0x31, 0x37, 0x3d, 0xb3, 0x24, 0x46,
+ 0x84, 0x2f, 0xe5, 0xad, 0xd3, 0xe0, 0x24, 0x02,
+ 0x2e, 0xa5, 0x4f, 0x27, 0x41, 0x82, 0xaf, 0xc3,
+ 0xd9, 0xf1, 0xbb, 0x3d, 0x39, 0x53, 0x4e, 0xb5 },
+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0xd7 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x90, 0xfa, 0x64, 0x17, 0xb0, 0xe3, 0x70, 0x30,
+ 0xfd, 0x6e, 0x43, 0xef, 0xf2, 0xab, 0xae, 0xf1,
+ 0x4c, 0x67, 0x93, 0x11, 0x7a, 0x03, 0x9c, 0xf6,
+ 0x21, 0x31, 0x8b, 0xa9, 0x0f, 0x4e, 0x98, 0xbe },
+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x78, 0xad, 0x3f, 0x26, 0x02, 0x7f, 0x1c, 0x9f,
+ 0xdd, 0x97, 0x5a, 0x16, 0x13, 0xb9, 0x47, 0x77,
+ 0x9b, 0xad, 0x2c, 0xf2, 0xb7, 0x41, 0xad, 0xe0,
+ 0x18, 0x40, 0x88, 0x5a, 0x30, 0xbb, 0x97, 0x9c },
+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x98, 0xe2, 0x3d, 0xe7, 0xb1, 0xe0, 0x92, 0x6e,
+ 0xd9, 0xc8, 0x7e, 0x7b, 0x14, 0xba, 0xf5, 0x5f,
+ 0x49, 0x7a, 0x1d, 0x70, 0x96, 0xf9, 0x39, 0x77,
+ 0x68, 0x0e, 0x44, 0xdc, 0x1c, 0x7b, 0x7b, 0x8b },
+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc,
+ 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1,
+ 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d,
+ 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae },
+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09,
+ 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde,
+ 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1,
+ 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81,
+ 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a,
+ 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99,
+ 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d },
+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17,
+ 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35,
+ 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55,
+ 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11,
+ 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b,
+ 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9,
+ 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 },
+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53,
+ 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e,
+ 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6,
+ 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78,
+ 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2,
+ 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd,
+ 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb,
+ 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40,
+ 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2,
+ 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9,
+ 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60,
+ 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13,
+ 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 },
+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c,
+ 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3,
+ 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65,
+ 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a,
+ 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7,
+ 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11,
+ 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e },
+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82,
+ 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4,
+ 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c,
+ 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e,
+ 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a,
+ 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d,
+ 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f },
+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2,
+ 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60,
+ 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25,
+ 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb,
+ 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97,
+ 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c,
+ 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 },
+ .public = { 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23,
+ 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8,
+ 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69,
+ 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a,
+ 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23,
+ 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b,
+ 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 },
+ .public = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b,
+ 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44,
+ 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37,
+ 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80,
+ 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d,
+ 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b,
+ 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 },
+ .public = { 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63,
+ 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae,
+ 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f,
+ 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0,
+ 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd,
+ 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49,
+ 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 },
+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41,
+ 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0,
+ 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf,
+ 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9,
+ 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa,
+ 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5,
+ 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e },
+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47,
+ 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3,
+ 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b,
+ 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8,
+ 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98,
+ 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0,
+ 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 },
+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0,
+ 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1,
+ 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a,
+ 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02,
+ 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4,
+ 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68,
+ 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d },
+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f,
+ 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2,
+ 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95,
+ 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7,
+ 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06,
+ 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9,
+ 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 },
+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5,
+ 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0,
+ 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80,
+ 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd,
+ 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4,
+ 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04,
+ 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0,
+ 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac,
+ 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48,
+ 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 },
+ .valid = true
+ },
+ /* wycheproof - RFC 7748 */
+ {
+ .private = { 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 },
+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .valid = true
+ },
+ /* wycheproof - RFC 7748 */
+ {
+ .private = { 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c,
+ 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5,
+ 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4,
+ 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d },
+ .public = { 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3,
+ 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c,
+ 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e,
+ 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 },
+ .result = { 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d,
+ 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8,
+ 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52,
+ 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde,
+ 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8,
+ 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4,
+ 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 },
+ .result = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d,
+ 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64,
+ 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd,
+ 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 },
+ .result = { 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8,
+ 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf,
+ 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94,
+ 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d },
+ .result = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84,
+ 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62,
+ 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e,
+ 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 },
+ .result = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8,
+ 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58,
+ 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02,
+ 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 },
+ .result = { 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9,
+ 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a,
+ 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44,
+ 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b },
+ .result = { 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd,
+ 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22,
+ 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56,
+ 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b },
+ .result = { 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53,
+ 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f,
+ 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18,
+ 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f },
+ .result = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55,
+ 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b,
+ 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79,
+ 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f },
+ .result = { 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39,
+ 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c,
+ 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb,
+ 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e },
+ .result = { 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04,
+ 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10,
+ 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58,
+ 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c },
+ .result = { 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3,
+ 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c,
+ 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88,
+ 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 },
+ .result = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a,
+ 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49,
+ 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a,
+ 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca,
+ 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c,
+ 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb,
+ 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58,
+ 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7,
+ 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01,
+ 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d },
+ .result = { 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d,
+ 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27,
+ 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b,
+ 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26,
+ 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2,
+ 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44,
+ 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e },
+ .result = { 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6,
+ 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d,
+ 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e,
+ 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61,
+ 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67,
+ 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e,
+ 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c },
+ .result = { 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65,
+ 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce,
+ 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0,
+ 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee,
+ 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d,
+ 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14,
+ 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 },
+ .result = { 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e,
+ 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc,
+ 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5,
+ 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4,
+ 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5,
+ 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c,
+ 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 },
+ .result = { 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b,
+ 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93,
+ 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f,
+ 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 },
+ .valid = true
+ },
+ /* wycheproof - private key == -1 (mod order) */
+ {
+ .private = { 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8,
+ 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 },
+ .public = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .result = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .valid = true
+ },
+ /* wycheproof - private key == 1 (mod order) on twist */
+ {
+ .private = { 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef,
+ 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f },
+ .public = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .result = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .valid = true
+ }
+};
+
+bool __init curve25519_selftest(void)
+{
+ bool success = true, ret, ret2;
+ size_t i = 0, j;
+ u8 in[CURVE25519_KEY_SIZE];
+ u8 out[CURVE25519_KEY_SIZE], out2[CURVE25519_KEY_SIZE],
+ out3[CURVE25519_KEY_SIZE];
+
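+	/*
+	 * Run each wycheproof/RFC 7748 vector through curve25519() and check
+	 * both the return value and the computed shared secret.
+	 */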
+ for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) {
+ memset(out, 0, CURVE25519_KEY_SIZE);
+ ret = curve25519(out, curve25519_test_vectors[i].private,
+ curve25519_test_vectors[i].public);
+ if (ret != curve25519_test_vectors[i].valid ||
+ memcmp(out, curve25519_test_vectors[i].result,
+ CURVE25519_KEY_SIZE)) {
+ pr_err("curve25519 self-test %zu: FAIL\n", i + 1);
+ success = false;
+ }
+ }
+
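+	/*
+	 * Cross-check curve25519_generate_public() against basepoint
+	 * multiplication via curve25519() and curve25519_generic() for a
+	 * few random private keys.
+	 */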
+ for (i = 0; i < 5; ++i) {
+ get_random_bytes(in, sizeof(in));
+ ret = curve25519_generate_public(out, in);
+ ret2 = curve25519(out2, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
+ curve25519_generic(out3, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
+ if (ret != ret2 ||
+ memcmp(out, out2, CURVE25519_KEY_SIZE) ||
+ memcmp(out, out3, CURVE25519_KEY_SIZE)) {
+ pr_err("curve25519 basepoint self-test %zu: FAIL: input - 0x",
+ i + 1);
+ for (j = CURVE25519_KEY_SIZE; j-- > 0;)
+ printk(KERN_CONT "%02x", in[j]);
+ printk(KERN_CONT "\n");
+ success = false;
+ }
+ }
+
+ return success;
+}
diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c
index 0106bebe6900..288a62cd29b2 100644
--- a/lib/crypto/curve25519.c
+++ b/lib/crypto/curve25519.c
@@ -13,12 +13,22 @@
#include <linux/module.h>
#include <linux/init.h>
-const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
-const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
+bool curve25519_selftest(void);
-EXPORT_SYMBOL(curve25519_null_point);
-EXPORT_SYMBOL(curve25519_base_point);
-EXPORT_SYMBOL(curve25519_generic);
+static int __init mod_init(void)
+{
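+	/* Run the self-test at init unless run-time crypto tests are disabled. */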
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!curve25519_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit mod_exit(void)
+{
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Curve25519 scalar multiplication");
diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
new file mode 100644
index 000000000000..3cc77d94390b
--- /dev/null
+++ b/lib/crypto/poly1305-donna32.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/poly1305.h>
+
+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+{
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
+ key->key.r[1] = (get_unaligned_le32(&raw_key[3]) >> 2) & 0x3ffff03;
+ key->key.r[2] = (get_unaligned_le32(&raw_key[6]) >> 4) & 0x3ffc0ff;
+ key->key.r[3] = (get_unaligned_le32(&raw_key[9]) >> 6) & 0x3f03fff;
+ key->key.r[4] = (get_unaligned_le32(&raw_key[12]) >> 8) & 0x00fffff;
+
+ /* s = 5*r */
+ key->precomputed_s.r[0] = key->key.r[1] * 5;
+ key->precomputed_s.r[1] = key->key.r[2] * 5;
+ key->precomputed_s.r[2] = key->key.r[3] * 5;
+ key->precomputed_s.r[3] = key->key.r[4] * 5;
+}
+EXPORT_SYMBOL(poly1305_core_setkey);
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit)
+{
+ const u8 *input = src;
+ u32 r0, r1, r2, r3, r4;
+ u32 s1, s2, s3, s4;
+ u32 h0, h1, h2, h3, h4;
+ u64 d0, d1, d2, d3, d4;
+ u32 c;
+
+ if (!nblocks)
+ return;
+
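+	/* Move the pad bit to bit 24 of the top 26-bit limb, i.e. 2^128. */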
+ hibit <<= 24;
+
+ r0 = key->key.r[0];
+ r1 = key->key.r[1];
+ r2 = key->key.r[2];
+ r3 = key->key.r[3];
+ r4 = key->key.r[4];
+
+ s1 = key->precomputed_s.r[0];
+ s2 = key->precomputed_s.r[1];
+ s3 = key->precomputed_s.r[2];
+ s4 = key->precomputed_s.r[3];
+
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ do {
+ /* h += m[i] */
+ h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff;
+ h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff;
+ h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff;
+ h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff;
+ h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit;
+
+ /* h *= r */
+ d0 = ((u64)h0 * r0) + ((u64)h1 * s4) +
+ ((u64)h2 * s3) + ((u64)h3 * s2) +
+ ((u64)h4 * s1);
+ d1 = ((u64)h0 * r1) + ((u64)h1 * r0) +
+ ((u64)h2 * s4) + ((u64)h3 * s3) +
+ ((u64)h4 * s2);
+ d2 = ((u64)h0 * r2) + ((u64)h1 * r1) +
+ ((u64)h2 * r0) + ((u64)h3 * s4) +
+ ((u64)h4 * s3);
+ d3 = ((u64)h0 * r3) + ((u64)h1 * r2) +
+ ((u64)h2 * r1) + ((u64)h3 * r0) +
+ ((u64)h4 * s4);
+ d4 = ((u64)h0 * r4) + ((u64)h1 * r3) +
+ ((u64)h2 * r2) + ((u64)h3 * r1) +
+ ((u64)h4 * r0);
+
+ /* (partial) h %= p */
+ c = (u32)(d0 >> 26);
+ h0 = (u32)d0 & 0x3ffffff;
+ d1 += c;
+ c = (u32)(d1 >> 26);
+ h1 = (u32)d1 & 0x3ffffff;
+ d2 += c;
+ c = (u32)(d2 >> 26);
+ h2 = (u32)d2 & 0x3ffffff;
+ d3 += c;
+ c = (u32)(d3 >> 26);
+ h3 = (u32)d3 & 0x3ffffff;
+ d4 += c;
+ c = (u32)(d4 >> 26);
+ h4 = (u32)d4 & 0x3ffffff;
+ h0 += c * 5;
+ c = (h0 >> 26);
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state->h[0] = h0;
+ state->h[1] = h1;
+ state->h[2] = h2;
+ state->h[3] = h3;
+ state->h[4] = h4;
+}
+EXPORT_SYMBOL(poly1305_core_blocks);
+
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst)
+{
+ u8 *mac = dst;
+ u32 h0, h1, h2, h3, h4, c;
+ u32 g0, g1, g2, g3, g4;
+ u64 f;
+ u32 mask;
+
+ /* fully carry h */
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ c = h1 >> 26;
+ h1 = h1 & 0x3ffffff;
+ h2 += c;
+ c = h2 >> 26;
+ h2 = h2 & 0x3ffffff;
+ h3 += c;
+ c = h3 >> 26;
+ h3 = h3 & 0x3ffffff;
+ h4 += c;
+ c = h4 >> 26;
+ h4 = h4 & 0x3ffffff;
+ h0 += c * 5;
+ c = h0 >> 26;
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 26;
+ g0 &= 0x3ffffff;
+ g1 = h1 + c;
+ c = g1 >> 26;
+ g1 &= 0x3ffffff;
+ g2 = h2 + c;
+ c = g2 >> 26;
+ g2 &= 0x3ffffff;
+ g3 = h3 + c;
+ c = g3 >> 26;
+ g3 &= 0x3ffffff;
+ g4 = h4 + c - (1UL << 26);
+
+ /* select h if h < p, or h + -p if h >= p */
+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = ~mask;
+
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ /* h = h % (2^128) */
+ h0 = ((h0) | (h1 << 26)) & 0xffffffff;
+ h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
+ h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
+ h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
+
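+	/* nonce may be NULL, presumably for callers that fold in the pad key themselves. */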
+ if (likely(nonce)) {
+ /* mac = (h + nonce) % (2^128) */
+ f = (u64)h0 + nonce[0];
+ h0 = (u32)f;
+ f = (u64)h1 + nonce[1] + (f >> 32);
+ h1 = (u32)f;
+ f = (u64)h2 + nonce[2] + (f >> 32);
+ h2 = (u32)f;
+ f = (u64)h3 + nonce[3] + (f >> 32);
+ h3 = (u32)f;
+ }
+
+ put_unaligned_le32(h0, &mac[0]);
+ put_unaligned_le32(h1, &mac[4]);
+ put_unaligned_le32(h2, &mac[8]);
+ put_unaligned_le32(h3, &mac[12]);
+}
+EXPORT_SYMBOL(poly1305_core_emit);
diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
new file mode 100644
index 000000000000..6ae181bb4345
--- /dev/null
+++ b/lib/crypto/poly1305-donna64.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/poly1305.h>
+
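+/* The h *= r products are accumulated in 128-bit intermediates. */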
+typedef __uint128_t u128;
+
+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+{
+ u64 t0, t1;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ t0 = get_unaligned_le64(&raw_key[0]);
+ t1 = get_unaligned_le64(&raw_key[8]);
+
+ key->key.r64[0] = t0 & 0xffc0fffffffULL;
+ key->key.r64[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL;
+ key->key.r64[2] = ((t1 >> 24)) & 0x00ffffffc0fULL;
+
+ /* s = 20*r */
+ key->precomputed_s.r64[0] = key->key.r64[1] * 20;
+ key->precomputed_s.r64[1] = key->key.r64[2] * 20;
+}
+EXPORT_SYMBOL(poly1305_core_setkey);
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit)
+{
+ const u8 *input = src;
+ u64 hibit64;
+ u64 r0, r1, r2;
+ u64 s1, s2;
+ u64 h0, h1, h2;
+ u64 c;
+ u128 d0, d1, d2, d;
+
+ if (!nblocks)
+ return;
+
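+	/* Move the pad bit to bit 40 of the top 44-bit limb, i.e. 2^128. */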
+ hibit64 = ((u64)hibit) << 40;
+
+ r0 = key->key.r64[0];
+ r1 = key->key.r64[1];
+ r2 = key->key.r64[2];
+
+ h0 = state->h64[0];
+ h1 = state->h64[1];
+ h2 = state->h64[2];
+
+ s1 = key->precomputed_s.r64[0];
+ s2 = key->precomputed_s.r64[1];
+
+ do {
+ u64 t0, t1;
+
+ /* h += m[i] */
+ t0 = get_unaligned_le64(&input[0]);
+ t1 = get_unaligned_le64(&input[8]);
+
+ h0 += t0 & 0xfffffffffffULL;
+ h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL;
+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit64;
+
+ /* h *= r */
+ d0 = (u128)h0 * r0;
+ d = (u128)h1 * s2;
+ d0 += d;
+ d = (u128)h2 * s1;
+ d0 += d;
+ d1 = (u128)h0 * r1;
+ d = (u128)h1 * r0;
+ d1 += d;
+ d = (u128)h2 * s2;
+ d1 += d;
+ d2 = (u128)h0 * r2;
+ d = (u128)h1 * r1;
+ d2 += d;
+ d = (u128)h2 * r0;
+ d2 += d;
+
+ /* (partial) h %= p */
+ c = (u64)(d0 >> 44);
+ h0 = (u64)d0 & 0xfffffffffffULL;
+ d1 += c;
+ c = (u64)(d1 >> 44);
+ h1 = (u64)d1 & 0xfffffffffffULL;
+ d2 += c;
+ c = (u64)(d2 >> 42);
+ h2 = (u64)d2 & 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 = h0 & 0xfffffffffffULL;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state->h64[0] = h0;
+ state->h64[1] = h1;
+ state->h64[2] = h2;
+}
+EXPORT_SYMBOL(poly1305_core_blocks);
+
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst)
+{
+ u8 *mac = dst;
+ u64 h0, h1, h2, c;
+ u64 g0, g1, g2;
+ u64 t0, t1;
+
+ /* fully carry h */
+ h0 = state->h64[0];
+ h1 = state->h64[1];
+ h2 = state->h64[2];
+
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += c;
+ c = h2 >> 42;
+ h2 &= 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += c;
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += c;
+ c = h2 >> 42;
+ h2 &= 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 44;
+ g0 &= 0xfffffffffffULL;
+ g1 = h1 + c;
+ c = g1 >> 44;
+ g1 &= 0xfffffffffffULL;
+ g2 = h2 + c - (1ULL << 42);
+
+ /* select h if h < p, or h + -p if h >= p */
+ c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1;
+ g0 &= c;
+ g1 &= c;
+ g2 &= c;
+ c = ~c;
+ h0 = (h0 & c) | g0;
+ h1 = (h1 & c) | g1;
+ h2 = (h2 & c) | g2;
+
+ if (likely(nonce)) {
+ /* h = (h + nonce) */
+ t0 = ((u64)nonce[1] << 32) | nonce[0];
+ t1 = ((u64)nonce[3] << 32) | nonce[2];
+
+ h0 += t0 & 0xfffffffffffULL;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c;
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c;
+ h2 &= 0x3ffffffffffULL;
+ }
+
+ /* mac = h % (2^128) */
+ h0 = h0 | (h1 << 44);
+ h1 = (h1 >> 20) | (h2 << 24);
+
+ put_unaligned_le64(h0, &mac[0]);
+ put_unaligned_le64(h1, &mac[8]);
+}
+EXPORT_SYMBOL(poly1305_core_emit);
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
index 32ec293c65ae..9d2d14df0fee 100644
--- a/lib/crypto/poly1305.c
+++ b/lib/crypto/poly1305.c
@@ -12,151 +12,9 @@
#include <linux/module.h>
#include <asm/unaligned.h>
-static inline u64 mlt(u64 a, u64 b)
-{
- return a * b;
-}
-
-static inline u32 sr(u64 v, u_char n)
-{
- return v >> n;
-}
-
-static inline u32 and(u32 v, u32 mask)
-{
- return v & mask;
-}
-
-void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key)
-{
- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff;
- key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03;
- key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff;
- key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff;
- key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff;
-}
-EXPORT_SYMBOL_GPL(poly1305_core_setkey);
-
-void poly1305_core_blocks(struct poly1305_state *state,
- const struct poly1305_key *key, const void *src,
- unsigned int nblocks, u32 hibit)
-{
- u32 r0, r1, r2, r3, r4;
- u32 s1, s2, s3, s4;
- u32 h0, h1, h2, h3, h4;
- u64 d0, d1, d2, d3, d4;
-
- if (!nblocks)
- return;
-
- r0 = key->r[0];
- r1 = key->r[1];
- r2 = key->r[2];
- r3 = key->r[3];
- r4 = key->r[4];
-
- s1 = r1 * 5;
- s2 = r2 * 5;
- s3 = r3 * 5;
- s4 = r4 * 5;
-
- h0 = state->h[0];
- h1 = state->h[1];
- h2 = state->h[2];
- h3 = state->h[3];
- h4 = state->h[4];
-
- do {
- /* h += m[i] */
- h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
- h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
- h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff;
- h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff;
- h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24);
-
- /* h *= r */
- d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
- mlt(h3, s2) + mlt(h4, s1);
- d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) +
- mlt(h3, s3) + mlt(h4, s2);
- d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) +
- mlt(h3, s4) + mlt(h4, s3);
- d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) +
- mlt(h3, r0) + mlt(h4, s4);
- d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) +
- mlt(h3, r1) + mlt(h4, r0);
-
- /* (partial) h %= p */
- d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff);
- d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff);
- d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff);
- d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff);
- h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff);
- h1 += h0 >> 26; h0 = h0 & 0x3ffffff;
-
- src += POLY1305_BLOCK_SIZE;
- } while (--nblocks);
-
- state->h[0] = h0;
- state->h[1] = h1;
- state->h[2] = h2;
- state->h[3] = h3;
- state->h[4] = h4;
-}
-EXPORT_SYMBOL_GPL(poly1305_core_blocks);
-
-void poly1305_core_emit(const struct poly1305_state *state, void *dst)
-{
- u32 h0, h1, h2, h3, h4;
- u32 g0, g1, g2, g3, g4;
- u32 mask;
-
- /* fully carry h */
- h0 = state->h[0];
- h1 = state->h[1];
- h2 = state->h[2];
- h3 = state->h[3];
- h4 = state->h[4];
-
- h2 += (h1 >> 26); h1 = h1 & 0x3ffffff;
- h3 += (h2 >> 26); h2 = h2 & 0x3ffffff;
- h4 += (h3 >> 26); h3 = h3 & 0x3ffffff;
- h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff;
- h1 += (h0 >> 26); h0 = h0 & 0x3ffffff;
-
- /* compute h + -p */
- g0 = h0 + 5;
- g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff;
- g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff;
- g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff;
- g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff;
-
- /* select h if h < p, or h + -p if h >= p */
- mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
- g0 &= mask;
- g1 &= mask;
- g2 &= mask;
- g3 &= mask;
- g4 &= mask;
- mask = ~mask;
- h0 = (h0 & mask) | g0;
- h1 = (h1 & mask) | g1;
- h2 = (h2 & mask) | g2;
- h3 = (h3 & mask) | g3;
- h4 = (h4 & mask) | g4;
-
- /* h = h % (2^128) */
- put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0);
- put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4);
- put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8);
- put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12);
-}
-EXPORT_SYMBOL_GPL(poly1305_core_emit);
-
void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
{
- poly1305_core_setkey(desc->r, key);
+ poly1305_core_setkey(&desc->core_r, key);
desc->s[0] = get_unaligned_le32(key + 16);
desc->s[1] = get_unaligned_le32(key + 20);
desc->s[2] = get_unaligned_le32(key + 24);
@@ -164,7 +22,7 @@ void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
poly1305_core_init(&desc->h);
desc->buflen = 0;
desc->sset = true;
- desc->rset = 1;
+ desc->rset = 2;
}
EXPORT_SYMBOL_GPL(poly1305_init_generic);
@@ -181,13 +39,14 @@ void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
desc->buflen += bytes;
if (desc->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 1);
+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf,
+ 1, 1);
desc->buflen = 0;
}
}
if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
- poly1305_core_blocks(&desc->h, desc->r, src,
+ poly1305_core_blocks(&desc->h, &desc->core_r, src,
nbytes / POLY1305_BLOCK_SIZE, 1);
src += nbytes - (nbytes % POLY1305_BLOCK_SIZE);
nbytes %= POLY1305_BLOCK_SIZE;
@@ -202,28 +61,14 @@ EXPORT_SYMBOL_GPL(poly1305_update_generic);
void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst)
{
- __le32 digest[4];
- u64 f = 0;
-
if (unlikely(desc->buflen)) {
desc->buf[desc->buflen++] = 1;
memset(desc->buf + desc->buflen, 0,
POLY1305_BLOCK_SIZE - desc->buflen);
- poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 0);
+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0);
}
- poly1305_core_emit(&desc->h, digest);
-
- /* mac = (h + s) % (2^128) */
- f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0];
- put_unaligned_le32(f, dst + 0);
- f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1];
- put_unaligned_le32(f, dst + 4);
- f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2];
- put_unaligned_le32(f, dst + 8);
- f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3];
- put_unaligned_le32(f, dst + 12);
-
+ poly1305_core_emit(&desc->h, desc->s, dst);
*desc = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL_GPL(poly1305_final_generic);