Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--   arch/x86/crypto/.gitignore                    |    1
-rw-r--r--   arch/x86/crypto/Makefile                      |  162
-rw-r--r--   arch/x86/crypto/aesni-intel_avx-x86_64.S      |    6
-rw-r--r--   arch/x86/crypto/aesni-intel_glue.c            |   21
-rw-r--r--   arch/x86/crypto/blake2s-core.S                |    2
-rw-r--r--   arch/x86/crypto/chacha_glue.c                 |    6
-rw-r--r--   arch/x86/crypto/poly1305-x86_64-cryptogams.pl |   16
-rw-r--r--   arch/x86/crypto/poly1305_glue.c               |   11
-rw-r--r--   arch/x86/crypto/sha1_ssse3_asm.S              |    4
-rw-r--r--   arch/x86/crypto/sha1_ssse3_glue.c             |   13
-rw-r--r--   arch/x86/crypto/sha256-avx-asm.S              |    3
-rw-r--r--   arch/x86/crypto/sha256-avx2-asm.S             |    3
-rw-r--r--   arch/x86/crypto/sha256_ssse3_glue.c           |   12
-rw-r--r--   arch/x86/crypto/sha512-avx-asm.S              |    2
-rw-r--r--   arch/x86/crypto/sha512-avx2-asm.S             |    3
-rw-r--r--   arch/x86/crypto/sha512_ssse3_glue.c           |   10
16 files changed, 73 insertions, 202 deletions
diff --git a/arch/x86/crypto/.gitignore b/arch/x86/crypto/.gitignore
index 30be0400a439..580c839bb177 100644
--- a/arch/x86/crypto/.gitignore
+++ b/arch/x86/crypto/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
 poly1305-x86_64-cryptogams.S
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 8c2e9eadee8a..a31de0c6ccde 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -1,131 +1,97 @@
 # SPDX-License-Identifier: GPL-2.0
 #
-# Arch-specific CryptoAPI modules.
-#
+# x86 crypto algorithms
 
 OBJECT_FILES_NON_STANDARD := y
 
-avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
-			$(comma)4)$(comma)%ymm2,yes,no)
-avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)
-sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)
-sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
-adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no)
-
 obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
 
 obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
+twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
+obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
+twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
+obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
+twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
+obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o
+twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o twofish_avx_glue.o
+
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
+serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
+obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
+serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
+obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o
+serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o
+obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
+
 obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
+des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
+
 obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
+camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
+obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o
+camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o camellia_aesni_avx_glue.o
+obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
+camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o
+
 obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
-obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
-obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
-obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o
-obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
-obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
-obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
+blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 
-obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
-obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
-obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
-obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
+obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o
+cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o
+
+obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o
+cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o
 
 obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
+aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
 
-obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
-obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
+obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o
+chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha_glue.o
+chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o
 
-# These modules require the assembler to support ADX.
-ifeq ($(adx_supported),yes)
-	obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
-endif
-
-# These modules require assembler to support AVX.
-ifeq ($(avx_supported),yes)
-	obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += \
-						camellia-aesni-avx-x86_64.o
-	obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o
-	obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o
-	obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o
-	obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o
-	obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
-endif
-
-# These modules require assembler to support AVX2.
-ifeq ($(avx2_supported),yes)
-	obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
-	obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
-endif
+obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
+aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
+aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 
-twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
-serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
+obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
+sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o
+sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o
 
-des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
-camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
-blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
-twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
-twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
-chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o
-serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
+obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
+sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
+sha256-ssse3-$(CONFIG_AS_SHA256_NI) += sha256_ni_asm.o
 
-aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
+obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
+sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
 
-nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
+obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
 blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
-poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
-ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),)
-targets += poly1305-x86_64-cryptogams.S
-endif
-
-ifeq ($(avx_supported),yes)
-	camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
-					camellia_aesni_avx_glue.o
-	cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o
-	cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o
-	twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o \
-				twofish_avx_glue.o
-	serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o \
-				serpent_avx_glue.o
-endif
-
-ifeq ($(avx2_supported),yes)
-	camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o
-	chacha-x86_64-y += chacha-avx2-x86_64.o
-	serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
-
-	nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
-endif
-
-ifeq ($(avx512_supported),yes)
-	chacha-x86_64-y += chacha-avx512vl-x86_64.o
-endif
 
-aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
-aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
+obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
-sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
-ifeq ($(avx2_supported),yes)
-sha1-ssse3-y += sha1_avx2_x86_64_asm.o
-endif
-ifeq ($(sha1_ni_supported),yes)
-sha1-ssse3-y += sha1_ni_asm.o
-endif
+
+obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
 crc32c-intel-y := crc32c-intel_glue.o
 crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
+
+obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
-sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
-ifeq ($(sha256_ni_supported),yes)
-sha256-ssse3-y += sha256_ni_asm.o
-endif
-sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
+
+obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
 crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
 
+obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
+poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
+targets += poly1305-x86_64-cryptogams.S
+
+obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
+nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
+obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
+nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
+
+obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 $(obj)/%.S: $(src)/%.pl FORCE
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index bfa1c0b3e5b4..0cea33295287 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -886,7 +886,6 @@ _less_than_8_bytes_left_\@:
 _partial_block_done_\@:
 .endm # PARTIAL_BLOCK
 
-#ifdef CONFIG_AS_AVX
 ###############################################################################
 # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
 # Input: A and B (128-bits each, bit-reflected)
@@ -1869,9 +1868,6 @@ key_256_finalize:
         ret
 SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
 
-#endif /* CONFIG_AS_AVX */
-
-#ifdef CONFIG_AS_AVX2
 ###############################################################################
 # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
 # Input: A and B (128-bits each, bit-reflected)
@@ -2839,5 +2835,3 @@ key_256_finalize4:
         FUNC_RESTORE
         ret
 SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)
-
-#endif /* CONFIG_AS_AVX2 */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 75b6ea20491e..ad8a7188a2bf 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -185,7 +185,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
 	.finalize = &aesni_gcm_finalize,
 };
 
-#ifdef CONFIG_AS_AVX
 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
 		void *keys, u8 *out, unsigned int num_bytes);
 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
@@ -234,9 +233,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
 	.finalize = &aesni_gcm_finalize_avx_gen2,
 };
 
-#endif
-
-#ifdef CONFIG_AS_AVX2
 /*
  * asmlinkage void aesni_gcm_init_avx_gen4()
  * gcm_data *my_ctx_data, context data
@@ -279,8 +275,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
 	.finalize = &aesni_gcm_finalize_avx_gen4,
 };
 
-#endif
-
 static inline struct
 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
 {
@@ -476,7 +470,6 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
-#ifdef CONFIG_AS_AVX
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv)
 {
@@ -493,7 +486,6 @@ static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 	else
 		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 }
-#endif
 
 static int ctr_crypt(struct skcipher_request *req)
 {
@@ -711,14 +703,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 	if (!enc)
 		left -= auth_tag_len;
 
-#ifdef CONFIG_AS_AVX2
 	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
 		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
-#endif
-#ifdef CONFIG_AS_AVX
 	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
 		gcm_tfm = &aesni_gcm_tfm_sse;
-#endif
 	/* Linearize assoc, if not already linear */
 	if (req->src->length >= assoclen && req->src->length &&
@@ -1076,31 +1064,24 @@ static int __init aesni_init(void)
 	if (!x86_match_cpu(aesni_cpu_id))
 		return -ENODEV;
 #ifdef CONFIG_X86_64
-#ifdef CONFIG_AS_AVX2
 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
 		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
 	} else
-#endif
-#ifdef CONFIG_AS_AVX
 	if (boot_cpu_has(X86_FEATURE_AVX)) {
 		pr_info("AVX version of gcm_enc/dec engaged.\n");
 		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
-	} else
-#endif
-	{
+	} else {
 		pr_info("SSE version of gcm_enc/dec engaged.\n");
 		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
 	}
 	aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
 	if (boot_cpu_has(X86_FEATURE_AVX)) {
 		/* optimize performance of ctr mode encryption transform */
 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
 		pr_info("AES CTR mode by8 optimization enabled\n");
 	}
 #endif
-#endif
 
 	err = crypto_register_alg(&aesni_cipher_alg);
 	if (err)
diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S
index 24910b766bdd..2ca79974f819 100644
--- a/arch/x86/crypto/blake2s-core.S
+++ b/arch/x86/crypto/blake2s-core.S
@@ -46,7 +46,6 @@ SIGMA2:
 #endif /* CONFIG_AS_AVX512 */
 
 .text
-#ifdef CONFIG_AS_SSSE3
 SYM_FUNC_START(blake2s_compress_ssse3)
 	testq		%rdx,%rdx
 	je		.Lendofloop
@@ -174,7 +173,6 @@ SYM_FUNC_START(blake2s_compress_ssse3)
 .Lendofloop:
 	ret
 SYM_FUNC_END(blake2s_compress_ssse3)
-#endif /* CONFIG_AS_SSSE3 */
 
 #ifdef CONFIG_AS_AVX512
 SYM_FUNC_START(blake2s_compress_avx512)
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 68a74953efaf..b412c21ee06e 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -79,8 +79,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
 		}
 	}
 
-	if (IS_ENABLED(CONFIG_AS_AVX2) &&
-	    static_branch_likely(&chacha_use_avx2)) {
+	if (static_branch_likely(&chacha_use_avx2)) {
 		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
 			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
 			bytes -= CHACHA_BLOCK_SIZE * 8;
@@ -288,8 +287,7 @@ static int __init chacha_simd_mod_init(void)
 
 	static_branch_enable(&chacha_use_simd);
 
-	if (IS_ENABLED(CONFIG_AS_AVX2) &&
-	    boot_cpu_has(X86_FEATURE_AVX) &&
+	if (boot_cpu_has(X86_FEATURE_AVX) &&
 	    boot_cpu_has(X86_FEATURE_AVX2) &&
 	    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
 		static_branch_enable(&chacha_use_avx2);
diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
index 7a6b5380a46f..137edcf038cb 100644
--- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
@@ -404,10 +404,6 @@ ___
 &end_function("poly1305_emit_x86_64");
 
 if ($avx) {
-if($kernel) {
-	$code .= "#ifdef CONFIG_AS_AVX\n";
-}
-
 ########################################################################
 # Layout of opaque area is following.
 #
@@ -1516,16 +1512,8 @@ $code.=<<___;
 ___
 &end_function("poly1305_emit_avx");
 
-if ($kernel) {
-	$code .= "#endif\n";
-}
-
 if ($avx>1) {
-if ($kernel) {
-	$code .= "#ifdef CONFIG_AS_AVX2\n";
-}
-
 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
     map("%ymm$_",(0..15));
 my $S4=$MASK;
@@ -2816,10 +2804,6 @@ ___
 poly1305_blocks_avxN(0);
 &end_function("poly1305_blocks_avx2");
 
-if($kernel) {
-	$code .= "#endif\n";
-}
-
 #######################################################################
 if ($avx>2) {
 # On entry we have input length divisible by 64. But since inner loop
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 79bb58737d52..6dfec19f7d57 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -94,7 +94,7 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
 	BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
 		     PAGE_SIZE % POLY1305_BLOCK_SIZE);
 
-	if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) ||
+	if (!static_branch_likely(&poly1305_use_avx) ||
 	    (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
 	    !crypto_simd_usable()) {
 		convert_to_base2_64(ctx);
@@ -108,7 +108,7 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
 		kernel_fpu_begin();
 		if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
 			poly1305_blocks_avx512(ctx, inp, bytes, padbit);
-		else if (IS_ENABLED(CONFIG_AS_AVX2) && static_branch_likely(&poly1305_use_avx2))
+		else if (static_branch_likely(&poly1305_use_avx2))
 			poly1305_blocks_avx2(ctx, inp, bytes, padbit);
 		else
 			poly1305_blocks_avx(ctx, inp, bytes, padbit);
@@ -123,7 +123,7 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
 static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
 			       const u32 nonce[4])
 {
-	if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx))
+	if (!static_branch_likely(&poly1305_use_avx))
 		poly1305_emit_x86_64(ctx, mac, nonce);
 	else
 		poly1305_emit_avx(ctx, mac, nonce);
@@ -261,11 +261,10 @@ static struct shash_alg alg = {
 
 static int __init poly1305_simd_mod_init(void)
 {
-	if (IS_ENABLED(CONFIG_AS_AVX) && boot_cpu_has(X86_FEATURE_AVX) &&
+	if (boot_cpu_has(X86_FEATURE_AVX) &&
 	    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
 		static_branch_enable(&poly1305_use_avx);
-	if (IS_ENABLED(CONFIG_AS_AVX2) && boot_cpu_has(X86_FEATURE_AVX) &&
-	    boot_cpu_has(X86_FEATURE_AVX2) &&
+	if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
 	    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
 		static_branch_enable(&poly1305_use_avx2);
 	if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) &&
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index 12e2d19d7402..d25668d2a1e9 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -467,8 +467,6 @@ W_PRECALC_SSSE3
  */
 SHA1_VECTOR_ASM     sha1_transform_ssse3
 
-#ifdef CONFIG_AS_AVX
-
 .macro W_PRECALC_AVX
 
 .purgem W_PRECALC_00_15
@@ -553,5 +551,3 @@ W_PRECALC_AVX
  * const u8 *data, int blocks);
  */
 SHA1_VECTOR_ASM     sha1_transform_avx
-
-#endif
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index d70b40ad594c..a801ffc10cbb 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -114,7 +114,6 @@ static void unregister_sha1_ssse3(void)
 		crypto_unregister_shash(&sha1_ssse3_alg);
 }
 
-#ifdef CONFIG_AS_AVX
 asmlinkage void sha1_transform_avx(struct sha1_state *state,
 				   const u8 *data, int blocks);
 
@@ -175,13 +174,6 @@ static void unregister_sha1_avx(void)
 		crypto_unregister_shash(&sha1_avx_alg);
 }
 
-#else  /* CONFIG_AS_AVX */
-static inline int register_sha1_avx(void) { return 0; }
-static inline void unregister_sha1_avx(void) { }
-#endif /* CONFIG_AS_AVX */
-
-
-#if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX)
 #define SHA1_AVX2_BLOCK_OPTSIZE	4	/* optimal 4*64 bytes of SHA1 blocks */
 
 asmlinkage void sha1_transform_avx2(struct sha1_state *state,
@@ -253,11 +245,6 @@ static void unregister_sha1_avx2(void)
 		crypto_unregister_shash(&sha1_avx2_alg);
 }
 
-#else
-static inline int register_sha1_avx2(void) { return 0; }
-static inline void unregister_sha1_avx2(void) { }
-#endif
-
 #ifdef CONFIG_AS_SHA1_NI
 asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
 				  int rounds);
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index fcbc30f58c38..4739cd31b9db 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -47,7 +47,6 @@
 # This code schedules 1 block at a time, with 4 lanes per block
 ########################################################################
 
-#ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
 
 ## assume buffers not aligned
@@ -498,5 +497,3 @@ _SHUF_00BA:
 # shuffle xDxC -> DC00
 _SHUF_DC00:
 	.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
-
-#endif
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 499d9ec129de..11ff60c29c8b 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -48,7 +48,6 @@
 # This code schedules 2 blocks at a time, with 4 lanes per block
 ########################################################################
 
-#ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
 
 ## assume buffers not aligned
@@ -767,5 +766,3 @@ _SHUF_00BA:
 .align 32
 _SHUF_DC00:
 	.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF
-
-#endif
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 03ad657c04bd..6394b5fe8db6 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -144,7 +144,6 @@ static void unregister_sha256_ssse3(void)
 				ARRAY_SIZE(sha256_ssse3_algs));
 }
 
-#ifdef CONFIG_AS_AVX
 asmlinkage void sha256_transform_avx(struct sha256_state *state,
 				     const u8 *data, int blocks);
 
@@ -221,12 +220,6 @@ static void unregister_sha256_avx(void)
 				ARRAY_SIZE(sha256_avx_algs));
 }
 
-#else
-static inline int register_sha256_avx(void) { return 0; }
-static inline void unregister_sha256_avx(void) { }
-#endif
-
-#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
 asmlinkage void sha256_transform_rorx(struct sha256_state *state,
 				      const u8 *data, int blocks);
 
@@ -301,11 +294,6 @@ static void unregister_sha256_avx2(void)
 				ARRAY_SIZE(sha256_avx2_algs));
 }
 
-#else
-static inline int register_sha256_avx2(void) { return 0; }
-static inline void unregister_sha256_avx2(void) { }
-#endif
-
 #ifdef CONFIG_AS_SHA256_NI
 asmlinkage void sha256_ni_transform(struct sha256_state *digest,
 				    const u8 *data, int rounds);
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 90ea945ba5e6..63470fd6ae32 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -47,7 +47,6 @@
 #
 ########################################################################
 
-#ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
 
 .text
@@ -424,4 +423,3 @@ K512:
 	.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
 	.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
 	.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
-#endif
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index 3dd886b14e7d..3a44bdcfd583 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -49,7 +49,6 @@
 # This code schedules 1 blocks at a time, with 4 lanes per block
 ########################################################################
 
-#ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
 
 .text
@@ -749,5 +748,3 @@ PSHUFFLE_BYTE_FLIP_MASK:
 MASK_YMM_LO:
 	.octa 0x00000000000000000000000000000000
 	.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
-
-#endif
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 1c444f41037c..82cc1b3ced1d 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -142,7 +142,6 @@ static void unregister_sha512_ssse3(void)
 			ARRAY_SIZE(sha512_ssse3_algs));
 }
 
-#ifdef CONFIG_AS_AVX
 asmlinkage void sha512_transform_avx(struct sha512_state *state,
 				     const u8 *data, int blocks);
 static bool avx_usable(void)
@@ -218,12 +217,7 @@ static void unregister_sha512_avx(void)
 	crypto_unregister_shashes(sha512_avx_algs,
 		ARRAY_SIZE(sha512_avx_algs));
 }
-#else
-static inline int register_sha512_avx(void) { return 0; }
-static inline void unregister_sha512_avx(void) { }
-#endif
 
-#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX)
 asmlinkage void sha512_transform_rorx(struct sha512_state *state,
 				      const u8 *data, int blocks);
 
@@ -298,10 +292,6 @@ static void unregister_sha512_avx2(void)
 	crypto_unregister_shashes(sha512_avx2_algs,
 		ARRAY_SIZE(sha512_avx2_algs));
 }
-#else
-static inline int register_sha512_avx2(void) { return 0; }
-static inline void unregister_sha512_avx2(void) { }
-#endif
 
 static int __init sha512_ssse3_mod_init(void)
 {
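
The glue-code hunks above all converge on one dispatch shape: assembler support for the baseline extensions (SSSE3, AVX, AVX2) is now guaranteed at build time, so compile-time IS_ENABLED() guards survive only for extensions that remain optional (CONFIG_AS_AVX512 here), while CPU capability is still selected at runtime through static branches. A minimal, self-contained C sketch of that shape follows; the key names, function names, and init hook are hypothetical and not taken from this diff:

	#include <linux/jump_label.h>
	#include <linux/linkage.h>
	#include <linux/types.h>
	#include <asm/cpufeature.h>

	/* Hypothetical static keys, flipped once at module init. */
	static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_avx2);
	static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_avx512);

	/* Hypothetical asm entry points. The AVX2 routine can be
	 * referenced unconditionally, since after this change the
	 * assembler is guaranteed to be able to build it. */
	asmlinkage void blocks_avx512(void *ctx, const u8 *in, size_t len);
	asmlinkage void blocks_avx2(void *ctx, const u8 *in, size_t len);
	asmlinkage void blocks_sse(void *ctx, const u8 *in, size_t len);

	static void blocks_dispatch(void *ctx, const u8 *in, size_t len)
	{
		/* AVX-512 assembler support is still optional, so it keeps
		 * a compile-time test; IS_ENABLED() lets the compiler drop
		 * the dead call without an #ifdef block. */
		if (IS_ENABLED(CONFIG_AS_AVX512) &&
		    static_branch_likely(&use_avx512))
			blocks_avx512(ctx, in, len);
		else if (static_branch_likely(&use_avx2))
			blocks_avx2(ctx, in, len);
		else
			blocks_sse(ctx, in, len);
	}

	static int __init dispatch_init(void)
	{
		/* Runtime CPU probing is unchanged by this commit: each
		 * static branch is enabled only when the CPU reports the
		 * feature. (Wired up via module_init() in real code.) */
		if (boot_cpu_has(X86_FEATURE_AVX2))
			static_branch_enable(&use_avx2);
		if (boot_cpu_has(X86_FEATURE_AVX512F))
			static_branch_enable(&use_avx512);
		return 0;
	}

The payoff is what the diffstat shows: the #else stubs (the register_*/unregister_* no-ops) and the per-extension as-instr probes in the Makefile disappear, because an assembler lacking these extensions is now handled at configuration time instead of silently compiling the fast paths out.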