author     Linus Torvalds <torvalds@linux-foundation.org>  2017-02-23 09:54:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-23 09:54:19 -0800
commit     5bcbe22ca47da04cda3a858cef67f55b550c1d13
tree       49bd61e32eb2d652085a49182436322a3e0e9840  /arch/x86
parent     1db934a5b77a9e37c4742c704fde6af233187a98
parent     12cb3a1c4184f891d965d1f39f8cfcc9ef617647
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Try to catch hash output overrun in testmgr
   - Introduce walksize attribute for batched walking
   - Make crypto_xor() and crypto_inc() alignment agnostic

  Algorithms:
   - Add time-invariant AES algorithm
   - Add standalone CBCMAC algorithm

  Drivers:
   - Add NEON accelerated chacha20 on ARM/ARM64
   - Expose AES-CTR as synchronous skcipher on ARM64
   - Add scalar AES implementation on ARM64
   - Improve scalar AES implementation on ARM
   - Improve NEON AES implementation on ARM/ARM64
   - Merge CRC32 and PMULL instruction based drivers on ARM64
   - Add NEON accelerated CBCMAC/CMAC/XCBC AES on ARM64
   - Add IPsec AUTHENC implementation in atmel
   - Add Support for Octeon-tx CPT Engine
   - Add Broadcom SPU driver
   - Add MediaTek driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (142 commits)
  crypto: xts - Add ECB dependency
  crypto: cavium - switch to pci_alloc_irq_vectors
  crypto: cavium - switch to pci_alloc_irq_vectors
  crypto: cavium - remove dead MSI-X related define
  crypto: brcm - Avoid double free in ahash_finup()
  crypto: cavium - fix Kconfig dependencies
  crypto: cavium - cpt_bind_vq_to_grp could return an error code
  crypto: doc - fix typo
  hwrng: omap - update Kconfig help description
  crypto: ccm - drop unnecessary minimum 32-bit alignment
  crypto: ccm - honour alignmask of subordinate MAC cipher
  crypto: caam - fix state buffer DMA (un)mapping
  crypto: caam - abstract ahash request double buffering
  crypto: caam - fix error path for ctx_dma mapping failure
  crypto: caam - fix DMA API leaks for multiple setkey() calls
  crypto: caam - don't dma_map key for hash algorithms
  crypto: caam - use dma_map_sg() return code
  crypto: caam - replace sg_count() with sg_nents_for_len()
  crypto: caam - check sg_count() return value
  crypto: caam - fix HW S/G in ablkcipher_giv_edesc_alloc()
  ...
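Most of the arch/x86 changes below move assembler constants out of the writable .data section and into per-constant, read-only sections the linker can merge. A minimal sketch of that pattern in GNU as syntax (the label is illustrative, not taken from the patch):

    # before: writable .data, never merged or reordered by the linker
    .data
    .align 16
    bswap_mask: .octa 0x000102030405060708090A0B0C0D0E0F

    # after: allocatable ("a"), mergeable ("M") read-only section with a
    # 16-byte entry size; identical 16-byte constants emitted by other
    # objects can be folded into a single copy at link time
    .section .rodata.cst16.bswap_mask, "aM", @progbits, 16
    .align 16
    bswap_mask: .octa 0x000102030405060708090A0B0C0D0E0F
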
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S                     | 37
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S              | 32
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c                    | 12
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx-asm_64.S           |  5
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx2-asm_64.S          | 12
-rw-r--r--  arch/x86/crypto/cast5-avx-x86_64-asm_64.S             | 14
-rw-r--r--  arch/x86/crypto/cast6-avx-x86_64-asm_64.S             | 12
-rw-r--r--  arch/x86/crypto/chacha20-avx2-x86_64.S                |  9
-rw-r--r--  arch/x86/crypto/chacha20-ssse3-x86_64.S               |  7
-rw-r--r--  arch/x86/crypto/chacha20_glue.c                       | 70
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S             |  2
-rw-r--r--  arch/x86/crypto/crct10dif-pcl-asm_64.S                | 14
-rw-r--r--  arch/x86/crypto/des3_ede-asm_64.S                     |  2
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_asm.S             |  3
-rw-r--r--  arch/x86/crypto/poly1305-avx2-x86_64.S                |  6
-rw-r--r--  arch/x86/crypto/poly1305-sse2-x86_64.S                |  6
-rw-r--r--  arch/x86/crypto/serpent-avx-x86_64-asm_64.S           |  5
-rw-r--r--  arch/x86/crypto/serpent-avx2-asm_64.S                 |  9
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S      |  6
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S     |  3
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_x8_avx2.S                | 15
-rw-r--r--  arch/x86/crypto/sha1_ni_asm.S                         |  8
-rw-r--r--  arch/x86/crypto/sha256-avx-asm.S                      |  9
-rw-r--r--  arch/x86/crypto/sha256-avx2-asm.S                     |  9
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S  |  6
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S |  3
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_x8_avx2.S            |  7
-rw-r--r--  arch/x86/crypto/sha256-ssse3-asm.S                    |  8
-rw-r--r--  arch/x86/crypto/sha256_ni_asm.S                       |  4
-rw-r--r--  arch/x86/crypto/sha512-avx-asm.S                      |  9
-rw-r--r--  arch/x86/crypto/sha512-avx2-asm.S                     | 10
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb.c                 | 64
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S  | 10
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S |  4
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_x4_avx2.S            |  4
-rw-r--r--  arch/x86/crypto/sha512-ssse3-asm.S                    |  9
-rw-r--r--  arch/x86/crypto/twofish-avx-x86_64-asm_64.S           |  6
37 files changed, 313 insertions, 138 deletions
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 383a6f84a060..3c465184ff8a 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -46,28 +46,49 @@
#ifdef __x86_64__
-.data
+# constants in mergeable sections, linker can reorder and merge
+.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16
.align 16
.Lgf128mul_x_ble_mask:
.octa 0x00000000000000010000000000000087
+.section .rodata.cst16.POLY, "aM", @progbits, 16
+.align 16
POLY: .octa 0xC2000000000000000000000000000001
+.section .rodata.cst16.TWOONE, "aM", @progbits, 16
+.align 16
TWOONE: .octa 0x00000001000000000000000000000001
-# order of these constants should not change.
-# more specifically, ALL_F should follow SHIFT_MASK,
-# and ZERO should follow ALL_F
-
+.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
+.align 16
SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F
+.section .rodata.cst16.MASK1, "aM", @progbits, 16
+.align 16
MASK1: .octa 0x0000000000000000ffffffffffffffff
+.section .rodata.cst16.MASK2, "aM", @progbits, 16
+.align 16
MASK2: .octa 0xffffffffffffffff0000000000000000
-SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
-ALL_F: .octa 0xffffffffffffffffffffffffffffffff
-ZERO: .octa 0x00000000000000000000000000000000
+.section .rodata.cst16.ONE, "aM", @progbits, 16
+.align 16
ONE: .octa 0x00000000000000000000000000000001
+.section .rodata.cst16.F_MIN_MASK, "aM", @progbits, 16
+.align 16
F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0
+.section .rodata.cst16.dec, "aM", @progbits, 16
+.align 16
dec: .octa 0x1
+.section .rodata.cst16.enc, "aM", @progbits, 16
+.align 16
enc: .octa 0x2
+# order of these constants should not change.
+# more specifically, ALL_F should follow SHIFT_MASK,
+# and zero should follow ALL_F
+.section .rodata, "a", @progbits
+.align 16
+SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
+ALL_F: .octa 0xffffffffffffffffffffffffffffffff
+ .octa 0x00000000000000000000000000000000
+
.text
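
SHIFT_MASK, ALL_F and the trailing block of zeroes deliberately stay together in a plain, non-mergeable .rodata section: the partial-block handling loads 16 bytes at a variable offset that straddles the boundary between the adjacent constants, so the linker must not reorder or deduplicate them. A rough sketch of that kind of access, with illustrative register use rather than the exact code from aesni-intel_asm.S:

    .section .rodata, "a", @progbits
    .align 16
    SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
    ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
                .octa 0x00000000000000000000000000000000

    .text
    # %rax = number of valid bytes in the final partial block (1..15);
    # the unaligned 16-byte load starts inside one constant and runs
    # into the next, so SHIFT_MASK, ALL_F and the zero block must stay
    # contiguous and in this order.
    lea     SHIFT_MASK(%rip), %r11
    movdqu  (%r11, %rax), %xmm1
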
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 522ab68d1c88..d664382c6e56 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -122,23 +122,39 @@
#include <linux/linkage.h>
#include <asm/inst.h>
-.data
+# constants in mergeable sections, linker can reorder and merge
+.section .rodata.cst16.POLY, "aM", @progbits, 16
.align 16
-
POLY: .octa 0xC2000000000000000000000000000001
+
+.section .rodata.cst16.POLY2, "aM", @progbits, 16
+.align 16
POLY2: .octa 0xC20000000000000000000001C2000000
-TWOONE: .octa 0x00000001000000000000000000000001
-# order of these constants should not change.
-# more specifically, ALL_F should follow SHIFT_MASK, and ZERO should follow ALL_F
+.section .rodata.cst16.TWOONE, "aM", @progbits, 16
+.align 16
+TWOONE: .octa 0x00000001000000000000000000000001
+.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
+.align 16
SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F
-SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
-ALL_F: .octa 0xffffffffffffffffffffffffffffffff
-ZERO: .octa 0x00000000000000000000000000000000
+
+.section .rodata.cst16.ONE, "aM", @progbits, 16
+.align 16
ONE: .octa 0x00000000000000000000000000000001
+
+.section .rodata.cst16.ONEf, "aM", @progbits, 16
+.align 16
ONEf: .octa 0x01000000000000000000000000000000
+# order of these constants should not change.
+# more specifically, ALL_F should follow SHIFT_MASK, and zero should follow ALL_F
+.section .rodata, "a", @progbits
+.align 16
+SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
+ALL_F: .octa 0xffffffffffffffffffffffffffffffff
+ .octa 0x00000000000000000000000000000000
+
.text
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 7ff1b0c86a8e..93de8ea51548 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -740,9 +740,11 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
*((__be32 *)(iv+12)) = counter;
if (sg_is_last(req->src) &&
- req->src->offset + req->src->length <= PAGE_SIZE &&
+ (!PageHighMem(sg_page(req->src)) ||
+ req->src->offset + req->src->length <= PAGE_SIZE) &&
sg_is_last(req->dst) &&
- req->dst->offset + req->dst->length <= PAGE_SIZE) {
+ (!PageHighMem(sg_page(req->dst)) ||
+ req->dst->offset + req->dst->length <= PAGE_SIZE)) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
assoc = scatterwalk_map(&src_sg_walk);
@@ -822,9 +824,11 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
*((__be32 *)(iv+12)) = counter;
if (sg_is_last(req->src) &&
- req->src->offset + req->src->length <= PAGE_SIZE &&
+ (!PageHighMem(sg_page(req->src)) ||
+ req->src->offset + req->src->length <= PAGE_SIZE) &&
sg_is_last(req->dst) &&
- req->dst->offset + req->dst->length <= PAGE_SIZE) {
+ (!PageHighMem(sg_page(req->dst)) ||
+ req->dst->offset + req->dst->length <= PAGE_SIZE)) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
assoc = scatterwalk_map(&src_sg_walk);
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index aa9e8bd163f6..f7c495e2863c 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -571,7 +571,9 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu y6, 14 * 16(rio); \
vmovdqu y7, 15 * 16(rio);
-.data
+
+/* NB: section is mergeable, all elements must be aligned 16-byte blocks */
+.section .rodata.cst16, "aM", @progbits, 16
.align 16
#define SHUFB_BYTES(idx) \
@@ -711,6 +713,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
/* 4-bit mask */
+.section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
.align 4
.L0f0f0f0f:
.long 0x0f0f0f0f
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 16186c18656d..eee5b3982cfd 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -610,20 +610,25 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
vmovdqu y6, 14 * 32(rio); \
vmovdqu y7, 15 * 32(rio);
-.data
-.align 32
+.section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32
+.align 32
#define SHUFB_BYTES(idx) \
0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx)
-
.Lshufb_16x16b:
.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)
.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)
+.section .rodata.cst32.pack_bswap, "aM", @progbits, 32
+.align 32
.Lpack_bswap:
.long 0x00010203, 0x04050607, 0x80808080, 0x80808080
.long 0x00010203, 0x04050607, 0x80808080, 0x80808080
+/* NB: section is mergeable, all elements must be aligned 16-byte blocks */
+.section .rodata.cst16, "aM", @progbits, 16
+.align 16
+
/* For CTR-mode IV byteswap */
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
@@ -750,6 +755,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+.section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
.align 4
/* 4-bit mask */
.L0f0f0f0f:
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index 14fa1966bf01..b4a8806234ea 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -195,19 +195,29 @@
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1;
-.data
-
+.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
+.align 16
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+.section .rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
+.align 16
.Lbswap_iv_mask:
.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
+
+.section .rodata.cst4.16_mask, "aM", @progbits, 4
+.align 4
.L16_mask:
.byte 16, 16, 16, 16
+.section .rodata.cst4.32_mask, "aM", @progbits, 4
+.align 4
.L32_mask:
.byte 32, 0, 0, 0
+.section .rodata.cst4.first_mask, "aM", @progbits, 4
+.align 4
.Lfirst_mask:
.byte 0x1f, 0, 0, 0
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index c419389889cd..952d3156a933 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -225,8 +225,7 @@
vpshufb rmask, x2, x2; \
vpshufb rmask, x3, x3;
-.data
-
+.section .rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
@@ -244,10 +243,19 @@
.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+.section .rodata.cst4.L16_mask, "aM", @progbits, 4
+.align 4
.L16_mask:
.byte 16, 16, 16, 16
+
+.section .rodata.cst4.L32_mask, "aM", @progbits, 4
+.align 4
.L32_mask:
.byte 32, 0, 0, 0
+
+.section .rodata.cst4.first_mask, "aM", @progbits, 4
+.align 4
.Lfirst_mask:
.byte 0x1f, 0, 0, 0
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
index 16694e625f77..3a2dc3dc6cac 100644
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -11,13 +11,18 @@
#include <linux/linkage.h>
-.data
+.section .rodata.cst32.ROT8, "aM", @progbits, 32
.align 32
-
ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
.octa 0x0e0d0c0f0a09080b0605040702010003
+
+.section .rodata.cst32.ROT16, "aM", @progbits, 32
+.align 32
ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
.octa 0x0d0c0f0e09080b0a0504070601000302
+
+.section .rodata.cst32.CTRINC, "aM", @progbits, 32
+.align 32
CTRINC: .octa 0x00000003000000020000000100000000
.octa 0x00000007000000060000000500000004
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 3a33124e9112..3f511a7d73b8 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -11,11 +11,14 @@
#include <linux/linkage.h>
-.data
+.section .rodata.cst16.ROT8, "aM", @progbits, 16
.align 16
-
ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
+.section .rodata.cst16.ROT16, "aM", @progbits, 16
+.align 16
ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
+.section .rodata.cst16.CTRINC, "aM", @progbits, 16
+.align 16
CTRINC: .octa 0x00000003000000020000000100000000
.text
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index f910d1d449f0..1e6af1b35f7b 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -11,7 +11,7 @@
#include <crypto/algapi.h>
#include <crypto/chacha20.h>
-#include <linux/crypto.h>
+#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
@@ -63,36 +63,37 @@ static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
}
}
-static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int chacha20_simd(struct skcipher_request *req)
{
- u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 *state, state_buf[16 + 2] __aligned(8);
+ struct skcipher_walk walk;
int err;
- if (nbytes <= CHACHA20_BLOCK_SIZE || !may_use_simd())
- return crypto_chacha20_crypt(desc, dst, src, nbytes);
+ BUILD_BUG_ON(CHACHA20_STATE_ALIGN != 16);
+ state = PTR_ALIGN(state_buf + 0, CHACHA20_STATE_ALIGN);
- state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
+ if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd())
+ return crypto_chacha20_crypt(req);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, true);
- crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+ crypto_chacha20_init(state, ctx, walk.iv);
kernel_fpu_begin();
while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
- err = blkcipher_walk_done(desc, &walk,
- walk.nbytes % CHACHA20_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % CHACHA20_BLOCK_SIZE);
}
if (walk.nbytes) {
chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
kernel_fpu_end();
@@ -100,27 +101,22 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
return err;
}
-static struct crypto_alg alg = {
- .cra_name = "chacha20",
- .cra_driver_name = "chacha20-simd",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = sizeof(struct chacha20_ctx),
- .cra_alignmask = sizeof(u32) - 1,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CHACHA20_KEY_SIZE,
- .max_keysize = CHACHA20_KEY_SIZE,
- .ivsize = CHACHA20_IV_SIZE,
- .geniv = "seqiv",
- .setkey = crypto_chacha20_setkey,
- .encrypt = chacha20_simd,
- .decrypt = chacha20_simd,
- },
- },
+static struct skcipher_alg alg = {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-simd",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha20_ctx),
+ .base.cra_alignmask = sizeof(u32) - 1,
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA20_KEY_SIZE,
+ .max_keysize = CHACHA20_KEY_SIZE,
+ .ivsize = CHACHA20_IV_SIZE,
+ .chunksize = CHACHA20_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = chacha20_simd,
+ .decrypt = chacha20_simd,
};
static int __init chacha20_simd_mod_init(void)
@@ -133,12 +129,12 @@ static int __init chacha20_simd_mod_init(void)
boot_cpu_has(X86_FEATURE_AVX2) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
#endif
- return crypto_register_alg(&alg);
+ return crypto_register_skcipher(&alg);
}
static void __exit chacha20_simd_mod_fini(void)
{
- crypto_unregister_alg(&alg);
+ crypto_unregister_skcipher(&alg);
}
module_init(chacha20_simd_mod_init);
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index dc05f010ca9b..7a7de27c6f41 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -312,7 +312,7 @@ do_return:
ret
ENDPROC(crc_pcl)
-.section .rodata, "a", %progbits
+.section .rodata, "a", @progbits
################################################################
## jump table Table is 129 entries x 2 bytes each
################################################################
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
index 35e97569d05f..de04d3e98d8d 100644
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S
@@ -554,12 +554,11 @@ _only_less_than_2:
ENDPROC(crc_t10dif_pcl)
-.data
-
+.section .rodata, "a", @progbits
+.align 16
# precomputed constants
# these constants are precomputed from the poly:
# 0x8bb70000 (0x8bb7 scaled to 32 bits)
-.align 16
# Q = 0x18BB70000
# rk1 = 2^(32*3) mod Q << 32
# rk2 = 2^(32*5) mod Q << 32
@@ -613,14 +612,23 @@ rk20:
+.section .rodata.cst16.mask1, "aM", @progbits, 16
+.align 16
mask1:
.octa 0x80808080808080808080808080808080
+
+.section .rodata.cst16.mask2, "aM", @progbits, 16
+.align 16
mask2:
.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
+.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
+.align 16
SHUF_MASK:
.octa 0x000102030405060708090A0B0C0D0E0F
+.section .rodata.cst32.pshufb_shf_table, "aM", @progbits, 32
+.align 32
pshufb_shf_table:
# use these values for shift constants for the pshufb instruction
# different alignments result in values as shown:
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
index 038f6ae87c5e..f3e91647ca27 100644
--- a/arch/x86/crypto/des3_ede-asm_64.S
+++ b/arch/x86/crypto/des3_ede-asm_64.S
@@ -537,7 +537,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way)
ret;
ENDPROC(des3_ede_x86_64_crypt_blk_3way)
-.data
+.section .rodata, "a", @progbits
.align 16
.L_s1:
.quad 0x0010100001010400, 0x0000000000000000
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index eed55c8cca4f..f94375a8dcd1 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -20,8 +20,7 @@
#include <asm/inst.h>
#include <asm/frame.h>
-.data
-
+.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
.octa 0x000102030405060708090a0b0c0d0e0f
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
index eff2f414e22b..3b6e70d085da 100644
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -11,11 +11,13 @@
#include <linux/linkage.h>
-.data
+.section .rodata.cst32.ANMASK, "aM", @progbits, 32
.align 32
-
ANMASK: .octa 0x0000000003ffffff0000000003ffffff
.octa 0x0000000003ffffff0000000003ffffff
+
+.section .rodata.cst32.ORMASK, "aM", @progbits, 32
+.align 32
ORMASK: .octa 0x00000000010000000000000001000000
.octa 0x00000000010000000000000001000000
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index 338c748054ed..c88c670cb5fc 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -11,10 +11,12 @@
#include <linux/linkage.h>
-.data
+.section .rodata.cst16.ANMASK, "aM", @progbits, 16
.align 16
-
ANMASK: .octa 0x0000000003ffffff0000000003ffffff
+
+.section .rodata.cst16.ORMASK, "aM", @progbits, 16
+.align 16
ORMASK: .octa 0x00000000010000000000000001000000
.text
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 8be571808342..2925077f8c6a 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -29,11 +29,12 @@
.file "serpent-avx-x86_64-asm_64.S"
-.data
+.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
-
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16
+.align 16
.Lxts_gf128mul_and_shl1_mask:
.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index 97c48add33ed..d67888f2a52a 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -20,13 +20,18 @@
.file "serpent-avx2-asm_64.S"
-.data
+.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
-
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+.section .rodata.cst16.xts_gf128mul_and_shl1_mask_0, "aM", @progbits, 16
+.align 16
.Lxts_gf128mul_and_shl1_mask_0:
.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
+
+.section .rodata.cst16.xts_gf128mul_and_shl1_mask_1, "aM", @progbits, 16
+.align 16
.Lxts_gf128mul_and_shl1_mask_1:
.byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 96df6a39d7e2..93b945597ecf 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
@@ -281,11 +281,13 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
ret
ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
-.data
-
+.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
.align 16
clear_low_nibble:
.octa 0x000000000000000000000000FFFFFFF0
+
+.section .rodata.cst8, "aM", @progbits, 8
+.align 8
one:
.quad 1
two:
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
index 63a0d9c8e31f..7a93b1c0d69a 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
@@ -203,8 +203,7 @@ return_null:
ENDPROC(sha1_mb_mgr_submit_avx2)
-.data
-
+.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
.align 16
clear_low_nibble:
.octa 0x000000000000000000000000FFFFFFF0
diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
index c9dae1cd2919..20f77aa633de 100644
--- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
@@ -461,21 +461,32 @@ lloop:
ENDPROC(sha1_x8_avx2)
-.data
-
+.section .rodata.cst32.K00_19, "aM", @progbits, 32
.align 32
K00_19:
.octa 0x5A8279995A8279995A8279995A827999
.octa 0x5A8279995A8279995A8279995A827999
+
+.section .rodata.cst32.K20_39, "aM", @progbits, 32
+.align 32
K20_39:
.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+
+.section .rodata.cst32.K40_59, "aM", @progbits, 32
+.align 32
K40_59:
.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+
+.section .rodata.cst32.K60_79, "aM", @progbits, 32
+.align 32
K60_79:
.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+
+.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
+.align 32
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203
.octa 0x0c0d0e0f08090a0b0405060700010203
diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S
index 874a651b9e7d..ebbdba72ae07 100644
--- a/arch/x86/crypto/sha1_ni_asm.S
+++ b/arch/x86/crypto/sha1_ni_asm.S
@@ -293,10 +293,12 @@ ENTRY(sha1_ni_transform)
ret
ENDPROC(sha1_ni_transform)
-.data
-
-.align 64
+.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
+.align 16
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x000102030405060708090a0b0c0d0e0f
+
+.section .rodata.cst16.UPPER_WORD_MASK, "aM", @progbits, 16
+.align 16
UPPER_WORD_MASK:
.octa 0xFFFFFFFF000000000000000000000000
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 92b3b5d75ba9..e08888a1a5f2 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -463,7 +463,7 @@ done_hash:
ret
ENDPROC(sha256_transform_avx)
-.data
+.section .rodata.cst256.K256, "aM", @progbits, 256
.align 64
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
@@ -483,14 +483,21 @@ K256:
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
+.align 16
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203
+.section .rodata.cst16._SHUF_00BA, "aM", @progbits, 16
+.align 16
# shuffle xBxA -> 00BA
_SHUF_00BA:
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100
+.section .rodata.cst16._SHUF_DC00, "aM", @progbits, 16
+.align 16
# shuffle xDxC -> DC00
_SHUF_DC00:
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
+
#endif
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 570ec5ec62d7..89c8f09787d2 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -723,7 +723,7 @@ done_hash:
ret
ENDPROC(sha256_transform_rorx)
-.data
+.section .rodata.cst512.K256, "aM", @progbits, 512
.align 64
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
@@ -759,14 +759,21 @@ K256:
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
+.align 32
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203
# shuffle xBxA -> 00BA
+.section .rodata.cst32._SHUF_00BA, "aM", @progbits, 32
+.align 32
_SHUF_00BA:
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100
# shuffle xDxC -> DC00
+.section .rodata.cst32._SHUF_DC00, "aM", @progbits, 32
+.align 32
_SHUF_DC00:
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF
+
#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index a78a0694ddef..8fe6338bcc84 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -284,11 +284,13 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
ret
ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
-.data
-
+.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
.align 16
clear_low_nibble:
.octa 0x000000000000000000000000FFFFFFF0
+
+.section .rodata.cst8, "aM", @progbits, 8
+.align 8
one:
.quad 1
two:
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
index 7ea670e25acc..b36ae7454084 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
@@ -208,8 +208,7 @@ return_null:
ENDPROC(sha256_mb_mgr_submit_avx2)
-.data
-
+.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
.align 16
clear_low_nibble:
.octa 0x000000000000000000000000FFFFFFF0
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
index aa21aea4c722..1687c80c5995 100644
--- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
@@ -437,7 +437,8 @@ Lrounds_16_xx:
ret
ENDPROC(sha256_x8_avx2)
-.data
+
+.section .rodata.K256_8, "a", @progbits
.align 64
K256_8:
.octa 0x428a2f98428a2f98428a2f98428a2f98
@@ -568,10 +569,14 @@ K256_8:
.octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
.octa 0xc67178f2c67178f2c67178f2c67178f2
.octa 0xc67178f2c67178f2c67178f2c67178f2
+
+.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
+.align 32
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203
.octa 0x0c0d0e0f08090a0b0405060700010203
+.section .rodata.cst256.K256, "aM", @progbits, 256
.align 64
.global K256
K256:
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index 2cedc44e8121..39b83c93e7fd 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -474,7 +474,7 @@ done_hash:
ret
ENDPROC(sha256_transform_ssse3)
-.data
+.section .rodata.cst256.K256, "aM", @progbits, 256
.align 64
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
@@ -494,13 +494,19 @@ K256:
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
+.align 16
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203
+.section .rodata.cst16._SHUF_00BA, "aM", @progbits, 16
+.align 16
# shuffle xBxA -> 00BA
_SHUF_00BA:
.octa 0xFFFFFFFFFFFFFFFF0b0a090803020100
+.section .rodata.cst16._SHUF_DC00, "aM", @progbits, 16
+.align 16
# shuffle xDxC -> DC00
_SHUF_DC00:
.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
index 748cdf21a938..fb58f58ecfbc 100644
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@ -329,7 +329,7 @@ ENTRY(sha256_ni_transform)
ret
ENDPROC(sha256_ni_transform)
-.data
+.section .rodata.cst256.K256, "aM", @progbits, 256
.align 64
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
@@ -349,5 +349,7 @@ K256:
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
+.align 16
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x0c0d0e0f08090a0b0405060700010203
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 565274d6a641..39235fefe6f7 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -370,14 +370,17 @@ ENDPROC(sha512_transform_avx)
########################################################################
### Binary Data
-.data
-
+.section .rodata.cst16.XMM_QWORD_BSWAP, "aM", @progbits, 16
.align 16
-
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
XMM_QWORD_BSWAP:
.octa 0x08090a0b0c0d0e0f0001020304050607
+# Mergeable 640-byte rodata section. This allows linker to merge the table
+# with other, exactly the same 640-byte fragment of another rodata section
+# (if such section exists).
+.section .rodata.cst640.K512, "aM", @progbits, 640
+.align 64
# K[t] used in SHA512 hashing
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index 1f20b35d8573..7f5f6c6ec72e 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -684,8 +684,11 @@ ENDPROC(sha512_transform_rorx)
########################################################################
### Binary Data
-.data
+# Mergeable 640-byte rodata section. This allows linker to merge the table
+# with other, exactly the same 640-byte fragment of another rodata section
+# (if such section exists).
+.section .rodata.cst640.K512, "aM", @progbits, 640
.align 64
# K[t] used in SHA512 hashing
K512:
@@ -730,14 +733,17 @@ K512:
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
.align 32
-
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
PSHUFFLE_BYTE_FLIP_MASK:
.octa 0x08090a0b0c0d0e0f0001020304050607
.octa 0x18191a1b1c1d1e1f1011121314151617
+.section .rodata.cst32.MASK_YMM_LO, "aM", @progbits, 32
+.align 32
MASK_YMM_LO:
.octa 0x00000000000000000000000000000000
.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+
#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 9c1bb6d58141..2dd3674b5a1e 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -221,7 +221,7 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
}
static struct sha512_hash_ctx
- *sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
+ *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
{
/*
* If get_comp_job returns NULL, there are no jobs complete.
@@ -233,11 +233,17 @@ static struct sha512_hash_ctx
* Otherwise, all jobs currently being managed by the hash_ctx_mgr
* still need processing.
*/
+ struct sha512_ctx_mgr *mgr;
struct sha512_hash_ctx *ctx;
+ unsigned long flags;
+ mgr = cstate->mgr;
+ spin_lock_irqsave(&cstate->work_lock, flags);
ctx = (struct sha512_hash_ctx *)
sha512_job_mgr_get_comp_job(&mgr->mgr);
- return sha512_ctx_mgr_resubmit(mgr, ctx);
+ ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
+ spin_unlock_irqrestore(&cstate->work_lock, flags);
+ return ctx;
}
static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
@@ -246,12 +252,17 @@ static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
}
static struct sha512_hash_ctx
- *sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
+ *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
struct sha512_hash_ctx *ctx,
const void *buffer,
uint32_t len,
int flags)
{
+ struct sha512_ctx_mgr *mgr;
+ unsigned long irqflags;
+
+ mgr = cstate->mgr;
+ spin_lock_irqsave(&cstate->work_lock, irqflags);
if (flags & (~HASH_ENTIRE)) {
/*
* User should not pass anything other than FIRST, UPDATE, or
@@ -351,20 +362,26 @@ static struct sha512_hash_ctx
}
}
- return sha512_ctx_mgr_resubmit(mgr, ctx);
+ ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
+ spin_unlock_irqrestore(&cstate->work_lock, irqflags);
+ return ctx;
}
-static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
+static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
{
+ struct sha512_ctx_mgr *mgr;
struct sha512_hash_ctx *ctx;
+ unsigned long flags;
+ mgr = cstate->mgr;
+ spin_lock_irqsave(&cstate->work_lock, flags);
while (1) {
ctx = (struct sha512_hash_ctx *)
sha512_job_mgr_flush(&mgr->mgr);
/* If flush returned 0, there are no more jobs in flight. */
if (!ctx)
- return NULL;
+ break;
/*
* If flush returned a job, resubmit the job to finish
@@ -378,8 +395,10 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
* the sha512_ctx_mgr still need processing. Loop.
*/
if (ctx)
- return ctx;
+ break;
}
+ spin_unlock_irqrestore(&cstate->work_lock, flags);
+ return ctx;
}
static int sha512_mb_init(struct ahash_request *areq)
@@ -439,11 +458,11 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
sha_ctx = (struct sha512_hash_ctx *)
ahash_request_ctx(&rctx->areq);
kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
+ sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
rctx->walk.data, nbytes, flag);
if (!sha_ctx) {
if (flush)
- sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
+ sha_ctx = sha512_ctx_mgr_flush(cstate);
}
kernel_fpu_end();
if (sha_ctx)
@@ -471,11 +490,12 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
struct sha512_hash_ctx *sha_ctx;
struct mcryptd_hash_request_ctx *req_ctx;
int ret;
+ unsigned long flags;
/* remove from work list */
- spin_lock(&cstate->work_lock);
+ spin_lock_irqsave(&cstate->work_lock, flags);
list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
+ spin_unlock_irqrestore(&cstate->work_lock, flags);
if (irqs_disabled())
rctx->complete(&req->base, err);
@@ -486,14 +506,14 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
}
/* check to see if there are other jobs that are done */
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
+ sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
while (sha_ctx) {
req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
ret = sha_finish_walk(&req_ctx, cstate, false);
if (req_ctx) {
- spin_lock(&cstate->work_lock);
+ spin_lock_irqsave(&cstate->work_lock, flags);
list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
+ spin_unlock_irqrestore(&cstate->work_lock, flags);
req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
@@ -504,7 +524,7 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
local_bh_enable();
}
}
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
+ sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
}
return 0;
@@ -515,6 +535,7 @@ static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
{
unsigned long next_flush;
unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
+ unsigned long flags;
/* initialize tag */
rctx->tag.arrival = jiffies; /* tag the arrival time */
@@ -522,9 +543,9 @@ static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
next_flush = rctx->tag.arrival + delay;
rctx->tag.expire = next_flush;
- spin_lock(&cstate->work_lock);
+ spin_lock_irqsave(&cstate->work_lock, flags);
list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
+ spin_unlock_irqrestore(&cstate->work_lock, flags);
mcryptd_arm_flusher(cstate, delay);
}
@@ -565,7 +586,7 @@ static int sha512_mb_update(struct ahash_request *areq)
sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
sha512_mb_add_list(rctx, cstate);
kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
nbytes, HASH_UPDATE);
kernel_fpu_end();
@@ -628,7 +649,7 @@ static int sha512_mb_finup(struct ahash_request *areq)
sha512_mb_add_list(rctx, cstate);
kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+ sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
nbytes, flag);
kernel_fpu_end();
@@ -677,8 +698,7 @@ static int sha512_mb_final(struct ahash_request *areq)
/* flag HASH_FINAL and 0 data size */
sha512_mb_add_list(rctx, cstate);
kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
+ sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
kernel_fpu_end();
/* check if anything is returned */
@@ -940,7 +960,7 @@ static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
break;
kernel_fpu_begin();
sha_ctx = (struct sha512_hash_ctx *)
- sha512_ctx_mgr_flush(cstate->mgr);
+ sha512_ctx_mgr_flush(cstate);
kernel_fpu_end();
if (!sha_ctx) {
pr_err("sha512_mb error: nothing got flushed for"
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
index 3ddba19a0db6..7c629caebc05 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
@@ -280,12 +280,18 @@ ENTRY(sha512_mb_mgr_get_comp_job_avx2)
pop %rbx
ret
ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
-.data
-.align 16
+.section .rodata.cst8.one, "aM", @progbits, 8
+.align 8
one:
.quad 1
+
+.section .rodata.cst8.two, "aM", @progbits, 8
+.align 8
two:
.quad 2
+
+.section .rodata.cst8.three, "aM", @progbits, 8
+.align 8
three:
.quad 3
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
index 815f07bdd1f8..4ba709ba78e5 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
@@ -209,8 +209,9 @@ return_null:
xor job_rax, job_rax
jmp return
ENDPROC(sha512_mb_mgr_submit_avx2)
-.data
+/* UNUSED?
+.section .rodata.cst16, "aM", @progbits, 16
.align 16
H0: .int 0x6a09e667
H1: .int 0xbb67ae85
@@ -220,3 +221,4 @@ H4: .int 0x510e527f
H5: .int 0x9b05688c
H6: .int 0x1f83d9ab
H7: .int 0x5be0cd19
+*/
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
index 31ab1eff6413..e22e907643a6 100644
--- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+++ b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
@@ -361,7 +361,7 @@ Lrounds_16_xx:
ret
ENDPROC(sha512_x4_avx2)
-.data
+.section .rodata.K512_4, "a", @progbits
.align 64
K512_4:
.octa 0x428a2f98d728ae22428a2f98d728ae22,\
@@ -525,5 +525,7 @@ K512_4:
.octa 0x6c44198c4a4758176c44198c4a475817,\
0x6c44198c4a4758176c44198c4a475817
+.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
+.align 32
PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
.octa 0x18191a1b1c1d1e1f1011121314151617
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index e610e29cbc81..66bbd9058a90 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -369,14 +369,17 @@ ENDPROC(sha512_transform_ssse3)
########################################################################
### Binary Data
-.data
-
+.section .rodata.cst16.XMM_QWORD_BSWAP, "aM", @progbits, 16
.align 16
-
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
XMM_QWORD_BSWAP:
.octa 0x08090a0b0c0d0e0f0001020304050607
+# Mergeable 640-byte rodata section. This allows linker to merge the table
+# with other, exactly the same 640-byte fragment of another rodata section
+# (if such section exists).
+.section .rodata.cst640.K512, "aM", @progbits, 640
+.align 64
# K[t] used in SHA512 hashing
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index dc66273e610d..b3f49d286348 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -29,11 +29,13 @@
.file "twofish-avx-x86_64-asm_64.S"
-.data
+.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
-
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16
+.align 16
.Lxts_gf128mul_and_shl1_mask:
.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0