From 84e31fdb7c797a7303e0cc295cb9bc8b73fb872d Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Sat, 14 Jan 2012 21:27:37 +0300
Subject: crypto: sha512 - make it work, undo percpu message schedule

commit f9e2bca6c22d75a289a349f869701214d63b5060
aka "crypto: sha512 - Move message schedule W[80] to static percpu area"
created a global message schedule area.

If sha512_update() is ever entered twice, the hash will be silently
calculated incorrectly.

Probably the easiest way to notice incorrect hashes being calculated is
to run 2 ping floods over AH with hmac(sha512):

    #!/usr/sbin/setkey -f
    flush;
    spdflush;
    add IP1 IP2 ah 25 -A hmac-sha512 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000025;
    add IP2 IP1 ah 52 -A hmac-sha512 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000052;
    spdadd IP1 IP2 any -P out ipsec ah/transport//require;
    spdadd IP2 IP1 any -P in ipsec ah/transport//require;

XfrmInStateProtoError will start ticking, with -EBADMSG being returned
from ah_input(). This never happens with, say, hmac(sha1).

With the patch applied (on BOTH sides), XfrmInStateProtoError does not
tick with multiple bidirectional ping flood streams, just as it does not
tick with SHA-1.

After this patch sha512_transform() will start using ~750 bytes of stack
on x86_64. This is OK for simple loads; for something heavier, stack
reduction will be done separately.

Signed-off-by: Alexey Dobriyan
Cc: stable@vger.kernel.org
Signed-off-by: Herbert Xu
---
 crypto/sha512_generic.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'crypto')

diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 9ed9f60316e5..8b9035b0189c 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -21,8 +21,6 @@
 #include <linux/percpu.h>
 #include <asm/byteorder.h>
 
-static DEFINE_PER_CPU(u64[80], msg_schedule);
-
 static inline u64 Ch(u64 x, u64 y, u64 z)
 {
         return z ^ (x & (y ^ z));
@@ -89,7 +87,7 @@ sha512_transform(u64 *state, const u8 *input)
         u64 a, b, c, d, e, f, g, h, t1, t2;
         int i;
-        u64 *W = get_cpu_var(msg_schedule);
+        u64 W[80];
 
         /* load the input */
         for (i = 0; i < 16; i++)
@@ -128,8 +126,6 @@ sha512_transform(u64 *state, const u8 *input)
 
         /* erase our data */
         a = b = c = d = e = f = g = h = t1 = t2 = 0;
-        memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
-        put_cpu_var(msg_schedule);
 }
 
 static int
--
cgit v1.2.3


From 51fc6dc8f948047364f7d42a4ed89b416c6cc0a3 Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Sat, 14 Jan 2012 21:40:57 +0300
Subject: crypto: sha512 - reduce stack usage to safe number

For rounds 16--79, W[i] only depends on W[i - 2], W[i - 7], W[i - 15]
and W[i - 16]. Consequently, keeping the whole W[80] array on the stack
is unnecessary; only 16 values are really needed.

Using W[16] instead of W[80] greatly reduces stack usage
(~750 bytes down to ~340 bytes on x86_64).

Line by line explanation:

* The array used by BLEND_OP is "circular" now, so all indexes have to
  be taken modulo 16. The round number is positive, so the remainder
  operation should hold no surprises (a standalone sketch of the
  circular schedule follows this list).

* The initial full message scheduling is trimmed to the first 16
  values, which come straight from the data block; the rest is
  calculated just before it is needed.

* The original loop body is an unrolled version of the new SHA512_0_15
  and SHA512_16_79 macros; unrolling was done to avoid explicit
  variable renaming. Otherwise it is the very same code after
  preprocessing. See the sha1_transform() code, which uses the same
  trick.
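To see why 16 slots suffice, note that W[i - 2], W[i - 7], W[i - 15] and
W[i - 16] all lie within the last 16 schedule words, so W[i] can simply
overwrite the slot that currently holds W[i - 16]. The following is a
minimal standalone sketch of that circular schedule (not kernel code:
rotr64, s0 and s1 are local stand-ins for the kernel's ror64() and the
s0/s1 macros, and the 16 input words are arbitrary test values). It
checks that the 16-word ring produces exactly the same schedule words as
the full 80-word array:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t rotr64(uint64_t x, unsigned int n)
    {
        return (x >> n) | (x << (64 - n));
    }

    #define s0(x) (rotr64(x, 1) ^ rotr64(x, 8) ^ ((x) >> 7))
    #define s1(x) (rotr64(x, 19) ^ rotr64(x, 61) ^ ((x) >> 6))

    int main(void)
    {
        uint64_t W80[80], W16[16];
        int i, ok = 1;

        /* any 16 input words will do for the comparison */
        for (i = 0; i < 16; i++)
            W80[i] = W16[i] = 0x0123456789abcdefULL * (i + 1);

        /* classic full schedule, one fresh slot per round */
        for (i = 16; i < 80; i++)
            W80[i] = s1(W80[i - 2]) + W80[i - 7] + s0(W80[i - 15]) + W80[i - 16];

        /* circular schedule: slot i % 16 still holds W[i - 16],
         * so "+=" adds the three new terms on top of it */
        for (i = 16; i < 80; i++) {
            W16[i % 16] += s1(W16[(i - 2) % 16]) + W16[(i - 7) % 16]
                         + s0(W16[(i - 15) % 16]);
            if (W16[i % 16] != W80[i])
                ok = 0;
        }
        printf(ok ? "schedules match\n" : "schedules differ\n");
        return 0;
    }

Only the indexing differs; the values are identical, which is why the
unrolled rounds below can safely read W[(i) % 16].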
Patch survives the in-tree crypto test and the original bug report test
(ping flood with hmac(sha512)).

See FIPS 180-2 for the SHA-512 definition:
http://csrc.nist.gov/publications/fips/fips180-2/fips180-2withchangenotice.pdf

Signed-off-by: Alexey Dobriyan
Cc: stable@vger.kernel.org
Signed-off-by: Herbert Xu
---
 crypto/sha512_generic.c | 58 +++++++++++++++++++++++++++++--------------------
 1 file changed, 34 insertions(+), 24 deletions(-)

(limited to 'crypto')

diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 8b9035b0189c..88f160b77b1f 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -78,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 
 static inline void BLEND_OP(int I, u64 *W)
 {
-        W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+        W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
 }
 
 static void
@@ -87,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
         u64 a, b, c, d, e, f, g, h, t1, t2;
         int i;
-        u64 W[80];
+        u64 W[16];
 
         /* load the input */
         for (i = 0; i < 16; i++)
                 LOAD_OP(i, W, input);
 
-        for (i = 16; i < 80; i++) {
-                BLEND_OP(i, W);
-        }
-
         /* load the state into our registers */
         a=state[0];   b=state[1];   c=state[2];   d=state[3];
         e=state[4];   f=state[5];   g=state[6];   h=state[7];
 
-        /* now iterate */
-        for (i=0; i<80; i+=8) {
-                t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[i  ];
-                t2 = e0(a) + Maj(a,b,c);    d+=t1;    h=t1+t2;
-                t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
-                t2 = e0(h) + Maj(h,a,b);    c+=t1;    g=t1+t2;
-                t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
-                t2 = e0(g) + Maj(g,h,a);    b+=t1;    f=t1+t2;
-                t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
-                t2 = e0(f) + Maj(f,g,h);    a+=t1;    e=t1+t2;
-                t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
-                t2 = e0(e) + Maj(e,f,g);    h+=t1;    d=t1+t2;
-                t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
-                t2 = e0(d) + Maj(d,e,f);    g+=t1;    c=t1+t2;
-                t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
-                t2 = e0(c) + Maj(c,d,e);    f+=t1;    b=t1+t2;
-                t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
-                t2 = e0(b) + Maj(b,c,d);    e+=t1;    a=t1+t2;
+#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
+        t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
+        t2 = e0(a) + Maj(a, b, c); \
+        d += t1; \
+        h = t1 + t2
+
+#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
+        BLEND_OP(i, W); \
+        t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
+        t2 = e0(a) + Maj(a, b, c); \
+        d += t1; \
+        h = t1 + t2
+
+        for (i = 0; i < 16; i += 8) {
+                SHA512_0_15(i, a, b, c, d, e, f, g, h);
+                SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
+                SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
+                SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
+                SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
+                SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
+                SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
+                SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
+        }
+        for (i = 16; i < 80; i += 8) {
+                SHA512_16_79(i, a, b, c, d, e, f, g, h);
+                SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
+                SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
+                SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
+                SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
+                SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
+                SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
+                SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
         }
 
         state[0] += a; state[1] += b; state[2] += c; state[3] += d;
--
cgit v1.2.3
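The claim above that the unrolled macro form is "the very same code
after preprocessing" can be checked in isolation. Below is a standalone
sketch, not kernel code: rotr64 stands in for ror64(), and K(i)/Wd(i)
are arbitrary stand-ins for sha512_K[i] and the schedule words. It runs
16 rounds once with the classic register-renaming loop and once with the
8-way unrolled macro whose arguments are rotated by one position per
round, then compares the resulting state:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t rotr64(uint64_t x, unsigned int n)
    {
        return (x >> n) | (x << (64 - n));
    }

    #define Ch(x, y, z)  ((z) ^ ((x) & ((y) ^ (z))))
    #define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y))))
    #define e0(x) (rotr64(x, 28) ^ rotr64(x, 34) ^ rotr64(x, 39))
    #define e1(x) (rotr64(x, 14) ^ rotr64(x, 18) ^ rotr64(x, 41))

    /* arbitrary stand-ins for sha512_K[i] and the schedule word W[i] */
    #define K(i)  ((uint64_t)(i) * 0x9e3779b97f4a7c15ULL + 1)
    #define Wd(i) ((uint64_t)(i) * 0x0123456789abcdefULL + 2)

    /* round body in the style of SHA512_0_15: writes only d and h */
    #define RND(a, b, c, d, e, f, g, h, i) \
        do { \
            t1 = h + e1(e) + Ch(e, f, g) + K(i) + Wd(i); \
            t2 = e0(a) + Maj(a, b, c); \
            d += t1; \
            h = t1 + t2; \
        } while (0)

    int main(void)
    {
        uint64_t a = 1, b = 2, c = 3, d = 4, e = 5, f = 6, g = 7, h = 8;
        uint64_t A = 1, B = 2, C = 3, D = 4, E = 5, F = 6, G = 7, H = 8;
        uint64_t t1, t2;
        int i;

        /* classic form: one round body plus explicit register renaming */
        for (i = 0; i < 16; i++) {
            t1 = h + e1(e) + Ch(e, f, g) + K(i) + Wd(i);
            t2 = e0(a) + Maj(a, b, c);
            h = g; g = f; f = e; e = d + t1;
            d = c; c = b; b = a; a = t1 + t2;
        }

        /* unrolled form: same body, arguments rotated one step per round */
        for (i = 0; i < 16; i += 8) {
            RND(A, B, C, D, E, F, G, H, i);
            RND(H, A, B, C, D, E, F, G, i + 1);
            RND(G, H, A, B, C, D, E, F, i + 2);
            RND(F, G, H, A, B, C, D, E, i + 3);
            RND(E, F, G, H, A, B, C, D, i + 4);
            RND(D, E, F, G, H, A, B, C, i + 5);
            RND(C, D, E, F, G, H, A, B, i + 6);
            RND(B, C, D, E, F, G, H, A, i + 7);
        }

        printf("%s\n", (a == A && b == B && c == C && d == D &&
                        e == E && f == F && g == G && h == H) ?
               "identical state" : "states differ");
        return 0;
    }

Because the argument rotation returns to the starting assignment every
8 rounds, the two forms leave the eight state words holding identical
values; only the register shuffle is gone.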
From 58d7d18b5268febb8b1391c6dffc8e2aaa751fcd Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Thu, 26 Jan 2012 15:03:16 +1100
Subject: crypto: sha512 - Use binary and instead of modulus

The previous patch used the modulus operator over a power of 2
unnecessarily, which may produce suboptimal binary code. This patch
changes them to binary ANDs instead.

Signed-off-by: Herbert Xu
---
 crypto/sha512_generic.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'crypto')

diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 88f160b77b1f..3edebfd4dbec 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -78,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 
 static inline void BLEND_OP(int I, u64 *W)
 {
-        W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
+        W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
 }
 
 static void
@@ -105,7 +105,7 @@ sha512_transform(u64 *state, const u8 *input)
 
 #define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
         BLEND_OP(i, W); \
-        t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
+        t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)&15]; \
         t2 = e0(a) + Maj(a, b, c); \
         d += t1; \
         h = t1 + t2
--
cgit v1.2.3
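The change relies only on the fact that, for non-negative values,
i % 16 and i & 15 select the same slot; the AND form merely spells out
what the compiler is allowed to (but, per the commit message, may not)
generate on its own. A trivial standalone check of that identity over
the 80 round indexes:

    #include <stdio.h>

    int main(void)
    {
        int i, ok = 1;

        /* round numbers are non-negative, so % 16 and & 15 agree */
        for (i = 0; i < 80; i++)
            if (i % 16 != (i & 15))
                ok = 0;
        printf("i %% 16 == (i & 15) for rounds 0..79: %s\n",
               ok ? "yes" : "no");
        return 0;
    }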
From 3a92d687c8015860a19213e3c102cad6b722f83c Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sun, 5 Feb 2012 15:09:28 +1100
Subject: crypto: sha512 - Avoid stack bloat on i386

Unfortunately, in reducing W from 80 to 16 we ended up unrolling the
loop twice. As gcc has issues dealing with 64-bit ops on i386, this
means that we end up using even more stack space (>1K).

This patch solves the W reduction by moving LOAD_OP/BLEND_OP into the
loop itself, thus avoiding the need to duplicate it.

While the stack space still isn't great (>0.5K), it is at least in the
same ball park as the amount of stack used by our C sha1
implementation.

Note that this patch basically reverts to the original code, so the
diff looks bigger than it really is.

Cc: stable@vger.kernel.org
Signed-off-by: Herbert Xu
---
 crypto/sha512_generic.c | 68 +++++++++++++++++++++++--------------------------
 1 file changed, 32 insertions(+), 36 deletions(-)

(limited to 'crypto')

diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 3edebfd4dbec..f04af931a682 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -89,46 +89,42 @@ sha512_transform(u64 *state, const u8 *input)
         int i;
         u64 W[16];
 
-        /* load the input */
-        for (i = 0; i < 16; i++)
-                LOAD_OP(i, W, input);
-
         /* load the state into our registers */
         a=state[0];   b=state[1];   c=state[2];   d=state[3];
         e=state[4];   f=state[5];   g=state[6];   h=state[7];
 
-#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
-        t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
-        t2 = e0(a) + Maj(a, b, c); \
-        d += t1; \
-        h = t1 + t2
-
-#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
-        BLEND_OP(i, W); \
-        t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)&15]; \
-        t2 = e0(a) + Maj(a, b, c); \
-        d += t1; \
-        h = t1 + t2
-
-        for (i = 0; i < 16; i += 8) {
-                SHA512_0_15(i, a, b, c, d, e, f, g, h);
-                SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
-                SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
-                SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
-                SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
-                SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
-                SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
-                SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
-        }
-        for (i = 16; i < 80; i += 8) {
-                SHA512_16_79(i, a, b, c, d, e, f, g, h);
-                SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
-                SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
-                SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
-                SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
-                SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
-                SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
-                SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
+        /* now iterate */
+        for (i=0; i<80; i+=8) {
+                if (!(i & 8)) {
+                        int j;
+
+                        if (i < 16) {
+                                /* load the input */
+                                for (j = 0; j < 16; j++)
+                                        LOAD_OP(i + j, W, input);
+                        } else {
+                                for (j = 0; j < 16; j++) {
+                                        BLEND_OP(i + j, W);
+                                }
+                        }
+                }
+
+                t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[(i & 15)];
+                t2 = e0(a) + Maj(a,b,c);    d+=t1;    h=t1+t2;
+                t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
+                t2 = e0(h) + Maj(h,a,b);    c+=t1;    g=t1+t2;
+                t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
+                t2 = e0(g) + Maj(g,h,a);    b+=t1;    f=t1+t2;
+                t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
+                t2 = e0(f) + Maj(f,g,h);    a+=t1;    e=t1+t2;
+                t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];
+                t2 = e0(e) + Maj(e,f,g);    h+=t1;    d=t1+t2;
+                t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];
+                t2 = e0(d) + Maj(d,e,f);    g+=t1;    c=t1+t2;
+                t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];
+                t2 = e0(c) + Maj(c,d,e);    f+=t1;    b=t1+t2;
+                t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7];
+                t2 = e0(b) + Maj(b,c,d);    e+=t1;    a=t1+t2;
         }
 
         state[0] += a; state[1] += b; state[2] += c; state[3] += d;
--
cgit v1.2.3
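The !(i & 8) test above is easy to misread. Since the round loop steps
i by 8, it is true on every other pass, i.e. once per 16 rounds, which
is exactly when the 16-entry W[] window needs to be refilled. A tiny
standalone sketch of when the refill fires:

    #include <stdio.h>

    int main(void)
    {
        int i;

        /* i steps by 8, so !(i & 8) holds on every second pass:
         * W[0..15] is loaded before round 0 and blended before
         * rounds 16, 32, 48 and 64 */
        for (i = 0; i < 80; i += 8)
            if (!(i & 8))
                printf("%s W[0..15] before round %d\n",
                       i < 16 ? "load" : "blend", i);
        return 0;
    }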
From f2ea0f5f04c97b48c88edccba52b0682fbe45087 Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Sat, 14 Jan 2012 21:44:49 +0300
Subject: crypto: sha512 - use standard ror64()

Use the standard ror64() instead of a hand-written rotate. There is no
standard ror64() yet, so create it.

The only difference is the shift value being "unsigned int" instead of
uint64_t (for which there is no reason). gcc then starts to emit native
ROR instructions, which it currently does not do for some reason. This
should make the code faster.

Patch survives the in-tree crypto test and a ping flood with
hmac(sha512) on.

Signed-off-by: Alexey Dobriyan
Signed-off-by: Herbert Xu
---
 crypto/sha512_generic.c | 13 ++++---------
 include/linux/bitops.h  | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+), 9 deletions(-)

(limited to 'crypto')

diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index f04af931a682..107f6f7be5e1 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -31,11 +31,6 @@ static inline u64 Maj(u64 x, u64 y, u64 z)
         return (x & y) | (z & (x | y));
 }
 
-static inline u64 RORu64(u64 x, u64 y)
-{
-        return (x >> y) | (x << (64 - y));
-}
-
 static const u64 sha512_K[80] = {
         0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
         0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
@@ -66,10 +61,10 @@ static const u64 sha512_K[80] = {
         0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
 };
 
-#define e0(x)       (RORu64(x,28) ^ RORu64(x,34) ^ RORu64(x,39))
-#define e1(x)       (RORu64(x,14) ^ RORu64(x,18) ^ RORu64(x,41))
-#define s0(x)       (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7))
-#define s1(x)       (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6))
+#define e0(x)       (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39))
+#define e1(x)       (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41))
+#define s0(x)       (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
+#define s1(x)       (ror64(x,19) ^ ror64(x,61) ^ (x >> 6))
 
 static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 {
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..fc8a3ffce320 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -49,6 +49,26 @@ static inline unsigned long hweight_long(unsigned long w)
 {
         return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+        return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+        return (word >> shift) | (word << (64 - shift));
+}
+
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
--
cgit v1.2.3
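For reference, a userspace sketch of the new ror64() (the kernel version
lives in include/linux/bitops.h as added above). As with rol32()/ror32(),
a shift of 0 would make the complementary shift undefined in C, so the
shift is assumed to be 1..63 -- which covers everything sha512_generic.c
uses:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* mirror of the kernel's ror64(); shift assumed to be in 1..63 */
    static inline uint64_t ror64(uint64_t word, unsigned int shift)
    {
        return (word >> shift) | (word << (64 - shift));
    }

    int main(void)
    {
        uint64_t x = 0x8000000000000001ULL;

        /* bit 0 wraps around to bit 63: prints 0xc000000000000000 */
        printf("ror64(%#" PRIx64 ", 1) = %#" PRIx64 "\n", x, ror64(x, 1));
        return 0;
    }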