author     Eric Biggers <ebiggers@google.com>  2022-07-25 11:36:35 -0700
committer  Herbert Xu <herbert@gondor.apana.org.au>  2022-08-19 18:39:34 +0800
commit     6e78ad0bb45dd20b3c1a56c72a32e1d82f98b422 (patch)
tree       65ac4a9b20b465269f3bfc453cc0c773525faa5b /crypto/algapi.c
parent     7033b937e21b12629d920e7864c20c46bc4ccf39 (diff)
download   linux-6e78ad0bb45dd20b3c1a56c72a32e1d82f98b422.tar.bz2
crypto: lib - move __crypto_xor into utils
CRYPTO_LIB_CHACHA depends on CRYPTO for __crypto_xor, defined in
crypto/algapi.c. This is a layering violation because the dependencies
should only go in the other direction (crypto/ => lib/crypto/). Also
the correct dependency would be CRYPTO_ALGAPI, not CRYPTO. Fix this by
moving __crypto_xor into the utils module in lib/crypto/.

Note that CRYPTO_LIB_CHACHA_GENERIC selected XOR_BLOCKS, which is
unrelated and unnecessary. It was perhaps thought that XOR_BLOCKS was
needed for __crypto_xor, but that's not the case.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
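[For illustration only, not part of the patch: a minimal sketch of a call
site for the helper being moved. The prototype matches the definition
removed below; the wrapper name, buffer names, and 16-byte length are
assumptions.]

/*
 * Hedged sketch of a __crypto_xor() call site. Only the prototype is
 * taken from the patch; everything else here is illustrative.
 */
#include <linux/types.h>

void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len);

static void xor_keystream(u8 *out, const u8 *in, const u8 *keystream)
{
	/* out[i] = in[i] ^ keystream[i] for each of the 16 bytes */
	__crypto_xor(out, in, keystream, 16);
}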
Diffstat (limited to 'crypto/algapi.c')
-rw-r--r--  crypto/algapi.c  71
1 file changed, 0 insertions(+), 71 deletions(-)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d1c99288af3e..5c69ff8e8fa5 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -997,77 +997,6 @@ void crypto_inc(u8 *a, unsigned int size)
}
EXPORT_SYMBOL_GPL(crypto_inc);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
-{
- int relalign = 0;
-
- if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- int size = sizeof(unsigned long);
- int d = (((unsigned long)dst ^ (unsigned long)src1) |
- ((unsigned long)dst ^ (unsigned long)src2)) &
- (size - 1);
-
- relalign = d ? 1 << __ffs(d) : size;
-
- /*
- * If we care about alignment, process as many bytes as
- * needed to advance dst and src to values whose alignments
- * equal their relative alignment. This will allow us to
- * process the remainder of the input using optimal strides.
- */
- while (((unsigned long)dst & (relalign - 1)) && len > 0) {
- *dst++ = *src1++ ^ *src2++;
- len--;
- }
- }
-
- while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- u64 l = get_unaligned((u64 *)src1) ^
- get_unaligned((u64 *)src2);
- put_unaligned(l, (u64 *)dst);
- } else {
- *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
- }
- dst += 8;
- src1 += 8;
- src2 += 8;
- len -= 8;
- }
-
- while (len >= 4 && !(relalign & 3)) {
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- u32 l = get_unaligned((u32 *)src1) ^
- get_unaligned((u32 *)src2);
- put_unaligned(l, (u32 *)dst);
- } else {
- *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
- }
- dst += 4;
- src1 += 4;
- src2 += 4;
- len -= 4;
- }
-
- while (len >= 2 && !(relalign & 1)) {
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- u16 l = get_unaligned((u16 *)src1) ^
- get_unaligned((u16 *)src2);
- put_unaligned(l, (u16 *)dst);
- } else {
- *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
- }
- dst += 2;
- src1 += 2;
- src2 += 2;
- len -= 2;
- }
-
- while (len--)
- *dst++ = *src1++ ^ *src2++;
-}
-EXPORT_SYMBOL_GPL(__crypto_xor);
-
unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize +
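[The alignment trick in the removed code rewards a closer look: XOR-ing
the pointer values exposes the bit positions in which they differ, so the
lowest set bit of ((dst ^ src1) | (dst ^ src2)), masked to the word size,
is the largest power-of-two stride at which all three pointers stay
mutually aligned. A hedged, standalone userspace sketch of that
computation; ffs() from <strings.h> stands in for the kernel's zero-based
__ffs().]

#include <stdio.h>
#include <strings.h>

/*
 * Mirrors the relalign computation in the removed __crypto_xor():
 * the largest power-of-two stride at which dst, src1, and src2 are
 * mutually aligned, capped at sizeof(unsigned long).
 */
static int relative_alignment(const void *dst, const void *src1,
			      const void *src2)
{
	int size = sizeof(unsigned long);
	unsigned long d = (((unsigned long)dst ^ (unsigned long)src1) |
			   ((unsigned long)dst ^ (unsigned long)src2)) &
			  (size - 1);

	/* ffs() is one-based, the kernel's __ffs() is zero-based */
	return d ? 1 << (ffs((int)d) - 1) : size;
}

int main(void)
{
	char buf[32];

	/*
	 * dst at offset 0, sources at offsets 4 and 12: the pointers
	 * agree modulo 4 but not modulo 8, so 32-bit strides are safe
	 * and this prints 4.
	 */
	printf("%d\n", relative_alignment(buf, buf + 4, buf + 12));
	return 0;
}

[With relalign == 4, the removed function's 8-byte loop is skipped
(relalign & 7 is nonzero) and the 4-byte loop does the bulk of the work;
on architectures with HAVE_EFFICIENT_UNALIGNED_ACCESS, relalign stays 0
and every loop runs using get_unaligned()/put_unaligned() instead.]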