Message-Id: <20220716062920.210381-3-ebiggers@kernel.org>
Date:   Fri, 15 Jul 2022 23:29:19 -0700
From:   Eric Biggers <ebiggers@...nel.org>
To:     linux-crypto@...r.kernel.org
Cc:     linux-kernel@...r.kernel.org,
        "Jason A . Donenfeld " <Jason@...c4.com>
Subject: [PATCH v2 2/3] crypto: lib - move __crypto_xor into utils

From: Eric Biggers <ebiggers@...gle.com>

CRYPTO_LIB_CHACHA depends on CRYPTO because it uses __crypto_xor(),
which is defined in crypto/algapi.c.  This is a layering violation:
dependencies should only go in the other direction (crypto/ =>
lib/crypto/).  Also, the correct dependency would have been
CRYPTO_ALGAPI, not CRYPTO.  Fix this by moving __crypto_xor() into the
utils module in lib/crypto/.
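
A minimal sketch of the resulting Kconfig shape (abridged from the
actual hunks below; help text and the arch-dispatch dependency are
omitted).  lib/crypto/ symbols now select only other lib/crypto/
symbols, and nothing there depends on CRYPTO:

config CRYPTO_LIB_UTILS
	tristate

config CRYPTO_LIB_CHACHA_GENERIC
	tristate
	select CRYPTO_LIB_UTILS

config CRYPTO_LIB_CHACHA
	tristate "ChaCha library interface"
	select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n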

Note that CRYPTO_LIB_CHACHA_GENERIC selected XOR_BLOCKS, which is
unrelated and unnecessary.  It was perhaps thought that XOR_BLOCKS was
needed for __crypto_xor, but that's not the case.
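
For reference, __crypto_xor() is not called directly; callers use the
crypto_xor() and crypto_xor_cpy() inline wrappers from
<crypto/algapi.h>, which fall back to __crypto_xor() for the general
case.  (XOR_BLOCKS instead builds the separate xor_blocks() RAID
helper.)  A minimal usage sketch, with made-up function and buffer
names:

#include <crypto/algapi.h>	/* crypto_xor(), crypto_xor_cpy() */

/*
 * Illustrative sketch only (names are hypothetical): XOR a keystream
 * into a buffer in place, then XOR two buffers into a third.
 */
static void xor_usage_sketch(u8 *buf, const u8 *keystream,
			     u8 *out, const u8 *a, const u8 *b,
			     unsigned int len)
{
	crypto_xor(buf, keystream, len);	/* buf ^= keystream */
	crypto_xor_cpy(out, a, b, len);		/* out = a ^ b */
}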

Signed-off-by: Eric Biggers <ebiggers@...gle.com>
---
 crypto/algapi.c     | 71 -------------------------------------
 lib/crypto/Kconfig  |  3 +-
 lib/crypto/Makefile |  2 +-
 lib/crypto/utils.c  | 85 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 87 insertions(+), 74 deletions(-)
 create mode 100644 lib/crypto/utils.c

diff --git a/crypto/algapi.c b/crypto/algapi.c
index d1c99288af3e0d..5c69ff8e8fa5c1 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -997,77 +997,6 @@ void crypto_inc(u8 *a, unsigned int size)
 }
 EXPORT_SYMBOL_GPL(crypto_inc);
 
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
-{
-	int relalign = 0;
-
-	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
-		int size = sizeof(unsigned long);
-		int d = (((unsigned long)dst ^ (unsigned long)src1) |
-			 ((unsigned long)dst ^ (unsigned long)src2)) &
-			(size - 1);
-
-		relalign = d ? 1 << __ffs(d) : size;
-
-		/*
-		 * If we care about alignment, process as many bytes as
-		 * needed to advance dst and src to values whose alignments
-		 * equal their relative alignment. This will allow us to
-		 * process the remainder of the input using optimal strides.
-		 */
-		while (((unsigned long)dst & (relalign - 1)) && len > 0) {
-			*dst++ = *src1++ ^ *src2++;
-			len--;
-		}
-	}
-
-	while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
-		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
-			u64 l = get_unaligned((u64 *)src1) ^
-				get_unaligned((u64 *)src2);
-			put_unaligned(l, (u64 *)dst);
-		} else {
-			*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
-		}
-		dst += 8;
-		src1 += 8;
-		src2 += 8;
-		len -= 8;
-	}
-
-	while (len >= 4 && !(relalign & 3)) {
-		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
-			u32 l = get_unaligned((u32 *)src1) ^
-				get_unaligned((u32 *)src2);
-			put_unaligned(l, (u32 *)dst);
-		} else {
-			*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
-		}
-		dst += 4;
-		src1 += 4;
-		src2 += 4;
-		len -= 4;
-	}
-
-	while (len >= 2 && !(relalign & 1)) {
-		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
-			u16 l = get_unaligned((u16 *)src1) ^
-				get_unaligned((u16 *)src2);
-			put_unaligned(l, (u16 *)dst);
-		} else {
-			*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
-		}
-		dst += 2;
-		src1 += 2;
-		src2 += 2;
-		len -= 2;
-	}
-
-	while (len--)
-		*dst++ = *src1++ ^ *src2++;
-}
-EXPORT_SYMBOL_GPL(__crypto_xor);
-
 unsigned int crypto_alg_extsize(struct crypto_alg *alg)
 {
 	return alg->cra_ctxsize +
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index b09d9d6546cbc3..7e9683e9f5c636 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -36,7 +36,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
 
 config CRYPTO_LIB_CHACHA_GENERIC
 	tristate
-	select XOR_BLOCKS
+	select CRYPTO_LIB_UTILS
 	help
 	  This symbol can be depended upon by arch implementations of the
 	  ChaCha library interface that require the generic code as a
@@ -46,7 +46,6 @@ config CRYPTO_LIB_CHACHA_GENERIC
 
 config CRYPTO_LIB_CHACHA
 	tristate "ChaCha library interface"
-	depends on CRYPTO
 	depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
 	select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
 	help
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index b956b3bae26aaf..c852f067ab0601 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_CRYPTO_LIB_UTILS)			+= libcryptoutils.o
-libcryptoutils-y				:= memneq.o
+libcryptoutils-y				:= memneq.o utils.o
 
 # chacha is used by the /dev/random driver which is always builtin
 obj-y						+= chacha.o
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
new file mode 100644
index 00000000000000..f20bdb2ae88771
--- /dev/null
+++ b/lib/crypto/utils.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto library utility functions
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@...dor.apana.org.au>
+ */
+
+#include <crypto/algapi.h>
+#include <asm/unaligned.h>
+
+/*
+ * XOR @len bytes from @src1 and @src2 together, writing the result to @dst
+ * (which may alias one of the sources).  Don't call this directly; call
+ * crypto_xor() or crypto_xor_cpy() instead.
+ */
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
+{
+	int relalign = 0;
+
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+		int size = sizeof(unsigned long);
+		int d = (((unsigned long)dst ^ (unsigned long)src1) |
+			 ((unsigned long)dst ^ (unsigned long)src2)) &
+			(size - 1);
+
+		relalign = d ? 1 << __ffs(d) : size;
+
+		/*
+		 * If we care about alignment, process as many bytes as
+		 * needed to advance dst and src to values whose alignments
+		 * equal their relative alignment. This will allow us to
+		 * process the remainder of the input using optimal strides.
+		 */
+		while (((unsigned long)dst & (relalign - 1)) && len > 0) {
+			*dst++ = *src1++ ^ *src2++;
+			len--;
+		}
+	}
+
+	while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
+		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+			u64 l = get_unaligned((u64 *)src1) ^
+				get_unaligned((u64 *)src2);
+			put_unaligned(l, (u64 *)dst);
+		} else {
+			*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+		}
+		dst += 8;
+		src1 += 8;
+		src2 += 8;
+		len -= 8;
+	}
+
+	while (len >= 4 && !(relalign & 3)) {
+		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+			u32 l = get_unaligned((u32 *)src1) ^
+				get_unaligned((u32 *)src2);
+			put_unaligned(l, (u32 *)dst);
+		} else {
+			*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+		}
+		dst += 4;
+		src1 += 4;
+		src2 += 4;
+		len -= 4;
+	}
+
+	while (len >= 2 && !(relalign & 1)) {
+		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+			u16 l = get_unaligned((u16 *)src1) ^
+				get_unaligned((u16 *)src2);
+			put_unaligned(l, (u16 *)dst);
+		} else {
+			*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+		}
+		dst += 2;
+		src1 += 2;
+		src2 += 2;
+		len -= 2;
+	}
+
+	while (len--)
+		*dst++ = *src1++ ^ *src2++;
+}
+EXPORT_SYMBOL_GPL(__crypto_xor);
-- 
2.37.0
