Message-ID: <20250517022428.401622-6-ebiggers@kernel.org>
Date: Fri, 16 May 2025 19:24:25 -0700
From: Eric Biggers <ebiggers@...nel.org>
To: linux-crypto@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH 5/8] Revert "crypto: arm64/sha256 - Add simd block function"

From: Eric Biggers <ebiggers@...gle.com>

This reverts commit adcb9e32e5e28935ec1148e1a314282a7367428d, which got
pushed out despite being nacked.

That commit added a special low-level interface to allow the
crypto_shash API to bypass the safety check for using kernel-mode NEON.
It could give a marginal performance benefit for crypto_shash, but it
is just not worth the complexity and the footgun. Moreover, the
distinction between "arch" and "simd" is confusing and is not something
that really should exist in generic code, given that different
architectures can mean different things by "simd".
Signed-off-by: Eric Biggers <ebiggers@...gle.com>
---
arch/arm64/crypto/sha512-glue.c | 6 +++---
arch/arm64/lib/crypto/Kconfig | 1 -
arch/arm64/lib/crypto/sha2-armv8.pl | 2 +-
arch/arm64/lib/crypto/sha256.c | 14 +++++++-------
4 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 15aa9d8b7b2c4..ab2e1c13dfadc 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -16,17 +16,17 @@ MODULE_AUTHOR("Andy Polyakov <appro@...nssl.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@...aro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
-asmlinkage void sha512_blocks_arch(u64 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
+ unsigned int num_blks);

static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
int blocks)
{
- sha512_blocks_arch(sst->state, src, blocks);
+ sha512_block_data_order(sst->state, src, blocks);
}

static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
diff --git a/arch/arm64/lib/crypto/Kconfig b/arch/arm64/lib/crypto/Kconfig
index 129a7685cb4c1..49e57bfdb5b52 100644
--- a/arch/arm64/lib/crypto/Kconfig
+++ b/arch/arm64/lib/crypto/Kconfig
@@ -15,6 +15,5 @@ config CRYPTO_POLY1305_NEON

config CRYPTO_SHA256_ARM64
tristate
default CRYPTO_LIB_SHA256
select CRYPTO_ARCH_HAVE_LIB_SHA256
- select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
diff --git a/arch/arm64/lib/crypto/sha2-armv8.pl b/arch/arm64/lib/crypto/sha2-armv8.pl
index 4aebd20c498bc..35ec9ae99fe16 100644
--- a/arch/arm64/lib/crypto/sha2-armv8.pl
+++ b/arch/arm64/lib/crypto/sha2-armv8.pl
@@ -93,11 +93,11 @@ if ($output =~ /512/) {
@sigma1=(17,19,10);
$rounds=64;
$reg_t="w";
}

-$func="sha${BITS}_blocks_arch";
+$func="sha${BITS}_block_data_order";

($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));

@X=map("$reg_t$_",(3..15,0..2));
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
diff --git a/arch/arm64/lib/crypto/sha256.c b/arch/arm64/lib/crypto/sha256.c
index bcf7a3adc0c46..fb9bff40357be 100644
--- a/arch/arm64/lib/crypto/sha256.c
+++ b/arch/arm64/lib/crypto/sha256.c
@@ -4,29 +4,29 @@
*
* Copyright 2025 Google LLC
*/

#include <asm/neon.h>
#include <crypto/internal/sha2.h>
+#include <crypto/internal/simd.h>
#include <linux/kernel.h>
#include <linux/module.h>

-asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks);
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
asmlinkage void sha256_block_neon(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks);
asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks);

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);

-void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- static_branch_likely(&have_neon)) {
+ static_branch_likely(&have_neon) && crypto_simd_usable()) {
if (static_branch_likely(&have_ce)) {
do {
size_t rem;

kernel_neon_begin();
@@ -40,14 +40,14 @@ void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
kernel_neon_begin();
sha256_block_neon(state, data, nblocks);
kernel_neon_end();
}
} else {
- sha256_blocks_arch(state, data, nblocks);
+ sha256_block_data_order(state, data, nblocks);
}
}
-EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);

bool sha256_is_arch_optimized(void)
{
/* We always can use at least the ARM64 scalar implementation. */
return true;
--
2.49.0