[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <Z_SGBC1pgraNHprS@gondor.apana.org.au>
Date: Tue, 8 Apr 2025 10:12:20 +0800
From: Herbert Xu <herbert@...dor.apana.org.au>
To: Eric Biggers <ebiggers@...nel.org>
Cc: Ard Biesheuvel <ardb@...nel.org>, linux-crypto@...r.kernel.org,
linux-kernel@...r.kernel.org, x86@...nel.org,
"Jason A. Donenfeld" <Jason@...c4.com>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [PATCH] crypto: x86/chacha - Restore SSSE3 fallback path
On Mon, Apr 07, 2025 at 09:48:42AM -0700, Eric Biggers wrote:
>
> First, there doesn't seem to be agreement yet that the library functions should
> have requirements on the calling context.
Do you have a real example of hard IRQ usage for chacha? Not some
imaginary post-crash scenario that ends up calling into generic code.
And if you really wanted to do that, it's much better to fix up
kernel_fpu_begin to support hard IRQs rather than adding useless
may_use_simd() checks all over the place.
> Second, your patch made unrelated changes that deleted the checks for SSSE3
> support. Thus dropping support for CPUs that don't support SSSE3.
Sorry. That was an oversight.
---8<---
The chacha_use_simd static branch is needed to support x86 machines
that lack SSSE3. Restore it and the generic fallback code.
Reported-by: Eric Biggers <ebiggers@...nel.org>
Fixes: 9b4400215e0e ("crypto: x86/chacha - Remove SIMD fallback path")
Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index b7fd7a1f0e15..fcc14c006bde 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -5,11 +5,12 @@
* Copyright (C) 2015 Martin Willi
*/
+#include <asm/simd.h>
#include <crypto/chacha.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
-#include <asm/simd.h>
asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
@@ -31,6 +32,7 @@ asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
@@ -117,15 +119,23 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
- kernel_fpu_begin();
- hchacha_block_ssse3(state, stream, nrounds);
- kernel_fpu_end();
+ if (!static_branch_likely(&chacha_use_simd)) {
+ hchacha_block_generic(state, stream, nrounds);
+ } else {
+ kernel_fpu_begin();
+ hchacha_block_ssse3(state, stream, nrounds);
+ kernel_fpu_end();
+ }
}
EXPORT_SYMBOL(hchacha_block_arch);
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
int nrounds)
{
+ if (!static_branch_likely(&chacha_use_simd) ||
+ bytes <= CHACHA_BLOCK_SIZE)
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
do {
unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
@@ -142,7 +152,7 @@ EXPORT_SYMBOL(chacha_crypt_arch);
bool chacha_is_arch_optimized(void)
{
- return true;
+ return static_key_enabled(&chacha_use_simd);
}
EXPORT_SYMBOL(chacha_is_arch_optimized);
@@ -151,6 +161,8 @@ static int __init chacha_simd_mod_init(void)
if (!boot_cpu_has(X86_FEATURE_SSSE3))
return 0;
+ static_branch_enable(&chacha_use_simd);
+
if (boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
--
Email: Herbert Xu <herbert@...dor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Powered by blists - more mailing lists