Date:   Fri, 1 Dec 2017 12:32:35 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc:     Steven Rostedt <rostedt@...dmis.org>,
        linux-rt-users@...r.kernel.org, linux-kernel@...r.kernel.org,
        tglx@...utronix.de
Subject: Re: [PATCH RT v2] crypto: limit more FPU-enabled sections

On Fri, Dec 01, 2017 at 11:44:22AM +0100, Sebastian Andrzej Siewior wrote:
> --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
> +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
> @@ -206,6 +206,34 @@ struct crypt_priv {
>  	bool fpu_enabled;
>  };
>  
> +#ifdef CONFIG_PREEMPT_RT_FULL
> +static void camellia_fpu_end_rt(struct crypt_priv *ctx)
> +{
> +       bool fpu_enabled = ctx->fpu_enabled;
> +
> +       if (!fpu_enabled)
> +               return;
> +       camellia_fpu_end(fpu_enabled);
> +       ctx->fpu_enabled = false;
> +}
> +
> +static void camellia_fpu_sched_rt(struct crypt_priv *ctx)
> +{
> +       bool fpu_enabled = ctx->fpu_enabled;
> +
> +       if (!fpu_enabled || !tif_need_resched_now())
> +               return;
> +       camellia_fpu_end(fpu_enabled);
> +       kernel_fpu_end();
> +       /* preemption is enabled here, so we may get scheduled */
> +       kernel_fpu_begin();
> +}

There's a ton of duplication in there; you're not nearly lazy enough.

Why can't we do something simple like kernel_fpu_resched()?

diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index f92a6593de1e..05321b98a55a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -130,6 +130,18 @@ void kernel_fpu_begin(void)
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 
+void kernel_fpu_resched(void)
+{
+	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
+
+	if (should_resched(PREEMPT_OFFSET)) {
+		kernel_fpu_end();
+		cond_resched();
+		kernel_fpu_begin();
+	}
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_resched);
+
 void kernel_fpu_end(void)
 {
 	__kernel_fpu_end();
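
For illustration only, a rough, untested sketch of what a caller could then
look like; the camellia glue helper and macro names below are assumptions
for the sketch, not taken from this thread, only kernel_fpu_resched() is
the new interface:

/*
 * Hypothetical caller shape for camellia_aesni_avx2_glue.c; the helper
 * and macro names are assumed for illustration.
 */
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;

	ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);

	while (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
		camellia_ecb_enc_32way(ctx->ctx, srcdst, srcdst);
		srcdst += CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize;
		nbytes -= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize;

		/*
		 * Drop and retake the in-kernel FPU section only when a
		 * reschedule is actually pending, instead of open-coding
		 * the end/begin dance in every glue file.
		 */
		kernel_fpu_resched();
	}

	/* leftover blocks handled as before */
}

That way the per-algorithm *_fpu_sched_rt() helpers and the RT-only ifdefs
go away entirely.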
