Message-ID: <20220501193102.704267030@linutronix.de>
Date:   Sun,  1 May 2022 21:31:47 +0200 (CEST)
From:   Thomas Gleixner <tglx@...utronix.de>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     x86@...nel.org, Filipe Manana <fdmanana@...e.com>
Subject: [patch 3/3] x86/fpu: Make FPU protection more robust

FPU state maintenance is protected by fpregs_lock(), which is a wrapper
around local_bh_disable() on non-RT kernels and preempt_disable() on RT
kernels. In-kernel FPU usage has its own protection via a per CPU
variable.
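
For reference, a condensed sketch of that separation (both pieces appear
in full in the diff below):

	/* Set only by kernel_fpu_begin()/kernel_fpu_end() */
	static DEFINE_PER_CPU(bool, in_kernel_fpu);

	static inline void fpregs_lock(void)
	{
		/* Disables BH/preemption, but leaves no per CPU trace */
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
			local_bh_disable();
		else
			preempt_disable();
	}

kernel_fpu_usable() therefore has to infer from the execution context
whether the CPU might be inside a fpregs_lock()'ed region.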

This separation is pointless and error-prone, as a recently discovered
wrong condition for granting in-kernel FPU usage has shown.

Make the whole FPU state protection simpler and more robust by using the
per CPU usage variable for all FPU operations so state is tracked
consistently.
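
The calling convention for in-kernel FPU users does not change. A
minimal, hypothetical example (the function name and SIMD payload are
illustrative only):

	#include <asm/fpu/api.h>

	static void example_simd_op(void)
	{
		if (!kernel_fpu_usable())
			return;		/* fall back to scalar code */

		kernel_fpu_begin();	/* disables preemption, sets fpu_in_use */
		/* ... SIMD instructions ... */
		kernel_fpu_end();	/* clears fpu_in_use */
	}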

Change the related WARN_ON_FPU() instances to WARN_ON_ONCE(), as
CONFIG_X86_DEBUG_FPU is optional and, when disabled, hides
inconsistencies for a potentially long time.
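
For reference, WARN_ON_FPU() compiles away when CONFIG_X86_DEBUG_FPU is
not set, roughly:

	#ifdef CONFIG_X86_DEBUG_FPU
	# define WARN_ON_FPU(x)		WARN_ON_ONCE(x)
	#else
	# define WARN_ON_FPU(x)		({ (void)(x); 0; })
	#endif

so on such kernels a violated assertion is silently discarded.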

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
 arch/x86/include/asm/fpu/api.h |   17 +-------
 arch/x86/kernel/fpu/core.c     |   78 ++++++++++++++++++++++++++---------------
 2 files changed, 52 insertions(+), 43 deletions(-)

--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -66,21 +66,8 @@ static inline void kernel_fpu_begin(void
  *
  * Disabling preemption also serializes against kernel_fpu_begin().
  */
-static inline void fpregs_lock(void)
-{
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_bh_disable();
-	else
-		preempt_disable();
-}
-
-static inline void fpregs_unlock(void)
-{
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_bh_enable();
-	else
-		preempt_enable();
-}
+extern void fpregs_lock(void);
+extern void fpregs_unlock(void);
 
 #ifdef CONFIG_X86_DEBUG_FPU
 extern void fpregs_assert_state_consistent(void);
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -42,7 +42,7 @@ struct fpu_state_config fpu_user_cfg __r
 struct fpstate init_fpstate __ro_after_init;
 
 /* Track in-kernel FPU usage */
-static DEFINE_PER_CPU(bool, in_kernel_fpu);
+static DEFINE_PER_CPU(bool, fpu_in_use);
 
 /*
  * Track which context is using the FPU on the CPU:
@@ -50,6 +50,50 @@ static DEFINE_PER_CPU(bool, in_kernel_fp
 DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 
 /**
+ * fpregs_lock - Lock FPU state for maintenance operations
+ *
+ * This protects against preemption, soft interrupts and in-kernel FPU
+ * usage on both !RT and RT enabled kernels.
+ *
+ * !RT kernels use local_bh_disable() to prevent soft interrupt processing
+ * and preemption.
+ *
+ * On RT kernels local_bh_disable() is not sufficient because it only
+ * serializes soft interrupt related sections via a local lock, but
+ * stays preemptible. Disabling preemption is the right choice here:
+ * bottom half processing always runs in thread context on RT kernels,
+ * so disabling preemption implicitly prevents it as well.
+ */
+void fpregs_lock(void)
+{
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_disable();
+	else
+		preempt_disable();
+
+	WARN_ON_ONCE(this_cpu_read(fpu_in_use));
+	this_cpu_write(fpu_in_use, true);
+}
+EXPORT_SYMBOL_GPL(fpregs_lock);
+
+/**
+ * fpregs_unlock - Unlock FPU state after maintenance operations
+ *
+ * Counterpart to fpregs_lock().
+ */
+void fpregs_unlock(void)
+{
+	WARN_ON_ONCE(!this_cpu_read(fpu_in_use));
+	this_cpu_write(fpu_in_use, false);
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_enable();
+	else
+		preempt_enable();
+}
+EXPORT_SYMBOL_GPL(fpregs_unlock);
+
+/**
  * kernel_fpu_usable - Check whether kernel FPU usage is possible
  *
  * Has to be invoked before calling kernel_fpu_begin().
@@ -59,28 +103,7 @@ bool kernel_fpu_usable(void)
 	if (WARN_ON_ONCE(in_nmi()))
 		return false;
 
-	/* In kernel FPU usage already active? */
-	if (this_cpu_read(in_kernel_fpu))
-		return false;
-
-	/*
-	 * When not in NMI or hard interrupt context, FPU can be used:
-	 *
-	 * - Task context is safe except from within fpregs_lock()'ed
-	 *   critical regions.
-	 *
-	 * - Soft interrupt processing context which cannot happen
-	 *   while in a fpregs_lock()'ed critical region.
-	 */
-	if (!in_hardirq())
-		return true;
-
-	/*
-	 * In hard interrupt context it's safe when soft interrupts
-	 * are enabled, which means the interrupt did not hit in
-	 * a fpregs_lock()'ed critical region.
-	 */
-	return !softirq_count();
+	return !this_cpu_read(fpu_in_use);
 }
 EXPORT_SYMBOL(kernel_fpu_usable);
 
@@ -410,10 +433,9 @@ void kernel_fpu_begin_mask(unsigned int
 {
 	preempt_disable();
 
-	WARN_ON_FPU(!kernel_fpu_usable());
-	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
+	WARN_ON_ONCE(!kernel_fpu_usable());
 
-	this_cpu_write(in_kernel_fpu, true);
+	this_cpu_write(fpu_in_use, true);
 
 	if (!(current->flags & PF_KTHREAD) &&
 	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
@@ -433,9 +455,9 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask)
 
 void kernel_fpu_end(void)
 {
-	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
+	WARN_ON_ONCE(!this_cpu_read(fpu_in_use));
 
-	this_cpu_write(in_kernel_fpu, false);
+	this_cpu_write(fpu_in_use, false);
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_end);
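
For illustration, a hypothetical (simplified) state maintenance user of
the reworked fpregs_lock():

	fpregs_lock();		/* now also marks the CPU's FPU as in use */
	if (test_thread_flag(TIF_NEED_FPU_LOAD)) {
		/* State is in memory: operate on the fpstate buffer */
	} else {
		/* State is live: operate on the FPU registers */
	}
	fpregs_unlock();

A kernel_fpu_usable() call from an interrupt which hits this window now
observes fpu_in_use == true and backs off, independent of context checks.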
