Message-ID: <20210908132525.564106760@linutronix.de>
Date:   Wed,  8 Sep 2021 15:29:27 +0200 (CEST)
From:   Thomas Gleixner <tglx@...utronix.de>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     x86@...nel.org, Al Viro <viro@...iv.linux.org.uk>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Tony Luck <tony.luck@...el.com>,
        Alexei Starovoitov <ast@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Song Liu <songliubraving@...com>,
        Daniel Borkmann <daniel@...earbox.net>
Subject: [patch V3 11/20] x86/fpu: Don't use MCE safe fixups for writing FPU
 state to user space

Writes cannot raise #MC, so there is no point in pretending that the code
can handle in-kernel #MC recovery.
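
For illustration only (not part of the patch): after this change the restore
side keeps the MCE safe fixup and reports the trap number, while the write
side only ever sees a plain fault. A hypothetical caller of the read path
could distinguish the two cases roughly like the sketch below, assuming
X86_TRAP_MC from <asm/trapnr.h> and the usual errno values; the helper name
is made up:

	/*
	 * Hypothetical sketch, not from the patch: user_insn_mce_safe()
	 * based helpers return the trap number on failure, so a machine
	 * check hit while reading poisoned user memory can be told apart
	 * from an ordinary fault. The plain user_insn() fixup only ever
	 * reports -1.
	 */
	static int restore_fx_from_user_sketch(struct fxregs_state __user *fx)
	{
		int err = fxrstor_from_user_sigframe(fx);

		if (!err)
			return 0;
		if (err == X86_TRAP_MC)
			return -EIO;	/* poisoned user memory, do not retry */
		return -EFAULT;		/* ordinary fault */
	}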

Reported-by: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
V2: New patch
---
 arch/x86/include/asm/fpu/internal.h |   48 ++++++++++++++++++++++++++++++------
 1 file changed, 40 insertions(+), 8 deletions(-)

--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -92,7 +92,7 @@ extern void save_fpregs_to_fpstate(struc
  * Returns 0 on success or the trap number when the operation raises an
  * exception.
  */
-#define user_insn(insn, output, input...)				\
+#define user_insn_mce_safe(insn, output, input...)			\
 ({									\
 	int err;							\
 									\
@@ -107,6 +107,25 @@ extern void save_fpregs_to_fpstate(struc
 	err;								\
 })
 
+#define user_insn(insn, output, input...)				\
+({									\
+	int err;							\
+									\
+	might_fault();							\
+									\
+	asm volatile(ASM_STAC "\n"					\
+		     "1: " #insn "\n"					\
+		     "2: " ASM_CLAC "\n"				\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=a" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
 #define kernel_insn_err(insn, output, input...)				\
 ({									\
 	int err;							\
@@ -161,9 +180,9 @@ static inline int fxrstor_safe(struct fx
 static inline int fxrstor_from_user_sigframe(struct fxregs_state __user *fx)
 {
 	if (IS_ENABLED(CONFIG_X86_32))
-		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+		return user_insn_mce_safe(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 	else
-		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+		return user_insn_mce_safe(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline void frstor(struct fregs_state *fx)
@@ -178,7 +197,7 @@ static inline int frstor_safe(struct fre
 
 static inline int frstor_from_user_sigframe(struct fregs_state __user *fx)
 {
-	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	return user_insn_mce_safe(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline void fxsave(struct fxregs_state *fx)
@@ -200,7 +219,7 @@ static inline void fxsave(struct fxregs_
  * After this @err contains 0 on success or the trap number when the
  * operation raises an exception.
  */
-#define XSTATE_OP(op, st, lmask, hmask, err)				\
+#define XSTATE_OP_MCE_SAFE(op, st, lmask, hmask, err)			\
 	asm volatile("1:" op "\n\t"					\
 		     "xor %[err], %[err]\n"				\
 		     "2:\n\t"						\
@@ -209,6 +228,19 @@ static inline void fxsave(struct fxregs_
 		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
 		     : "memory")
 
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=a" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
 /*
  * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
  * format and supervisor states in addition to modified optimization in
@@ -360,15 +392,15 @@ static inline int xrstor_from_user_sigfr
 	int err;
 
 	stac();
-	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	XSTATE_OP_MCE_SAFE(XRSTOR, xstate, lmask, hmask, err);
 	clac();
 
 	return err;
 }
 
 /*
- * Restore xstate from kernel space xsave area, return an error code instead of
- * an exception.
+ * Restore xstate from kernel space xsave area, return an error code when
+ * the operation raises an exception.
  */
 static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
 {

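
For context, also not part of the patch: the save-to-user-space helpers in
this header keep using the plain XSTATE_OP()/user_insn() variants, since a
write can only fault. A rough sketch of such a write path follows, with a
made-up name and simplified signature; stac()/clac() and XSTATE_OP() are the
helpers visible in the diff above:

	/*
	 * Sketch only: saving xstate to the user sigframe is a write, so it
	 * uses the plain XSTATE_OP() fixup, which reports -1 on a fault and
	 * does not pretend to handle #MC.
	 */
	static inline int xsave_to_user_sketch(struct xregs_state __user *buf, u64 mask)
	{
		u32 lmask = mask;
		u32 hmask = mask >> 32;
		int err;

		stac();
		XSTATE_OP(XSAVE, buf, lmask, hmask, err);
		clac();

		return err;	/* 0 or -1, never a trap number */
	}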