Message-ID: <YTCYiUIGeluu8Bov@hirez.programming.kicks-ass.net>
Date:   Thu, 2 Sep 2021 11:25:29 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Lai Jiangshan <jiangshanlai@...il.com>
Cc:     linux-kernel@...r.kernel.org,
        Lai Jiangshan <laijs@...ux.alibaba.com>,
        Andy Lutomirski <luto@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>
Subject: Re: [PATCH 09/24] x86/traps: Add fence_swapgs_{user,kernel}_entry()

On Wed, Sep 01, 2021 at 01:50:10AM +0800, Lai Jiangshan wrote:

> +/*
> + * Mitigate Spectre v1 for conditional swapgs code paths.
> + *
> + * fence_swapgs_user_entry is used in the user entry swapgs code path, to
> + * prevent a speculative swapgs when coming from kernel space.
> + *
> + * fence_swapgs_kernel_entry is used in the kernel entry non-swapgs code path,
> + * to prevent the swapgs from getting speculatively skipped when coming from
> + * user space.
> + */
> +static __always_inline void fence_swapgs_user_entry(void)
> +{
> +	alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_USER);
> +}
> +
> +static __always_inline void fence_swapgs_kernel_entry(void)
> +{
> +	alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL);
> +}
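
[Editorial context, not part of the original mail: the quoted hunk relies on
 alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_*), which patches an LFENCE
 into the entry path at boot, but only on CPUs where the SWAPGS speculation
 mitigation is needed. A rough user-space sketch of the idea follows; the
 kernel's boot-time patching is approximated by an ordinary runtime check, and
 cpu_needs_fence_swapgs() is a hypothetical stand-in for the feature bit.]

	#include <stdbool.h>

	/*
	 * Hypothetical stand-in for the X86_FEATURE_FENCE_SWAPGS_* bits the
	 * kernel tests when applying alternatives at boot.
	 */
	static bool cpu_needs_fence_swapgs(void)
	{
		return true;	/* assume the mitigation is required */
	}

	/*
	 * Rough equivalent of alternative("", "lfence", X86_FEATURE_...):
	 * either nothing at all, or an LFENCE.  LFENCE keeps later
	 * instructions (e.g. GS-based accesses after kernel entry) from
	 * executing until earlier ones -- including the conditional SWAPGS --
	 * have completed, so they cannot run speculatively with the wrong
	 * GS base.
	 */
	static inline void fence_swapgs_user_entry(void)
	{
		if (cpu_needs_fence_swapgs())
			__asm__ __volatile__("lfence" ::: "memory");
	}

	int main(void)
	{
		fence_swapgs_user_entry();
		return 0;
	}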

I think slightly larger primitives might make more sense; that is,
include the swapgs in these functions and drop the fence_ prefix.

Something a bit like...

--- a/arch/x86/entry/traps.c
+++ b/arch/x86/entry/traps.c
@@ -853,20 +853,22 @@ static __always_inline void ist_restore_
 /*
  * Mitigate Spectre v1 for conditional swapgs code paths.
  *
- * fence_swapgs_user_entry is used in the user entry swapgs code path, to
- * prevent a speculative swapgs when coming from kernel space.
+ * swapgs_user_entry is used in the user entry swapgs code path, to prevent a
+ * speculative swapgs when coming from kernel space.
  *
- * fence_swapgs_kernel_entry is used in the kernel entry non-swapgs code path,
- * to prevent the swapgs from getting speculatively skipped when coming from
- * user space.
+ * swapgs_kernel_entry is used in the kernel entry non-swapgs code path, to
+ * prevent the swapgs from getting speculatively skipped when coming from user
+ * space.
  */
-static __always_inline void fence_swapgs_user_entry(void)
+static __always_inline void swapgs_user_entry(void)
 {
+	native_swapgs();
 	alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_USER);
 }
 
-static __always_inline void fence_swapgs_kernel_entry(void)
+static __always_inline void swapgs_kernel_entry(void)
 {
+	native_swapgs();
 	alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL);
 }
 
@@ -896,8 +898,7 @@ struct pt_regs *do_error_entry(struct pt
 		 * We entered from user mode.
 		 * Switch to kernel gsbase and CR3.
 		 */
-		native_swapgs();
-		fence_swapgs_user_entry();
+		swapgs_user_entry();
 		switch_to_kernel_cr3();
 
 		/* Put pt_regs onto the task stack. */
@@ -917,8 +918,7 @@ struct pt_regs *do_error_entry(struct pt
 		 * We came from an IRET to user mode, so we have user
 		 * gsbase and CR3.  Switch to kernel gsbase and CR3:
 		 */
-		native_swapgs();
-		fence_swapgs_user_entry();
+		swapgs_user_entry();
 		switch_to_kernel_cr3();
 
 		/*
@@ -936,8 +936,7 @@ struct pt_regs *do_error_entry(struct pt
 	 * handler with kernel gsbase.
 	 */
 	if (eregs->ip == (unsigned long)asm_load_gs_index_gs_change) {
-		native_swapgs();
-		fence_swapgs_user_entry();
+		swapgs_user_entry();
 	} else {
 		fence_swapgs_kernel_entry();
 	}
@@ -1017,14 +1016,12 @@ static __always_inline unsigned long ist
 	if ((long)gsbase < 0)
 		return 1;
 
-	native_swapgs();
-
 	/*
 	 * The above ist_switch_to_kernel_cr3() doesn't do an unconditional
 	 * CR3 write, even in the PTI case.  So do an lfence to prevent GS
 	 * speculation, regardless of whether PTI is enabled.
 	 */
-	fence_swapgs_kernel_entry();
+	swapgs_kernel_entry();
 
 	/* SWAPGS required on exit */
 	return 0;
@@ -1089,7 +1086,7 @@ void paranoid_exit(struct ist_regs *ist)
 	}
 
 	if (!ist->gsbase)
-		native_swapgs();
+		swapgs_user_entry();
 }
 #endif
 

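[Editorial context, not part of the original mail: the combined primitive
 proposed in the diff above keeps the SWAPGS and its LFENCE in one place, so a
 call site can no longer pick up the swapgs and forget the fence. A minimal
 user-space sketch of that shape, under the assumptions that native_swapgs()
 is stubbed out (the real SWAPGS instruction is kernel-only) and that the
 lfence is issued unconditionally rather than via alternatives:]

	/* Stub: the real SWAPGS instruction is privileged. */
	static inline void native_swapgs(void)
	{
	}

	/* The swapgs and its speculation fence travel together. */
	static inline void swapgs_user_entry(void)
	{
		native_swapgs();
		__asm__ __volatile__("lfence" ::: "memory");
	}

	int main(void)
	{
		swapgs_user_entry();
		return 0;
	}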