lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Fri, 5 Nov 2021 21:58:05 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Sean Christopherson <seanjc@...gle.com>
Cc:     x86@...nel.org, linux-kernel@...r.kernel.org, jpoimboe@...hat.com,
        mark.rutland@....com, dvyukov@...gle.com, pbonzini@...hat.com,
        mbenes@...e.cz
Subject: Re: [RFC][PATCH 15/22] x86,vmx: Remove .fixup usage

On Fri, Nov 05, 2021 at 06:17:53PM +0000, Sean Christopherson wrote:

> diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
> index 9e9ef47e988c..5e5113d2b324 100644
> --- a/arch/x86/kvm/vmx/vmx_ops.h
> +++ b/arch/x86/kvm/vmx/vmx_ops.h

> @@ -76,29 +75,24 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
>  		     "ja 3f\n\t"
> 
>  		     /*
> -		      * VMREAD failed.  Push '0' for @fault, push the failing
> -		      * @field, and bounce through the trampoline to preserve
> -		      * volatile registers.
> +		      * VMREAD failed, push the failing @field, and bounce
> +		      * through the trampoline to preserve volatile registers.
> +		      * If VMREAD faults, this will push -FAULT (see below).
>  		      */
> -		     "push $0\n\t"
> -		     "push %2\n\t"
> -		     "2:call vmread_error_trampoline\n\t"
> +		     "2: push %2\n\t"
> +		     "call vmread_error_trampoline\n\t"
> 
>  		     /*
>  		      * Unwind the stack.  Note, the trampoline zeros out the
> -		      * memory for @fault so that the result is '0' on error.
> +		      * memory for @field so that the result is '0' on error,
> +		      * hence the pop to %1, not %2.
>  		      */
> -		     "pop %2\n\t"
>  		     "pop %1\n\t"
>  		     "3:\n\t"
> 
> -		     /* VMREAD faulted.  As above, except push '1' for @fault. */
> -		     ".pushsection .fixup, \"ax\"\n\t"
> -		     "4: push $1\n\t"
> -		     "push %2\n\t"
> -		     "jmp 2b\n\t"
> -		     ".popsection\n\t"
> -		     _ASM_EXTABLE(1b, 4b)
> +		     /* VMREAD faulted.  As above, except push '-EFAULT' for @fault. */
> +		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %1)
> +
>  		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
>  	return value;
>  }

A different option is something like the below; the downside is that it
increases the amount of text, while your version decreases the amount of
useless text (the gunk between vmread and 3:, all of which should
ideally live out-of-line).

For now I'll stick with your patch.

diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 9e9ef47e988c..99fc1f34fbd4 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -81,8 +81,8 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 		      * volatile registers.
 		      */
 		     "push $0\n\t"
-		     "push %2\n\t"
-		     "2:call vmread_error_trampoline\n\t"
+		     "2: push %2\n\t"
+		     "call vmread_error_trampoline\n\t"
 
 		     /*
 		      * Unwind the stack.  Note, the trampoline zeros out the
@@ -90,14 +90,14 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 		      */
 		     "pop %2\n\t"
 		     "pop %1\n\t"
-		     "3:\n\t"
+		     "jmp 3f\n\t"
 
-		     /* VMREAD faulted.  As above, except push '1' for @fault. */
-		     ".pushsection .fixup, \"ax\"\n\t"
 		     "4: push $1\n\t"
-		     "push %2\n\t"
 		     "jmp 2b\n\t"
-		     ".popsection\n\t"
+
+		     "3:\n\t"
+
+		     /* VMREAD faulted.  As above, except push '1' for @fault. */
 		     _ASM_EXTABLE(1b, 4b)
 		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
 	return value;

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ