Message-ID: <074223fd-6f82-9ed6-8664-f324f5027da5@redhat.com>
Date:   Wed, 12 May 2021 13:58:40 +0200
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     Uros Bizjak <ubizjak@...il.com>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH] KVM: SVM/VMX: Use %rax instead of %_ASM_AX within CONFIG_X86_64

On 12/05/21 13:21, Uros Bizjak wrote:
> There is no need to use %_ASM_AX within CONFIG_X86_64 blocks: there the
> macro always expands to %rax.
> 
> Cc: Paolo Bonzini <pbonzini@...hat.com>
> Signed-off-by: Uros Bizjak <ubizjak@...il.com>
> ---
>   arch/x86/kvm/svm/vmenter.S | 44 +++++++++++++++++++-------------------
>   arch/x86/kvm/vmx/vmenter.S | 32 +++++++++++++--------------
>   2 files changed, 38 insertions(+), 38 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
> index 4fa17df123cd..844b558bb021 100644
> --- a/arch/x86/kvm/svm/vmenter.S
> +++ b/arch/x86/kvm/svm/vmenter.S
> @@ -64,14 +64,14 @@ SYM_FUNC_START(__svm_vcpu_run)
>   	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
>   	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
>   #ifdef CONFIG_X86_64
> -	mov VCPU_R8 (%_ASM_AX),  %r8
> -	mov VCPU_R9 (%_ASM_AX),  %r9
> -	mov VCPU_R10(%_ASM_AX), %r10
> -	mov VCPU_R11(%_ASM_AX), %r11
> -	mov VCPU_R12(%_ASM_AX), %r12
> -	mov VCPU_R13(%_ASM_AX), %r13
> -	mov VCPU_R14(%_ASM_AX), %r14
> -	mov VCPU_R15(%_ASM_AX), %r15
> +	mov VCPU_R8 (%rax),  %r8
> +	mov VCPU_R9 (%rax),  %r9
> +	mov VCPU_R10(%rax), %r10
> +	mov VCPU_R11(%rax), %r11
> +	mov VCPU_R12(%rax), %r12
> +	mov VCPU_R13(%rax), %r13
> +	mov VCPU_R14(%rax), %r14
> +	mov VCPU_R15(%rax), %r15
>   #endif
>   
>   	/* "POP" @vmcb to RAX. */
> @@ -93,21 +93,21 @@ SYM_FUNC_START(__svm_vcpu_run)
>   	pop %_ASM_AX
>   
>   	/* Save all guest registers.  */
> -	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
> -	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
> -	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
> -	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
> -	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
> -	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
> +	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
> +	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
> +	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
> +	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
> +	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
> +	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
>   #ifdef CONFIG_X86_64
> -	mov %r8,  VCPU_R8 (%_ASM_AX)
> -	mov %r9,  VCPU_R9 (%_ASM_AX)
> -	mov %r10, VCPU_R10(%_ASM_AX)
> -	mov %r11, VCPU_R11(%_ASM_AX)
> -	mov %r12, VCPU_R12(%_ASM_AX)
> -	mov %r13, VCPU_R13(%_ASM_AX)
> -	mov %r14, VCPU_R14(%_ASM_AX)
> -	mov %r15, VCPU_R15(%_ASM_AX)
> +	mov %r8,  VCPU_R8 (%rax)
> +	mov %r9,  VCPU_R9 (%rax)
> +	mov %r10, VCPU_R10(%rax)
> +	mov %r11, VCPU_R11(%rax)
> +	mov %r12, VCPU_R12(%rax)
> +	mov %r13, VCPU_R13(%rax)
> +	mov %r14, VCPU_R14(%rax)
> +	mov %r15, VCPU_R15(%rax)
>   #endif
>   
>   	/*
> diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> index 3a6461694fc2..9273709e4800 100644
> --- a/arch/x86/kvm/vmx/vmenter.S
> +++ b/arch/x86/kvm/vmx/vmenter.S
> @@ -142,14 +142,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
>   	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
>   	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
>   #ifdef CONFIG_X86_64
> -	mov VCPU_R8 (%_ASM_AX),  %r8
> -	mov VCPU_R9 (%_ASM_AX),  %r9
> -	mov VCPU_R10(%_ASM_AX), %r10
> -	mov VCPU_R11(%_ASM_AX), %r11
> -	mov VCPU_R12(%_ASM_AX), %r12
> -	mov VCPU_R13(%_ASM_AX), %r13
> -	mov VCPU_R14(%_ASM_AX), %r14
> -	mov VCPU_R15(%_ASM_AX), %r15
> +	mov VCPU_R8 (%rax),  %r8
> +	mov VCPU_R9 (%rax),  %r9
> +	mov VCPU_R10(%rax), %r10
> +	mov VCPU_R11(%rax), %r11
> +	mov VCPU_R12(%rax), %r12
> +	mov VCPU_R13(%rax), %r13
> +	mov VCPU_R14(%rax), %r14
> +	mov VCPU_R15(%rax), %r15
>   #endif
>   	/* Load guest RAX.  This kills the @regs pointer! */
>   	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
> @@ -175,14 +175,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
>   	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
>   	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
>   #ifdef CONFIG_X86_64
> -	mov %r8,  VCPU_R8 (%_ASM_AX)
> -	mov %r9,  VCPU_R9 (%_ASM_AX)
> -	mov %r10, VCPU_R10(%_ASM_AX)
> -	mov %r11, VCPU_R11(%_ASM_AX)
> -	mov %r12, VCPU_R12(%_ASM_AX)
> -	mov %r13, VCPU_R13(%_ASM_AX)
> -	mov %r14, VCPU_R14(%_ASM_AX)
> -	mov %r15, VCPU_R15(%_ASM_AX)
> +	mov %r8,  VCPU_R8 (%rax)
> +	mov %r9,  VCPU_R9 (%rax)
> +	mov %r10, VCPU_R10(%rax)
> +	mov %r11, VCPU_R11(%rax)
> +	mov %r12, VCPU_R12(%rax)
> +	mov %r13, VCPU_R13(%rax)
> +	mov %r14, VCPU_R14(%rax)
> +	mov %r15, VCPU_R15(%rax)
>   #endif
>   
>   	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
> 

It looks a bit weird either way (either the address register is spelled
differently inside the #ifdef than outside it, or it is spelled
differently from the destination register), so I lean more towards
avoiding the churn.
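
For reference, a minimal sketch of where the expansion comes from,
paraphrased from arch/x86/include/asm/asm.h (abridged; the real header
tests __x86_64__ and wraps each alternative in __ASM_FORM so the same
macros also work in inline asm):

	/* arch/x86/include/asm/asm.h, abridged sketch */
	#ifdef CONFIG_X86_64
	# define __ASM_SEL(a, b)  b              /* 64-bit: take the second form */
	#else
	# define __ASM_SEL(a, b)  a              /* 32-bit: take the first form  */
	#endif

	#define __ASM_REG(reg)    __ASM_SEL(e##reg, r##reg)
	#define _ASM_AX           __ASM_REG(ax)  /* eax on 32-bit, rax on 64-bit */

So inside a CONFIG_X86_64 block %_ASM_AX can only ever name %rax, which
is exactly what the patch spells out; the question is just whether
spelling it out is worth the diff.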

Paolo
