Message-Id: <20220817144045.3206-1-ubizjak@gmail.com>
Date: Wed, 17 Aug 2022 16:40:45 +0200
From: Uros Bizjak <ubizjak@...il.com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: Uros Bizjak <ubizjak@...il.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: [PATCH] KVM/VMX: Do not declare vmread_error asmlinkage

There is no need to declare vmread_error() asmlinkage; its arguments
can be passed via registers on both 32-bit and 64-bit targets.
Function argument registers are call-clobbered registers anyway: the
trampoline saves them just before the function call and restores them
afterwards.

Note that asmlinkage and __attribute__((regparm(0))) have no effect
on 64-bit targets. The trampoline is called from assembler glue code
that implements its own stack-based argument-passing convention, so
the attribute on the trampoline declaration changes nothing on either
32-bit or 64-bit targets; it can be declared asmlinkage purely for
documentation purposes.

The patch unifies trampoline function argument handling between
32-bit and 64-bit targets and improves the generated code for 32-bit
targets.

Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Uros Bizjak <ubizjak@...il.com>
---
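As a stand-alone illustration (not part of the patch; the demo_* names
below are made up), the following C sketch shows what the calling
convention difference looks like on 32-bit, where the kernel is built
with -mregparm=3:

/*
 * Hypothetical demo, not kernel code.  Build for 32-bit with the same
 * register-argument convention the kernel uses:
 *
 *     gcc -m32 -mregparm=3 -O2 -S demo.c
 *
 * demo_regparm0() mimics an asmlinkage/regparm(0) callee: the caller
 * has to push both arguments on the stack.  demo_default() follows the
 * ambient -mregparm=3 convention, so the caller can hand the arguments
 * over in %eax/%edx, which is the 32-bit code improvement described in
 * the commit message.  On 64-bit both attributes are ignored and the
 * usual register convention applies either way.
 */
__attribute__((regparm(0))) void demo_regparm0(unsigned long field, _Bool fault);
void demo_default(unsigned long field, _Bool fault);

void demo_caller(unsigned long field, _Bool fault)
{
	demo_regparm0(field, fault);	/* arguments pushed on the stack */
	demo_default(field, fault);	/* arguments passed in registers */
}

Comparing the two calls in the generated demo.s makes the difference in
the 32-bit code immediately visible.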
 arch/x86/kvm/vmx/vmenter.S | 15 +++------------
 arch/x86/kvm/vmx/vmx.c     |  2 +-
 arch/x86/kvm/vmx/vmx_ops.h |  6 +++---
 3 files changed, 7 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 6de96b943804..2b83bab6e371 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -293,22 +293,13 @@ SYM_FUNC_START(vmread_error_trampoline)
 	push %r10
 	push %r11
 #endif
-#ifdef CONFIG_X86_64
+
 	/* Load @field and @fault to arg1 and arg2 respectively. */
-	mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
-	mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
-#else
-	/* Parameters are passed on the stack for 32-bit (see asmlinkage). */
-	push 3*WORD_SIZE(%ebp)
-	push 2*WORD_SIZE(%ebp)
-#endif
+	mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2
+	mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1
 
 	call vmread_error
 
-#ifndef CONFIG_X86_64
-	add $8, %esp
-#endif
-
 	/* Zero out @fault, which will be popped into the result register. */
 	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d7f8331d6f7e..c940688ceaa4 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -439,7 +439,7 @@ do { \
 	pr_warn_ratelimited(fmt);	\
 } while (0)
 
-asmlinkage void vmread_error(unsigned long field, bool fault)
+void vmread_error(unsigned long field, bool fault)
 {
 	if (fault)
 		kvm_spurious_fault();
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 5cfc49ddb1b4..550a89394d9f 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -10,9 +10,9 @@
#include "vmcs.h"
#include "../x86.h"
-asmlinkage void vmread_error(unsigned long field, bool fault);
-__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
- bool fault);
+void vmread_error(unsigned long field, bool fault);
+asmlinkage void vmread_error_trampoline(unsigned long field,
+ bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
--
2.37.1