Message-Id: <ba8f4a02e90f1dc95a900f6a34caddce40a358d4.1540369608.git.jsteckli@amazon.de>
Date: Wed, 24 Oct 2018 10:28:59 +0200
From: Julian Stecklina <jsteckli@...zon.de>
To: kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>
Cc: Julian Stecklina <jsteckli@...zon.de>, js@...en8.de,
linux-kernel@...r.kernel.org
Subject: [PATCH 4/4] kvm, vmx: remove manually coded vmx instructions
So far, the VMX code has relied on manually assembled VMX instructions.
This was apparently done to ensure compatibility with old binutils. VMX
instructions were introduced with binutils 2.19, and the kernel
currently requires binutils 2.20.
Remove the manually assembled versions and replace them with proper
inline assembly. This improves code generation (and source code
readability).
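For illustration, the simplest case (cpu_vmxoff(), see the virtext.h
hunk below) goes from hand-encoded opcode bytes to a plain mnemonic;
the .byte define is one of those removed from vmx.h:

    /* before: opcode bytes hand-encoded for pre-2.19 binutils */
    #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
    asm volatile (ASM_VMX_VMXOFF : : : "cc");

    /* after: let the assembler emit the instruction directly */
    asm volatile ("vmxoff" : : : "cc");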
According to bloat-o-meter, this change removes ~1300 bytes from the
text segment.
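(The delta was measured with the in-tree script, roughly along the
lines of the invocation below; the vmlinux file names are just
placeholders for pre- and post-patch builds.)

    ./scripts/bloat-o-meter vmlinux.before vmlinux.after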
Signed-off-by: Julian Stecklina <jsteckli@...zon.de>
Reviewed-by: Jan H. Schönherr <jschoenh@...zon.de>
Reviewed-by: Konrad Jan Miller <kjm@...zon.de>
Reviewed-by: Razvan-Alin Ghitulete <rga@...zon.de>
---
 arch/x86/include/asm/virtext.h |  2 +-
 arch/x86/include/asm/vmx.h     | 13 -------------
 arch/x86/kvm/vmx.c             | 39 ++++++++++++++++++---------------------
 3 files changed, 19 insertions(+), 35 deletions(-)
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 0116b2e..c5395b3 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -40,7 +40,7 @@ static inline int cpu_has_vmx(void)
*/
static inline void cpu_vmxoff(void)
{
- asm volatile (ASM_VMX_VMXOFF : : : "cc");
+ asm volatile ("vmxoff" : : : "cc");
cr4_clear_bits(X86_CR4_VMXE);
}
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9527ba5..ade0f15 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -503,19 +503,6 @@ enum vmcs_field {
#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
-
-#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
-#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
-#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
-#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
-#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
-#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
-#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
-#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
-
struct vmx_msr_entry {
u32 index;
u32 reserved;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 82cfb909..bbbdccb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2077,7 +2077,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
return -1;
}
-static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+static inline void __invvpid(long ext, u16 vpid, gva_t gva)
{
struct {
u64 vpid : 16;
@@ -2086,21 +2086,21 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
} operand = { vpid, 0, gva };
bool error;
- asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
+ asm volatile ("invvpid %1, %2" CC_SET(na)
+ : CC_OUT(na) (error) : "m"(operand), "r"(ext)
: "memory");
BUG_ON(error);
}
-static inline void __invept(int ext, u64 eptp, gpa_t gpa)
+static inline void __invept(long ext, u64 eptp, gpa_t gpa)
{
struct {
u64 eptp, gpa;
} operand = {eptp, gpa};
bool error;
- asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
- : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
+ asm volatile ("invept %1, %2" CC_SET(na)
+ : CC_OUT(na) (error) : "m" (operand), "r" (ext)
: "memory");
BUG_ON(error);
}
@@ -2120,8 +2120,8 @@ static void vmcs_clear(struct vmcs *vmcs)
u64 phys_addr = __pa(vmcs);
bool error;
- asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+ asm volatile ("vmclear %1" CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr)
: "memory");
if (unlikely(error))
printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
@@ -2145,8 +2145,8 @@ static void vmcs_load(struct vmcs *vmcs)
if (static_branch_unlikely(&enable_evmcs))
return evmcs_load(phys_addr);
- asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+ asm volatile ("vmptrld %1" CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr)
: "memory");
if (unlikely(error))
printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
@@ -2323,8 +2323,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
unsigned long value;
- asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
- : "=a"(value) : "d"(field) : "cc");
+ asm volatile ("vmread %1, %0" : "=rm"(value) : "r"(field) : "cc");
return value;
}
@@ -2375,8 +2374,8 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val
{
bool error;
- asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(value), "d"(field));
+ asm volatile ("vmwrite %1, %2" CC_SET(na)
+ : CC_OUT(na) (error) : "rm"(value), "r"(field));
if (unlikely(error))
vmwrite_error(field, value);
}
@@ -4397,9 +4396,7 @@ static void kvm_cpu_vmxon(u64 addr)
cr4_set_bits(X86_CR4_VMXE);
intel_pt_handle_vmx(1);
- asm volatile (ASM_VMX_VMXON_RAX
- : : "a"(&addr), "m"(addr)
- : "memory", "cc");
+ asm volatile ("vmxon %0" : : "m"(addr) : "memory", "cc");
}
static int hardware_enable(void)
@@ -4468,7 +4465,7 @@ static void vmclear_local_loaded_vmcss(void)
*/
static void kvm_cpu_vmxoff(void)
{
- asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+ asm volatile ("vmxoff" : : : "cc");
intel_pt_handle_vmx(0);
cr4_clear_bits(X86_CR4_VMXE);
@@ -10748,7 +10745,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
"jmp 1f \n\t"
"2: \n\t"
- __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
+ "vmwrite %%" _ASM_SP ", %%" _ASM_DX "\n\t"
"1: \n\t"
/* Check if vmlaunch of vmresume is needed */
"cmpl $0, %c[launched](%0) \n\t"
@@ -10773,9 +10770,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
/* Enter guest mode */
"jne 1f \n\t"
- __ex(ASM_VMX_VMLAUNCH) "\n\t"
+ "vmlaunch \n\t"
"jmp 2f \n\t"
- "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ "1: vmresume \n\t"
"2: "
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
--
2.7.4