Message-ID: <YbjmIPWtd6ke66CU@google.com>
Date: Tue, 14 Dec 2021 18:44:48 +0000
From: Sean Christopherson <seanjc@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: x86@...nel.org, linux-kernel@...r.kernel.org, jpoimboe@...hat.com,
mark.rutland@....com, dvyukov@...gle.com, pbonzini@...hat.com,
mbenes@...e.cz
Subject: Re: [PATCH v2 16/23] x86,vmx: Provide asm-goto-output vmread

On Mon, Dec 13, 2021, Peter Zijlstra wrote:
>
> Here's a version that doesn't make clang ICE..
>
> Paolo, since this doesn't really depend on the .fixup removal series,
> feel free to collect it in the kvm tree or somesuch.
>
> ---
> Subject: x86/vmx: Provide asm-goto-output vmread
> From: Peter Zijlstra <peterz@...radead.org>
> Date: Wed, 10 Nov 2021 11:01:18 +0100
>
> Use asm-goto-output for smaller fast path code.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> Reviewed-by: Josh Poimboeuf <jpoimboe@...hat.com>
> ---
>  arch/x86/kvm/vmx/vmx_ops.h |   27 +++++++++++++++++++++++++++
>  1 file changed, 27 insertions(+)
>
> --- a/arch/x86/kvm/vmx/vmx_ops.h
> +++ b/arch/x86/kvm/vmx/vmx_ops.h
> @@ -71,6 +71,31 @@ static __always_inline unsigned long __v
>  {
>  	unsigned long value;
>  
> +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
> +
> +	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
> +			  "jna %l[do_fail]\n\t"
> +
> +			  _ASM_EXTABLE(1b, %l[do_exception])
> +
> +			  : [output] "=r" (value)
> +			  : [field] "r" (field)
> +			  : "cc"
> +			  : do_fail, do_exception);
> +
> +	return value;
> +
> +do_fail:
> +	WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field);
> +	pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field);

This needs to route through a noinline vmread_error(); the intent is that
KVM WARNs only once for all VMREAD failures, whereas having the WARN inline
means WARNing once on each unique vmcs_read*() call site.  And at that
point, we might as well hide the asm trampoline too.  The amount of
ifdeffery gets a bit gross, but in for a penny, in for a pound?
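
For context, the once-per-call-site behavior falls out of WARN_ONCE()
keeping a static "warned" flag per macro expansion.  Roughly (a simplified
sketch of the pattern, not the real include/asm-generic/bug.h macro):

	/* Each expansion gets its own static flag. */
	#define SKETCH_WARN_ONCE(fmt, ...)			\
	do {							\
		static bool __warned;				\
		if (!__warned) {				\
			__warned = true;			\
			printk(fmt, ##__VA_ARGS__);		\
		}						\
	} while (0)

Expanded inline in __vmcs_readl(), every vmcs_read*() caller gets its own
__warned flag, so each unique call site WARNs once; a noinline
vmread_error() has exactly one expansion, and thus one flag shared by all
VMREAD failures.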

And like the other VMX instructions, I assume it's safe to annotate the
failure path as instrumentation safe.

So this?

---
 arch/x86/kvm/vmx/vmenter.S |  2 ++
 arch/x86/kvm/vmx/vmx.c     |  7 +++++++
 arch/x86/kvm/vmx/vmx_ops.h | 31 +++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+)

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 3a6461694fc2..d8a7a0a69ec1 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -238,6 +238,7 @@ SYM_FUNC_END(__vmx_vcpu_run)
 
 .section .text, "ax"
 
+#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 /**
  * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
  * @field:	VMCS field encoding that failed
@@ -295,6 +296,7 @@ SYM_FUNC_START(vmread_error_trampoline)
 
 	ret
 SYM_FUNC_END(vmread_error_trampoline)
+#endif
 
 SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
 	/*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 640f4719612c..746c0952ddac 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -373,6 +373,12 @@ do {								\
 	pr_warn_ratelimited(fmt);				\
 } while (0)
 
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+noinline void vmread_error(unsigned long field)
+{
+	vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
+}
+#else
 asmlinkage void vmread_error(unsigned long field, bool fault)
 {
 	if (fault)
@@ -380,6 +386,7 @@ asmlinkage void vmread_error(unsigned long field, bool fault)
 	else
 		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
 }
+#endif
 
 noinline void vmwrite_error(unsigned long field, unsigned long value)
 {
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 9e9ef47e988c..6cdc4ff4335f 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -10,9 +10,13 @@
 #include "vmcs.h"
 #include "x86.h"
 
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+noinline void vmread_error(unsigned long field);
+#else
 asmlinkage void vmread_error(unsigned long field, bool fault);
 __attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
 							  bool fault);
+#endif
 void vmwrite_error(unsigned long field, unsigned long value);
 void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
 void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
@@ -71,6 +75,31 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 {
 	unsigned long value;
 
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
+			  "jna %l[do_fail]\n\t"
+
+			  _ASM_EXTABLE(1b, %l[do_exception])
+
+			  : [output] "=r" (value)
+			  : [field] "r" (field)
+			  : "cc"
+			  : do_fail, do_exception);
+
+	return value;
+
+do_fail:
+	instrumentation_begin();
+	vmread_error(field);
+	instrumentation_end();
+	return 0;
+
+do_exception:
+	kvm_spurious_fault();
+	return 0;
+
+#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
 	asm volatile("1: vmread %2, %1\n\t"
 		     ".byte 0x3e\n\t" /* branch taken hint */
 		     "ja 3f\n\t"
@@ -101,6 +130,8 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 	_ASM_EXTABLE(1b, 4b)
 	: ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
 	return value;
+
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
 }
 
 static __always_inline u16 vmcs_read16(unsigned long field)
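
(Aside for anyone unfamiliar with the compiler feature: the
CONFIG_CC_HAS_ASM_GOTO_OUTPUT guard exists because combining output
constraints with goto labels in inline asm is only supported by newer
compilers, GCC 11+ and recent Clang.  Stripped of the kernel's
asm_volatile_goto() wrapper and the exception-table bits, the construct
boils down to this userspace sketch; the function name and labels are made
up for illustration:

	#include <stdio.h>

	/*
	 * asm goto with outputs: "=r"(out) is only guaranteed valid on
	 * the fallthrough path; the goto labels come after the clobbers.
	 */
	static int nonzero_or_jump(int in)
	{
		int out;

		asm goto("mov %1, %0\n\t"
			 "test %0, %0\n\t"
			 "jz %l[was_zero]"
			 : "=r" (out)
			 : "r" (in)
			 : "cc"
			 : was_zero);
		return out;

	was_zero:
		return -1;
	}

	int main(void)
	{
		printf("%d %d\n", nonzero_or_jump(5), nonzero_or_jump(0));
		return 0;
	}

Without the feature, a single asm statement can't have both "=r"(value) and
a jump to a C label, which is why the #else path keeps the out-of-line
vmread_error_trampoline.)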
--