Message-ID: <ZLBrM+Dee9okUmvc@google.com>
Date: Thu, 13 Jul 2023 14:22:59 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Su Hui <suhui@...china.com>
Cc: pbonzini@...hat.com, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de, dave.hansen@...ux.intel.com, hpa@...or.com,
x86@...nel.org, kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
kernel-janitors@...r.kernel.org
Subject: Re: [PATCH] KVM: VMX: Avoid noinstr warning

On Fri, Jul 07, 2023, Su Hui wrote:
> vmlinux.o: warning: objtool: vmx_vcpu_enter_exit+0x2d8:
> call to vmread_error_trampoline() leaves .noinstr.text section
>
> Signed-off-by: Su Hui <suhui@...china.com>
> ---
> arch/x86/kvm/vmx/vmx_ops.h | 2 ++
> 1 file changed, 2 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
> index ce47dc265f89..54f86ce2ad60 100644
> --- a/arch/x86/kvm/vmx/vmx_ops.h
> +++ b/arch/x86/kvm/vmx/vmx_ops.h
> @@ -112,6 +112,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
>
> #else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
>
> + instrumentation_begin();
> asm volatile("1: vmread %2, %1\n\t"
> ".byte 0x3e\n\t" /* branch taken hint */
> "ja 3f\n\t"
> @@ -139,6 +140,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
> _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)
>
> : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
> + instrumentation_end();

Tagging the entire thing as instrumentable is not correct, e.g. instrumentation
isn't magically safe when doing VMREAD immediately after VM-Exit.  Enabling
instrumentation for VM-Fail paths isn't exactly safe either, but odds are very
good that the system has a major issue if a VMX instruction (other than
VMLAUNCH/VMRESUME) gets VM-Fail, in which case logging the error takes priority.
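
Very roughly, the point is to keep the VMREAD itself in the noinstr region and
only flip instrumentation on for the unlikely VM-Fail path, i.e. something like
the sketch below (purely illustrative; read_field_sketch() and
do_vmread_sketch() are made-up names, not the real code structure):

  static noinstr unsigned long read_field_sketch(unsigned long field)
  {
          unsigned long value;

          /* Fast path: may run right after VM-Exit, must stay noinstr. */
          if (likely(!do_vmread_sketch(field, &value)))   /* hypothetical helper */
                  return value;

          /* VM-Fail: the system likely has bigger problems, log the error. */
          instrumentation_begin();
          vmread_error(field);
          instrumentation_end();
          return 0;
  }
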
Compile tested only, but I think the below is the least awful solution. That will
also allow the CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y case to use vmread_error() instead
of open coding an equivalent (hence the "PATCH 1/2").
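
E.g. the VM-Fail handling in the asm-goto flavor of __vmcs_readl() would then
shrink to something like this (untested sketch, the actual change being the
not-yet-posted 2/2):

          /* Sketch: __vmcs_readl()'s asm-goto VM-Fail label would just do: */
          instrumentation_begin();
          vmread_error(field);
          instrumentation_end();
          return 0;
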
I'll post patches after testing.
Thanks!
From: Sean Christopherson <seanjc@...gle.com>
Date: Thu, 13 Jul 2023 13:45:35 -0700
Subject: [PATCH 1/2] KVM: VMX: Make VMREAD error path play nice with noinstr

Mark vmread_error_trampoline() as noinstr, and add a second trampoline
for the CONFIG_CC_HAS_ASM_GOTO_OUTPUT=n case to enable instrumentation
when handling VM-Fail on VMREAD. VMREAD is used in various noinstr
flows, e.g. immediately after VM-Exit, and objtool rightly complains that
the call to the error trampoline leaves a no-instrumentation section
without annotating that it's safe to do so.

  vmlinux.o: warning: objtool: vmx_vcpu_enter_exit+0xc9:
  call to vmread_error_trampoline() leaves .noinstr.text section

Note, strictly speaking, enabling instrumentation in the VM-Fail path
isn't exactly safe, but if VMREAD fails the kernel/system is likely hosed
anyways, and logging that there is a fatal error is more important than
*maybe* encountering slightly unsafe instrumentation.

Reported-by: Su Hui <suhui@...china.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/vmx/vmenter.S | 8 ++++----
arch/x86/kvm/vmx/vmx.c | 18 ++++++++++++++----
arch/x86/kvm/vmx/vmx_ops.h | 9 ++++++++-
3 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 07e927d4d099..be275a0410a8 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -303,10 +303,8 @@ SYM_FUNC_START(vmx_do_nmi_irqoff)
VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx
SYM_FUNC_END(vmx_do_nmi_irqoff)
-
-.section .text, "ax"
-
#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
/**
* vmread_error_trampoline - Trampoline from inline asm to vmread_error()
* @field: VMCS field encoding that failed
@@ -335,7 +333,7 @@ SYM_FUNC_START(vmread_error_trampoline)
mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2
mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1
- call vmread_error
+ call vmread_error_trampoline2
/* Zero out @fault, which will be popped into the result register. */
_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
@@ -357,6 +355,8 @@ SYM_FUNC_START(vmread_error_trampoline)
SYM_FUNC_END(vmread_error_trampoline)
#endif
+.section .text, "ax"
+
SYM_FUNC_START(vmx_do_interrupt_irqoff)
VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
SYM_FUNC_END(vmx_do_interrupt_irqoff)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0ecf4be2c6af..d7cf35edda1b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -441,13 +441,23 @@ do { \
pr_warn_ratelimited(fmt); \
} while (0)
-void vmread_error(unsigned long field, bool fault)
+noinline void vmread_error(unsigned long field)
{
- if (fault)
+ vmx_insn_failed("vmread failed: field=%lx\n", field);
+}
+
+#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+noinstr void vmread_error_trampoline2(unsigned long field, bool fault)
+{
+ if (fault) {
kvm_spurious_fault();
- else
- vmx_insn_failed("vmread failed: field=%lx\n", field);
+ } else {
+ instrumentation_begin();
+ vmread_error(field);
+ instrumentation_end();
+ }
}
+#endif
noinline void vmwrite_error(unsigned long field, unsigned long value)
{
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index ce47dc265f89..5fa74779a37a 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -10,7 +10,7 @@
#include "vmcs.h"
#include "../x86.h"
-void vmread_error(unsigned long field, bool fault);
+void vmread_error(unsigned long field);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
@@ -31,6 +31,13 @@ void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);
* void vmread_error_trampoline(unsigned long field, bool fault);
*/
extern unsigned long vmread_error_trampoline;
+
+/*
+ * The second VMREAD error trampoline, called from the assembly trampoline,
+ * exists primarily to enable instrumentation for the VM-Fail path.
+ */
+void vmread_error_trampoline2(unsigned long field, bool fault);
+
#endif
static __always_inline void vmcs_check16(unsigned long field)
base-commit: 255006adb3da71bb75c334453786df781b415f54
--