Message-ID: <20250422161304.579394-5-zack.rusin@broadcom.com>
Date: Tue, 22 Apr 2025 12:12:23 -0400
From: Zack Rusin <zack.rusin@...adcom.com>
To: linux-kernel@...r.kernel.org
Cc: Zack Rusin <zack.rusin@...adcom.com>,
Doug Covelli <doug.covelli@...adcom.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
kvm@...r.kernel.org,
linux-doc@...r.kernel.org
Subject: [PATCH v2 4/5] KVM: x86: Add support for legacy VMware backdoors in nested setups
Allow VMware backdoors to be handled by the L0 monitor. This is required
on setups running Windows VBS, where L1 runs Hyper-V, which can't handle
VMware backdoors. On such setups, legacy VMware backdoor calls issued by
guest userspace end up in Hyper-V (L1) and result in an error.
Add a KVM cap that, in nested setups, allows the legacy VMware backdoor
to be handled by the L0 monitor. This ensures that the VMware backdoor is
always handled by the correct monitor.
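
For reference, a userspace VMM would enable the new cap roughly as in the
sketch below. This is illustrative only and not part of the patch: the
vm_fd name and the omitted error handling are placeholders, and (per
kvm_vmware_nested_backdoor_l0_enabled()) KVM_CAP_X86_VMWARE_BACKDOOR must
also be enabled for the new cap to take effect:

  /*
   * Illustrative sketch only: enable L0 handling of legacy VMware
   * backdoor calls on an existing VM fd. Assumes the VMM has already
   * checked KVM_CHECK_EXTENSION and enabled KVM_CAP_X86_VMWARE_BACKDOOR.
   */
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int enable_nested_backdoor_l0(int vm_fd)
  {
          struct kvm_enable_cap cap;

          memset(&cap, 0, sizeof(cap));
          cap.cap = KVM_CAP_X86_VMWARE_NESTED_BACKDOOR_L0;
          cap.args[0] = 1;        /* args[0]: 1 = enable, 0 = disable */

          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }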
Signed-off-by: Zack Rusin <zack.rusin@...adcom.com>
Cc: Doug Covelli <doug.covelli@...adcom.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: Jonathan Corbet <corbet@....net>
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: x86@...nel.org
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Zack Rusin <zack.rusin@...adcom.com>
Cc: kvm@...r.kernel.org
Cc: linux-doc@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
---
Documentation/virt/kvm/api.rst | 14 +++++++++++
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/Kconfig | 1 +
arch/x86/kvm/kvm_vmware.h | 42 +++++++++++++++++++++++++++++++++
arch/x86/kvm/svm/nested.c | 6 +++++
arch/x86/kvm/svm/svm.c | 3 ++-
arch/x86/kvm/vmx/nested.c | 6 +++++
arch/x86/kvm/x86.c | 8 +++++++
include/uapi/linux/kvm.h | 1 +
9 files changed, 81 insertions(+), 1 deletion(-)
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 6d3d2a509848..55bd464ebf95 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8322,6 +8322,20 @@ userspace handling of hypercalls is discouraged. To implement
such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO
(all except s390).
+7.39 KVM_CAP_X86_VMWARE_NESTED_BACKDOOR_L0
+------------------------------------------
+
+:Architectures: x86
+:Parameters: args[0] whether the feature should be enabled or not
+:Returns: 0 on success.
+
+This capability allows VMware backdoors to be handled by L0 in nested
+configurations. This is required when, for example, running a Windows
+guest with Hyper-V VBS enabled - in that configuration the VMware
+backdoor calls issued by VMware Tools would end up in Hyper-V (L1),
+which doesn't handle VMware backdoors. Enable this option to have
+VMware backdoor calls sent to the L0 monitor.
+
8. Other capabilities.
======================
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 86bacda2802e..2a806aa93a9e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1197,6 +1197,7 @@ struct kvm_xen {
struct kvm_vmware {
bool backdoor_enabled;
bool hypercall_enabled;
+ bool nested_backdoor_l0_enabled;
};
#endif
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f817601924bd..8fefde6f2e78 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -190,6 +190,7 @@ config KVM_VMWARE
formatted IN, OUT and RDPMC instructions which need to be
intercepted.
- VMware hypercall interface: VMware hypercalls exit to userspace
+ - VMware legacy backdoor handling in L0
If unsure, say "Y".
diff --git a/arch/x86/kvm/kvm_vmware.h b/arch/x86/kvm/kvm_vmware.h
index 846b90091a2a..d90bcf73bae4 100644
--- a/arch/x86/kvm/kvm_vmware.h
+++ b/arch/x86/kvm/kvm_vmware.h
@@ -9,6 +9,9 @@
#include <linux/kvm_host.h>
+#include "asm/vmware.h"
+#include "x86.h"
+
#ifdef CONFIG_KVM_VMWARE
#define VMWARE_BACKDOOR_PMC_HOST_TSC 0x10000
@@ -98,6 +101,35 @@ static inline bool kvm_vmware_hypercall_enabled(struct kvm *kvm)
return kvm->arch.vmware.hypercall_enabled;
}
+static inline bool kvm_vmware_nested_backdoor_l0_enabled(struct kvm *kvm)
+{
+ return kvm->arch.vmware.backdoor_enabled &&
+ kvm->arch.vmware.nested_backdoor_l0_enabled;
+}
+
+static inline bool kvm_vmware_wants_backdoor_to_l0(struct kvm_vcpu *vcpu, u32 cpl)
+{
+ /* We only care about the lower 32 bits */
+ const unsigned long mask = 0xffffffff;
+ const unsigned long port_mask = 0xffff;
+ unsigned long rax, rdx;
+
+ if (!kvm_vmware_nested_backdoor_l0_enabled(vcpu->kvm))
+ return false;
+
+ if (cpl != 3)
+ return false;
+
+ rax = kvm_rax_read(vcpu) & mask;
+ if (rax == VMWARE_HYPERVISOR_MAGIC) {
+ rdx = kvm_rdx_read(vcpu) & port_mask;
+ return (rdx == VMWARE_HYPERVISOR_PORT ||
+ rdx == VMWARE_HYPERVISOR_PORT_HB);
+ }
+
+ return false;
+}
+
void kvm_vmware_init_vm(struct kvm *kvm);
int kvm_vmware_hypercall(struct kvm_vcpu *vcpu);
@@ -142,6 +174,16 @@ static inline int kvm_vmware_hypercall(struct kvm_vcpu *vcpu)
return 0;
}
+static inline bool kvm_vmware_nested_backdoor_l0_enabled(struct kvm *kvm)
+{
+ return false;
+}
+
+static inline bool kvm_vmware_wants_backdoor_to_l0(struct kvm_vcpu *vcpu, u32 cpl)
+{
+ return false;
+}
+
#endif /* CONFIG_KVM_VMWARE */
#endif /* __ARCH_X86_KVM_VMWARE_H__ */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 04c375bf1ac2..74c472e51479 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -22,6 +22,7 @@
#include <asm/debugreg.h>
#include "kvm_emulate.h"
+#include "kvm_vmware.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
@@ -1517,6 +1518,11 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
svm->vcpu.arch.apf.host_apf_flags)
/* Trap async PF even if not shadowing */
return NESTED_EXIT_HOST;
+#ifdef CONFIG_KVM_VMWARE
+ else if ((exit_code == (SVM_EXIT_EXCP_BASE + GP_VECTOR)) &&
+ kvm_vmware_wants_backdoor_to_l0(vcpu, to_svm(vcpu)->vmcb->save.cpl))
+ return NESTED_EXIT_HOST;
+#endif
break;
}
case SVM_EXIT_VMMCALL:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index be106bd60553..96996e7f9de4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2407,7 +2407,8 @@ static int gp_interception(struct kvm_vcpu *vcpu)
* VMware backdoor emulation on #GP interception only handles
* IN{S}, OUT{S}, and RDPMC.
*/
- if (!is_guest_mode(vcpu))
+ if (!is_guest_mode(vcpu) ||
+ kvm_vmware_wants_backdoor_to_l0(vcpu, svm_get_cpl(vcpu)))
return kvm_emulate_instruction(vcpu,
EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
} else {
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ed8a3cb53961..ff8a1dbbba01 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -10,6 +10,7 @@
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
+#include "kvm_vmware.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
@@ -6386,6 +6387,11 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
return true;
else if (is_ve_fault(intr_info))
return true;
+#ifdef CONFIG_KVM_VMWARE
+ else if (is_gp_fault(intr_info) &&
+ kvm_vmware_wants_backdoor_to_l0(vcpu, vmx_get_cpl(vcpu)))
+ return true;
+#endif
return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 300cef9a37e2..5dc57bc57851 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4653,6 +4653,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_KVM_VMWARE
case KVM_CAP_X86_VMWARE_BACKDOOR:
case KVM_CAP_X86_VMWARE_HYPERCALL:
+ case KVM_CAP_X86_VMWARE_NESTED_BACKDOOR_L0:
#endif
r = 1;
break;
@@ -6754,6 +6755,13 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm->arch.vmware.hypercall_enabled = cap->args[0];
r = 0;
break;
+ case KVM_CAP_X86_VMWARE_NESTED_BACKDOOR_L0:
+ r = -EINVAL;
+ if (cap->args[0] & ~1)
+ break;
+ kvm->arch.vmware.nested_backdoor_l0_enabled = cap->args[0];
+ r = 0;
+ break;
#endif
default:
r = -EINVAL;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index adf1a1449c06..f5d63c0c79f5 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -955,6 +955,7 @@ struct kvm_enable_cap {
#define KVM_CAP_X86_GUEST_MODE 238
#define KVM_CAP_X86_VMWARE_BACKDOOR 239
#define KVM_CAP_X86_VMWARE_HYPERCALL 240
+#define KVM_CAP_X86_VMWARE_NESTED_BACKDOOR_L0 241
struct kvm_irq_routing_irqchip {
__u32 irqchip;
--
2.48.1