Message-ID: <20241030033514.1728937-2-zack.rusin@broadcom.com>
Date: Tue, 29 Oct 2024 23:34:07 -0400
From: Zack Rusin <zack.rusin@...adcom.com>
To: kvm@...r.kernel.org
Cc: Zack Rusin <zack.rusin@...adcom.com>,
Doug Covelli <doug.covelli@...adcom.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Shuah Khan <shuah@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Isaku Yamahata <isaku.yamahata@...el.com>,
Joel Stanley <joel@....id.au>,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org
Subject: [PATCH 1/3] KVM: x86: Allow enabling of the vmware backdoor via a cap
Allow enabling of the VMware backdoor on a per-VM basis. Previously the
VMware backdoor could only be enabled system-wide via the
kvm.enable_vmware_backdoor kernel parameter, which required modifying the
kernel's boot parameters.

Add the KVM_CAP_X86_VMWARE_BACKDOOR cap that enables the backdoor at the
hypervisor level and allows setting it on a per-VM basis.

The per-VM setting defaults to whatever kvm.enable_vmware_backdoor was set
to, which is false unless the parameter is specified.
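
Userspace can then opt in per VM with the KVM_ENABLE_CAP ioctl on the VM
file descriptor, roughly along these lines (a minimal sketch; error
handling omitted and vm_fd assumed to be an already created VM fd):

  struct kvm_enable_cap cap = {
          .cap = KVM_CAP_X86_VMWARE_BACKDOOR,
          .args[0] = 1,   /* 1 = enable, 0 = disable */
  };

  /* Must be issued before any vCPUs are created. */
  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
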
Signed-off-by: Zack Rusin <zack.rusin@...adcom.com>
Cc: Doug Covelli <doug.covelli@...adcom.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: Jonathan Corbet <corbet@....net>
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: x86@...nel.org
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Shuah Khan <shuah@...nel.org>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: Isaku Yamahata <isaku.yamahata@...el.com>
Cc: Joel Stanley <joel@....id.au>
Cc: Zack Rusin <zack.rusin@...adcom.com>
Cc: kvm@...r.kernel.org
Cc: linux-doc@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Cc: linux-kselftest@...r.kernel.org
---
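
As the documentation hunk below notes, the presence of the capability
indicates that KVM supports enabling it, so userspace should probe for it
before use. A minimal sketch of such a probe (vm_fd is assumed to be an
open VM file descriptor and enable_vmware_backdoor_for_vm() a hypothetical
helper wrapping the KVM_ENABLE_CAP call shown above):

  int supported = ioctl(vm_fd, KVM_CHECK_EXTENSION,
                        KVM_CAP_X86_VMWARE_BACKDOOR);

  /* A positive return value means the capability is available. */
  if (supported > 0)
          enable_vmware_backdoor_for_vm(vm_fd);
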
Documentation/virt/kvm/api.rst | 15 +++++++++++++++
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/emulate.c | 5 +++--
arch/x86/kvm/svm/svm.c | 6 +++---
arch/x86/kvm/vmx/vmx.c | 4 ++--
arch/x86/kvm/x86.c | 14 ++++++++++++++
arch/x86/kvm/x86.h | 7 +++++--
include/uapi/linux/kvm.h | 1 +
8 files changed, 44 insertions(+), 9 deletions(-)
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index b3be87489108..33ef3cc785e4 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8186,6 +8186,21 @@ KVM exits with the register state of either the L1 or L2 guest
depending on which executed at the time of an exit. Userspace must
take care to differentiate between these cases.
+7.37 KVM_CAP_X86_VMWARE_BACKDOOR
+--------------------------------
+
+:Architectures: x86
+:Parameters: args[0] whether the feature should be enabled or not
+:Returns: 0 on success.
+
+The presence of this capability indicates that KVM supports
+enabling of the VMware backdoor via the enable cap interface.
+
+When enabled, KVM will support the VMware backdoor PV interface. The
+default value is taken from the kvm.enable_vmware_backdoor kernel
+parameter (false when not set). The capability must be enabled before
+any VCPUs have been created.
+
8. Other capabilities.
======================
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4a68cb3eba78..7fcf185e337f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1403,6 +1403,7 @@ struct kvm_arch {
#ifdef CONFIG_KVM_XEN
struct kvm_xen xen;
#endif
+ bool vmware_backdoor_enabled;
bool backwards_tsc_observed;
bool boot_vcpu_runs_old_kvmclock;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e72aed25d721..8aee73f9a560 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2563,7 +2563,7 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
* VMware allows access to these ports even if denied
* by TSS I/O permission bitmap. Mimic behavior.
*/
- if (enable_vmware_backdoor &&
+ if (kvm_vmware_backdoor_enabled(ctxt->vcpu) &&
((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
return true;
@@ -3917,7 +3917,8 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
* VMware allows access to these Pseduo-PMCs even when read via RDPMC
* in Ring3 when CR4.PCE=0.
*/
- if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
+ if (kvm_vmware_backdoor_enabled(ctxt->vcpu) &&
+ is_vmware_backdoor_pmc(rcx))
return X86EMUL_CONTINUE;
/*
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5ab2c92c7331..a55655337cfa 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -314,7 +314,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
svm_leave_nested(vcpu);
svm_set_gif(svm, true);
/* #GP intercept is still needed for vmware backdoor */
- if (!enable_vmware_backdoor)
+ if (!kvm_vmware_backdoor_enabled(vcpu))
clr_exception_intercept(svm, GP_VECTOR);
/*
@@ -1262,7 +1262,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
* We intercept those #GP and allow access to them anyway
* as VMware does.
*/
- if (enable_vmware_backdoor)
+ if (kvm_vmware_backdoor_enabled(vcpu))
set_exception_intercept(svm, GP_VECTOR);
svm_set_intercept(svm, INTERCEPT_INTR);
@@ -2401,7 +2401,7 @@ static int gp_interception(struct kvm_vcpu *vcpu)
opcode = svm_instr_opcode(vcpu);
if (opcode == NONE_SVM_INSTR) {
- if (!enable_vmware_backdoor)
+ if (!kvm_vmware_backdoor_enabled(vcpu))
goto reinject;
/*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 733a0c45d1a6..6b874c629b82 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -885,7 +885,7 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
* We intercept those #GP and allow access to them anyway
* as VMware does.
*/
- if (enable_vmware_backdoor)
+ if (kvm_vmware_backdoor_enabled(vcpu))
eb |= (1u << GP_VECTOR);
if ((vcpu->guest_debug &
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -5249,7 +5249,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
- WARN_ON_ONCE(!enable_vmware_backdoor);
+ WARN_ON_ONCE(!kvm_vmware_backdoor_enabled(vcpu));
/*
* VMware backdoor emulation on #GP interception only handles
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c983c8e434b8..d7071907d6a5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4688,6 +4688,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IRQFD_RESAMPLE:
case KVM_CAP_MEMORY_FAULT_INFO:
case KVM_CAP_X86_GUEST_MODE:
+ case KVM_CAP_X86_VMWARE_BACKDOOR:
r = 1;
break;
case KVM_CAP_PRE_FAULT_MEMORY:
@@ -6772,6 +6773,17 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
mutex_unlock(&kvm->lock);
break;
}
+ case KVM_CAP_X86_VMWARE_BACKDOOR:
+ r = -EINVAL;
+ if (cap->args[0] & ~1)
+ break;
+ mutex_lock(&kvm->lock);
+ if (!kvm->created_vcpus) {
+ kvm->arch.vmware_backdoor_enabled = cap->args[0];
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ break;
default:
r = -EINVAL;
break;
@@ -12685,6 +12697,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.guest_can_read_msr_platform_info = true;
kvm->arch.enable_pmu = enable_pmu;
+ kvm->arch.vmware_backdoor_enabled = enable_vmware_backdoor;
+
#if IS_ENABLED(CONFIG_HYPERV)
spin_lock_init(&kvm->arch.hv_root_tdp_lock);
kvm->arch.hv_root_tdp = INVALID_PAGE;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 50596f6f8320..ae278a48566a 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -376,14 +376,17 @@ static inline bool kvm_mpx_supported(void)
extern unsigned int min_timer_period_us;
-extern bool enable_vmware_backdoor;
-
extern int pi_inject_timer;
extern bool report_ignored_msrs;
extern bool eager_page_split;
+static inline bool kvm_vmware_backdoor_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.vmware_backdoor_enabled;
+}
+
static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
if (report_ignored_msrs)
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 637efc055145..c7b5f1c2ee1c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -933,6 +933,7 @@ struct kvm_enable_cap {
#define KVM_CAP_PRE_FAULT_MEMORY 236
#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237
#define KVM_CAP_X86_GUEST_MODE 238
+#define KVM_CAP_X86_VMWARE_BACKDOOR 239
struct kvm_irq_routing_irqchip {
__u32 irqchip;
--
2.43.0