lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <aIEkrc7Mdf2ia1Mm@google.com>
Date: Wed, 23 Jul 2025 11:06:37 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Zack Rusin <zack.rusin@...adcom.com>
Cc: linux-kernel@...r.kernel.org, Doug Covelli <doug.covelli@...adcom.com>, 
	Paolo Bonzini <pbonzini@...hat.com>, Jonathan Corbet <corbet@....net>, 
	Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>, 
	Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org, 
	"H. Peter Anvin" <hpa@...or.com>, kvm@...r.kernel.org, linux-doc@...r.kernel.org
Subject: Re: [PATCH v2 2/5] KVM: x86: Allow enabling of the vmware backdoor
 via a cap

On Tue, Apr 22, 2025, Zack Rusin wrote:
> @@ -6735,6 +6734,19 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
>  		mutex_unlock(&kvm->lock);
>  		break;
>  	}
> +#ifdef CONFIG_KVM_VMWARE
> +	case KVM_CAP_X86_VMWARE_BACKDOOR:

I much prefer using a single KVM_CAP_X86_VMWARE capability.  More below.

> +		r = -EINVAL;
> +		if (cap->args[0] & ~1)

Using bit 0 for "enable" needs to be #defined in arch/x86/include/uapi/asm/kvm.h.

At that point, adding more capabilities for the other VMware functionality doesn't
make much sense, especially since the capabilities that are added in later patches
don't have the kvm->created_vcpus protection, i.e. are likely buggy.

E.g. with the below diff (completely untested, probably won't apply cleanly?)
spread across three-ish patches, the accessors can be:

/* Query whether the given VMware capability bit(s) are enabled for @kvm. */
static inline bool kvm_is_vmware_cap_enabled(struct kvm *kvm, u64 cap)
{
	return (kvm->arch.vmware.caps & cap) != 0;
}

static inline bool kvm_is_vmware_backdoor_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_is_vmware_cap_enabled(kvm, KVM_VMWARE_ENABLE_BACKDOOR);
}

/* Query whether VMware hypercall handling is enabled for @kvm. */
static inline bool kvm_is_vmware_hypercall_enabled(struct kvm *kvm)
{
	return kvm_is_vmware_cap_enabled(kvm, KVM_VMWARE_ENABLE_HYPERCALL) != 0;
}

static inline bool kvm_vmware_nested_backdoor_l0_enabled(struct kvm *kvm)
{
	return kvm_is_vmware_backdoor_enabled(kvm) &&
	       kvm_is_vmware_cap_enabled(kvm, KVM_VMWARE_ENABLE_NESTED_BACKDOOR);
}

---
 arch/x86/include/asm/kvm_host.h |  4 +---
 arch/x86/include/uapi/asm/kvm.h |  4 ++++
 arch/x86/kvm/x86.c              | 34 ++++++++++++---------------------
 3 files changed, 17 insertions(+), 25 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 639c49db0106..1433cdd14675 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1219,9 +1219,7 @@ struct kvm_xen {
 #ifdef CONFIG_KVM_VMWARE
 /* VMware emulation context */
 struct kvm_vmware {
-	bool backdoor_enabled;
-	bool hypercall_enabled;
-	bool nested_backdoor_l0_enabled;
+	u64 caps;
 };
 #endif
 
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index e019111e2150..ae578422d6f4 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -1013,4 +1013,8 @@ struct kvm_tdx_init_mem_region {
 	__u64 nr_pages;
 };
 
+#define KVM_VMWARE_ENABLE_BACKDOOR		_BITULL(0)
+#define KVM_VMWARE_ENABLE_HYPERCALL		_BITULL(1)
+#define KVM_VMWARE_ENABLE_NESTED_BACKDOOR	_BITULL(2)
+
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7234333a92d8..b9e2faf0ceb7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -126,6 +126,10 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
 
+#define KVM_CAP_VMWARE_VALID_MASK (KVM_VMWARE_ENABLE_BACKDOOR | \
+				   KVM_VMWARE_ENABLE_HYPERCALL | \
+				   KVM_VMWARE_ENABLE_NESTED_BACKDOOR)
+
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -4708,11 +4712,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_IRQFD_RESAMPLE:
 	case KVM_CAP_MEMORY_FAULT_INFO:
 	case KVM_CAP_X86_GUEST_MODE:
-#ifdef CONFIG_KVM_VMWARE
-	case KVM_CAP_X86_VMWARE_BACKDOOR:
-	case KVM_CAP_X86_VMWARE_HYPERCALL:
-	case KVM_CAP_X86_VMWARE_NESTED_BACKDOOR_L0:
-#endif
 		r = 1;
 		break;
 	case KVM_CAP_PRE_FAULT_MEMORY:
@@ -4836,6 +4835,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_READONLY_MEM:
 		r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
 		break;
+#ifdef CONFIG_KVM_VMWARE
+	case KVM_CAP_X86_VMWARE:
+		return KVM_CAP_VMWARE_VALID_MASK;
+#endif
 	default:
 		break;
 	}
@@ -6669,31 +6672,18 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 		break;
 	}
 #ifdef CONFIG_KVM_VMWARE
-	case KVM_CAP_X86_VMWARE_BACKDOOR:
+	case KVM_CAP_X86_VMWARE:
 		r = -EINVAL;
-		if (cap->args[0] & ~1)
+		if (cap->args[0] & ~KVM_CAP_VMWARE_VALID_MASK)
 			break;
+
 		mutex_lock(&kvm->lock);
 		if (!kvm->created_vcpus) {
-			kvm->arch.vmware.backdoor_enabled = cap->args[0];
+			kvm->arch.vmware.caps = cap->args[0];
 			r = 0;
 		}
 		mutex_unlock(&kvm->lock);
 		break;
-	case KVM_CAP_X86_VMWARE_HYPERCALL:
-		r = -EINVAL;
-		if (cap->args[0] & ~1)
-			break;
-		kvm->arch.vmware.hypercall_enabled = cap->args[0];
-		r = 0;
-		break;
-	case KVM_CAP_X86_VMWARE_NESTED_BACKDOOR_L0:
-		r = -EINVAL;
-		if (cap->args[0] & ~1)
-			break;
-		kvm->arch.vmware.nested_backdoor_l0_enabled = cap->args[0];
-		r = 0;
-		break;
 #endif
 	default:
 		r = -EINVAL;

base-commit: 77a53b6f5d1c2dabef34d890d212910ed1f43bcb
--

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ