Message-Id: <832eefa4e73f08f196ef009263a145d1fb1d7363.1690322424.git.isaku.yamahata@intel.com>
Date: Tue, 25 Jul 2023 15:13:30 -0700
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>, erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
hang.yuan@...el.com, tina.zhang@...el.com
Subject: [PATCH v15 019/115] KVM: x86, tdx: Make KVM_CAP_MAX_VCPUS backend specific
From: Isaku Yamahata <isaku.yamahata@...el.com>
TDX has its own limit on the maximum number of vCPUs that a guest can
accommodate. Allow the x86 KVM backend to implement its own KVM_ENABLE_CAP
handler and implement a TDX backend for KVM_CAP_MAX_VCPUS so that a user
space VMM, e.g. QEMU, can specify the limit itself instead of relying on
KVM_MAX_VCPUS.
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
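Usage note, not part of the patch: the sketch below only illustrates the
intended flow from a user space VMM. The helper name td_set_max_vcpus, the
fd handling and the error handling are made up for the example;
KVM_ENABLE_CAP, KVM_CHECK_EXTENSION and struct kvm_enable_cap are the
existing KVM uAPI.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: clamp the per-VM vCPU limit before any vCPU is created. */
	static int td_set_max_vcpus(int vm_fd, unsigned int nr_vcpus)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_MAX_VCPUS,
			/* must be non-zero and within KVM_MAX_VCPUS/TDX_MAX_VCPUS */
			.args[0] = nr_vcpus,
		};

		/* Rejected with errno EBUSY once any vCPU has been created. */
		if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
			return -1;

		/* On the VM fd this now reports the backend-specific limit. */
		return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	}

With this patch, KVM_CHECK_EXTENSION(KVM_CAP_MAX_VCPUS) issued on the VM fd
reflects the adjusted, backend-specific limit rather than the compile-time
KVM_MAX_VCPUS.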
arch/x86/include/asm/kvm-x86-ops.h | 2 ++
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/vmx/main.c | 22 ++++++++++++++++++++++
arch/x86/kvm/vmx/tdx.c | 30 ++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/tdx.h | 3 +++
arch/x86/kvm/vmx/x86_ops.h | 5 +++++
arch/x86/kvm/x86.c | 4 ++++
7 files changed, 68 insertions(+)
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 7b22bf8d6686..ba79b97b2455 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -21,6 +21,8 @@ KVM_X86_OP(hardware_unsetup)
KVM_X86_OP(has_emulated_msr)
KVM_X86_OP(vcpu_after_set_cpuid)
KVM_X86_OP(is_vm_type_supported)
+KVM_X86_OP_OPTIONAL(max_vcpus)
+KVM_X86_OP_OPTIONAL(vm_enable_cap)
KVM_X86_OP(vm_init)
KVM_X86_OP_OPTIONAL(vm_destroy)
KVM_X86_OP_OPTIONAL_RET0(vcpu_precreate)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dc83bb543307..f2eefc322d42 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1546,7 +1546,9 @@ struct kvm_x86_ops {
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
bool (*is_vm_type_supported)(unsigned long vm_type);
+ int (*max_vcpus)(struct kvm *kvm);
unsigned int vm_size;
+ int (*vm_enable_cap)(struct kvm *kvm, struct kvm_enable_cap *cap);
int (*vm_init)(struct kvm *kvm);
void (*vm_destroy)(struct kvm *kvm);
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index fcd2516088ce..76ea59374ad0 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -6,6 +6,7 @@
#include "nested.h"
#include "pmu.h"
#include "tdx.h"
+#include "tdx_arch.h"
static bool enable_tdx __ro_after_init;
module_param_named(tdx, enable_tdx, bool, 0444);
@@ -16,6 +17,17 @@ static bool vt_is_vm_type_supported(unsigned long type)
(enable_tdx && tdx_is_vm_type_supported(type));
}
+static int vt_max_vcpus(struct kvm *kvm)
+{
+ if (!kvm)
+ return KVM_MAX_VCPUS;
+
+ if (is_td(kvm))
+ return min3(kvm->max_vcpus, KVM_MAX_VCPUS, TDX_MAX_VCPUS);
+
+ return kvm->max_vcpus;
+}
+
static int vt_hardware_enable(void)
{
int ret;
@@ -43,6 +55,14 @@ static __init int vt_hardware_setup(void)
return 0;
}
+static int vt_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ if (is_td(kvm))
+ return tdx_vm_enable_cap(kvm, cap);
+
+ return -EINVAL;
+}
+
static int vt_vm_init(struct kvm *kvm)
{
if (is_td(kvm))
@@ -80,7 +100,9 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.has_emulated_msr = vmx_has_emulated_msr,
.is_vm_type_supported = vt_is_vm_type_supported,
+ .max_vcpus = vt_max_vcpus,
.vm_size = sizeof(struct kvm_vmx),
+ .vm_enable_cap = vt_vm_enable_cap,
.vm_init = vt_vm_init,
.vm_destroy = vmx_vm_destroy,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 229c079d7686..8901ae86c9da 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -17,6 +17,36 @@
offsetof(struct tdsysinfo_struct, cpuid_configs)) \
/ sizeof(struct tdx_cpuid_config))
+int tdx_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ int r;
+
+ switch (cap->cap) {
+ case KVM_CAP_MAX_VCPUS: {
+ if (cap->flags || cap->args[0] == 0)
+ return -EINVAL;
+ if (cap->args[0] > KVM_MAX_VCPUS)
+ return -E2BIG;
+ if (cap->args[0] > TDX_MAX_VCPUS)
+ return -E2BIG;
+
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus)
+ r = -EBUSY;
+ else {
+ kvm->max_vcpus = cap->args[0];
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
{
struct kvm_tdx_capabilities __user *user_caps;
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index 473013265bd8..22c0b57f69ca 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -3,6 +3,9 @@
#define __KVM_X86_TDX_H
#ifdef CONFIG_INTEL_TDX_HOST
+
+#include "tdx_ops.h"
+
struct kvm_tdx {
struct kvm kvm;
/* TDX specific members follow. */
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 1a6bf336ca60..cb96a9af9e79 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -139,11 +139,16 @@ void vmx_setup_mce(struct kvm_vcpu *vcpu);
int __init tdx_hardware_setup(struct kvm_x86_ops *x86_ops);
bool tdx_is_vm_type_supported(unsigned long type);
+int tdx_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
#else
static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -EOPNOTSUPP; }
static inline bool tdx_is_vm_type_supported(unsigned long type) { return false; }
+static inline int tdx_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ return -EINVAL;
+}
static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d700da8ff4f2..6970e6198608 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4572,6 +4572,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
+ if (kvm_x86_ops.max_vcpus)
+ r = static_call(kvm_x86_max_vcpus)(kvm);
break;
case KVM_CAP_MAX_VCPU_ID:
r = KVM_MAX_VCPU_IDS;
@@ -6505,6 +6507,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
break;
default:
r = -EINVAL;
+ if (kvm_x86_ops.vm_enable_cap)
+ r = static_call(kvm_x86_vm_enable_cap)(kvm, cap);
break;
}
return r;
--
2.25.1