Message-ID: <7cc28677-f7d1-4aba-8557-66c685115074@linux.intel.com>
Date: Wed, 24 Jan 2024 09:17:15 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
 isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
 erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
 Sagi Shahar <sagis@...gle.com>, Kai Huang <kai.huang@...el.com>,
 chen.bo@...el.com, hang.yuan@...el.com, tina.zhang@...el.com
Subject: Re: [PATCH v18 023/121] KVM: TDX: Make KVM_CAP_MAX_VCPUS backend
 specific



On 1/23/2024 7:52 AM, isaku.yamahata@...el.com wrote:
> From: Isaku Yamahata <isaku.yamahata@...el.com>
>
> TDX has its own limit on the maximum number of vcpus that the guest
> can accommodate.  Allow the x86 KVM backend to implement its own
> KVM_ENABLE_CAP handler and implement a TDX backend for
> KVM_CAP_MAX_VCPUS, so that a user space VMM, e.g. qemu, can specify
> its value instead of KVM_MAX_VCPUS.
For a legacy VM, KVM just provides an interface to query max_vcpus.
Why does a TD need to provide an interface for userspace to set the
limit?  What's the scenario?
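
Just to check my understanding of the intended flow: is the scenario
something like the sketch below, i.e. the VMM clamping the limit on the
VM fd before any vcpu is created?  (Minimal userspace sketch only, not
taken from qemu; the vm type passed to KVM_CREATE_VM is elided and
error handling is reduced to perror.)

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		/* For a TD this would pass the TDX vm type from this series;
		 * the default type 0 is used here only to keep the sketch small. */
		int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_MAX_VCPUS;
		cap.args[0] = 4;	/* desired limit, 0 < value <= TDX_MAX_VCPUS */

		/* Must happen before KVM_CREATE_VCPU, otherwise -EBUSY. */
		if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
			perror("KVM_ENABLE_CAP(KVM_CAP_MAX_VCPUS)");
		return 0;
	}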


>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> ---
> v18:
> - use TDX instead of "x86, tdx" in subject
> - use min(max_vcpu, TDX_MAX_VCPU) instead of
>    min3(max_vcpu, KVM_MAX_VCPU, TDX_MAX_VCPU)
> - make "if (KVM_MAX_VCPU) and if (TDX_MAX_VCPU)" into one if statement
> ---
>   arch/x86/include/asm/kvm-x86-ops.h |  2 ++
>   arch/x86/include/asm/kvm_host.h    |  2 ++
>   arch/x86/kvm/vmx/main.c            | 22 ++++++++++++++++++++++
>   arch/x86/kvm/vmx/tdx.c             | 29 +++++++++++++++++++++++++++++
>   arch/x86/kvm/vmx/x86_ops.h         |  5 +++++
>   arch/x86/kvm/x86.c                 |  4 ++++
>   6 files changed, 64 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 943b21b8b106..2f976c0f3116 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -21,6 +21,8 @@ KVM_X86_OP(hardware_unsetup)
>   KVM_X86_OP(has_emulated_msr)
>   KVM_X86_OP(vcpu_after_set_cpuid)
>   KVM_X86_OP(is_vm_type_supported)
> +KVM_X86_OP_OPTIONAL(max_vcpus)
> +KVM_X86_OP_OPTIONAL(vm_enable_cap)
>   KVM_X86_OP(vm_init)
>   KVM_X86_OP_OPTIONAL(vm_destroy)
>   KVM_X86_OP_OPTIONAL_RET0(vcpu_precreate)
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 26f4668b0273..db44a92e5659 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1602,7 +1602,9 @@ struct kvm_x86_ops {
>   	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
>   
>   	bool (*is_vm_type_supported)(unsigned long vm_type);
> +	int (*max_vcpus)(struct kvm *kvm);
>   	unsigned int vm_size;
> +	int (*vm_enable_cap)(struct kvm *kvm, struct kvm_enable_cap *cap);
>   	int (*vm_init)(struct kvm *kvm);
>   	void (*vm_destroy)(struct kvm *kvm);
>   
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 50da807d7aea..4611f305a450 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -6,6 +6,7 @@
>   #include "nested.h"
>   #include "pmu.h"
>   #include "tdx.h"
> +#include "tdx_arch.h"
>   
>   static bool enable_tdx __ro_after_init;
>   module_param_named(tdx, enable_tdx, bool, 0444);
> @@ -16,6 +17,17 @@ static bool vt_is_vm_type_supported(unsigned long type)
>   		(enable_tdx && tdx_is_vm_type_supported(type));
>   }
>   
> +static int vt_max_vcpus(struct kvm *kvm)
> +{
> +	if (!kvm)
> +		return KVM_MAX_VCPUS;
> +
> +	if (is_td(kvm))
> +		return min(kvm->max_vcpus, TDX_MAX_VCPUS);
> +
> +	return kvm->max_vcpus;
> +}
> +
>   static int vt_hardware_enable(void)
>   {
>   	int ret;
> @@ -54,6 +66,14 @@ static void vt_hardware_unsetup(void)
>   	vmx_hardware_unsetup();
>   }
>   
> +static int vt_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
> +{
> +	if (is_td(kvm))
> +		return tdx_vm_enable_cap(kvm, cap);
> +
> +	return -EINVAL;
> +}
> +
>   static int vt_vm_init(struct kvm *kvm)
>   {
>   	if (is_td(kvm))
> @@ -91,7 +111,9 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>   	.has_emulated_msr = vmx_has_emulated_msr,
>   
>   	.is_vm_type_supported = vt_is_vm_type_supported,
> +	.max_vcpus = vt_max_vcpus,
>   	.vm_size = sizeof(struct kvm_vmx),
> +	.vm_enable_cap = vt_vm_enable_cap,
>   	.vm_init = vt_vm_init,
>   	.vm_destroy = vmx_vm_destroy,
>   
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 8c463407f8a8..876ad7895b88 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -100,6 +100,35 @@ struct tdx_info {
>   /* Info about the TDX module. */
>   static struct tdx_info *tdx_info;
>   
> +int tdx_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
> +{
> +	int r;
> +
> +	switch (cap->cap) {
> +	case KVM_CAP_MAX_VCPUS: {
> +		if (cap->flags || cap->args[0] == 0)
> +			return -EINVAL;
> +		if (cap->args[0] > KVM_MAX_VCPUS ||
> +		    cap->args[0] > TDX_MAX_VCPUS)
> +			return -E2BIG;
> +
> +		mutex_lock(&kvm->lock);
> +		if (kvm->created_vcpus)
> +			r = -EBUSY;
> +		else {
> +			kvm->max_vcpus = cap->args[0];
> +			r = 0;
> +		}
> +		mutex_unlock(&kvm->lock);
> +		break;
> +	}
> +	default:
> +		r = -EINVAL;
> +		break;
> +	}
> +	return r;
> +}
> +
>   static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
>   {
>   	struct kvm_tdx_capabilities __user *user_caps;
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index 6e238142b1e8..3a3be66888da 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -139,12 +139,17 @@ int __init tdx_hardware_setup(struct kvm_x86_ops *x86_ops);
>   void tdx_hardware_unsetup(void);
>   bool tdx_is_vm_type_supported(unsigned long type);
>   
> +int tdx_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
>   int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
>   #else
>   static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -EOPNOTSUPP; }
>   static inline void tdx_hardware_unsetup(void) {}
>   static inline bool tdx_is_vm_type_supported(unsigned long type) { return false; }
>   
> +static inline int tdx_vm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
> +{
> +	return -EINVAL;
> +};
>   static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
>   #endif
>   
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index dd3a23d56621..a1389ddb1b33 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4726,6 +4726,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>   		break;
>   	case KVM_CAP_MAX_VCPUS:
>   		r = KVM_MAX_VCPUS;
> +		if (kvm_x86_ops.max_vcpus)
> +			r = static_call(kvm_x86_max_vcpus)(kvm);
>   		break;
>   	case KVM_CAP_MAX_VCPU_ID:
>   		r = KVM_MAX_VCPU_IDS;
> @@ -6683,6 +6685,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
>   		break;
>   	default:
>   		r = -EINVAL;
> +		if (kvm_x86_ops.vm_enable_cap)
> +			r = static_call(kvm_x86_vm_enable_cap)(kvm, cap);
>   		break;
>   	}
>   	return r;
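
And for the KVM_CAP_MAX_VCPUS change in kvm_vm_ioctl_check_extension()
above, just to confirm I read it right: a VM-scoped query now reports
the clamped per-VM value, while the system-scoped query still reports
KVM_MAX_VCPUS, roughly like this (sketch only, vm_fd being the TD's VM
fd in userspace):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int query_max_vcpus(int vm_fd)
	{
		/*
		 * On a VM fd this goes through vt_max_vcpus(), so for a TD it
		 * is min(kvm->max_vcpus, TDX_MAX_VCPUS); on /dev/kvm itself
		 * kvm is NULL and KVM_MAX_VCPUS is still returned.
		 */
		return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	}

If that distinction is the point, it might be worth spelling it out in
the changelog.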

