Message-Id: <bec70bdd4a6e30b08eb252b939d7832b08eb4930.1667110240.git.isaku.yamahata@intel.com>
Date: Sat, 29 Oct 2022 23:22:20 -0700
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>, erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>
Subject: [PATCH v10 019/108] KVM: TDX: Add placeholder for TDX VM-specific mem_enc_op ioctl
From: Isaku Yamahata <isaku.yamahata@...el.com>
Add a placeholder function for the TDX-specific VM-scoped ioctl as the
mem_enc_op hook. TDX-specific sub-commands will be added later to retrieve
and pass TDX-specific parameters.

KVM_MEMORY_ENCRYPT_OP was introduced for VM-scoped operations specific to
guest state-protected VMs and defines sub-commands for technology-specific
operations. Despite its name, the sub-commands are not limited to memory
encryption; various technology-specific operations are defined under it.
It is therefore natural to repurpose KVM_MEMORY_ENCRYPT_OP for TDX-specific
operations and define TDX sub-commands under it.

TDX requires VM-scoped, TDX-specific operations from the device model (for
example, qemu), such as retrieving system-wide parameters and performing
TDX-specific VM initialization.

Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
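Not part of the patch, but to illustrate the intended use: below is a
minimal userspace sketch of how a VMM such as qemu could eventually drive
this hook through KVM_MEMORY_ENCRYPT_OP on the VM file descriptor. The
struct layout only mirrors the fields tdx_vm_ioctl() actually touches
(id, error, unused); the flags/data fields, the struct name and the helper
are illustrative assumptions, not definitions from this patch.

/*
 * Illustrative sketch only (not from the patch): invoke the VM-scoped
 * mem_enc_op hook from userspace.  Field names beyond id/error/unused
 * and the sub-command value passed in are assumptions.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* KVM_MEMORY_ENCRYPT_OP, __u32/__u64 */

struct kvm_tdx_cmd_sketch {
	__u32 id;	/* TDX sub-command; none are defined by this patch */
	__u32 flags;	/* assumed field, unused here */
	__u64 data;	/* assumed payload (immediate or pointer) */
	__u64 error;	/* must be zero on entry; tdx_vm_ioctl() rejects it */
	__u64 unused;	/* must be zero on entry; tdx_vm_ioctl() rejects it */
};

static int tdx_vm_cmd(int vm_fd, __u32 id, __u64 data)
{
	struct kvm_tdx_cmd_sketch cmd;

	memset(&cmd, 0, sizeof(cmd));	/* keep error/unused zero */
	cmd.id = id;
	cmd.data = data;

	/*
	 * On a non-TD VM this fails with ENOTTY (vt_mem_enc_ioctl).  With
	 * only this patch applied, every id falls through to the default
	 * case in tdx_vm_ioctl() and the ioctl fails with EINVAL; real
	 * sub-commands come in later patches.
	 */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}
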
 arch/x86/kvm/vmx/main.c    |  9 +++++++++
 arch/x86/kvm/vmx/tdx.c     | 26 ++++++++++++++++++++++++++
 arch/x86/kvm/vmx/x86_ops.h |  4 ++++
 3 files changed, 39 insertions(+)
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index aedba5acb8eb..b4e4c6c677f6 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -63,6 +63,14 @@ static void vt_vm_free(struct kvm *kvm)
return tdx_vm_free(kvm);
}
+static int vt_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
+{
+ if (!is_td(kvm))
+ return -ENOTTY;
+
+ return tdx_vm_ioctl(kvm, argp);
+}
+
struct kvm_x86_ops vt_x86_ops __initdata = {
.name = "kvm_intel",
@@ -205,6 +213,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
.dev_mem_enc_ioctl = tdx_dev_ioctl,
+ .mem_enc_ioctl = vt_mem_enc_ioctl,
};
struct kvm_x86_init_ops vt_init_ops __initdata = {
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 5a3ed8217a54..d77709a6da51 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -477,6 +477,32 @@ int tdx_dev_ioctl(void __user *argp)
return 0;
}
+int tdx_vm_ioctl(struct kvm *kvm, void __user *argp)
+{
+ struct kvm_tdx_cmd tdx_cmd;
+ int r;
+
+ if (copy_from_user(&tdx_cmd, argp, sizeof(struct kvm_tdx_cmd)))
+ return -EFAULT;
+ if (tdx_cmd.error || tdx_cmd.unused)
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+
+ switch (tdx_cmd.id) {
+ default:
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &tdx_cmd, sizeof(struct kvm_tdx_cmd)))
+ r = -EFAULT;
+
+out:
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+
static int __init tdx_module_setup(void)
{
const struct tdsysinfo_struct *tdsysinfo;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 057f2be3d818..93ffe2deb8e8 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -140,6 +140,8 @@ int tdx_dev_ioctl(void __user *argp);
int tdx_vm_init(struct kvm *kvm);
void tdx_mmu_release_hkid(struct kvm *kvm);
void tdx_vm_free(struct kvm *kvm);
+
+int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
#else
static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return 0; }
static inline bool tdx_is_vm_type_supported(unsigned long type) { return false; }
@@ -151,6 +153,8 @@ static inline int tdx_vm_init(struct kvm *kvm) { return -EOPNOTSUPP; }
static inline void tdx_mmu_release_hkid(struct kvm *kvm) {}
static inline void tdx_flush_shadow_all_private(struct kvm *kvm) {}
static inline void tdx_vm_free(struct kvm *kvm) {}
+
+static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
#endif
#endif /* __KVM_X86_VMX_X86_OPS_H */
--
2.25.1