[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAAhR5DF4opiZQXD6T0c0BQctEDjTQDeK4Zn8kvkPy3_gRns+8Q@mail.gmail.com>
Date: Tue, 18 Apr 2023 10:51:48 -0700
From: Sagi Shahar <sagis@...gle.com>
To: Zhi Wang <zhi.wang.linux@...il.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, x86@...nel.org,
Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Isaku Yamahata <isaku.yamahata@...el.com>,
Erdem Aktas <erdemaktas@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Chao Peng <chao.p.peng@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>
Subject: Re: [RFC PATCH 3/5] KVM: TDX: Add base implementation for tdx_vm_move_enc_context_from
On Tue, Apr 18, 2023 at 5:12 AM Zhi Wang <zhi.wang.linux@...il.com> wrote:
>
> On Fri, 7 Apr 2023 20:19:19 +0000
> Sagi Shahar <sagis@...gle.com> wrote:
>
> What was the status of the src VM when calling the vm_move_enc_context_from?
> Is it still active like common live migration or it has been paused?
>
Yes, the source VM is still active, like in the common live migration case.
You can also see that we check that the source VM is finalized when we
call tdx_guest() before migrating the state.
> > This should mostly match the logic in sev_vm_move_enc_context_from.
> >
> > Signed-off-by: Sagi Shahar <sagis@...gle.com>
> > ---
> > arch/x86/kvm/vmx/main.c | 10 +++++++
> > arch/x86/kvm/vmx/tdx.c | 56 ++++++++++++++++++++++++++++++++++++++
> > arch/x86/kvm/vmx/tdx.h | 2 ++
> > arch/x86/kvm/vmx/x86_ops.h | 5 ++++
> > 4 files changed, 73 insertions(+)
> >
> > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> > index 5b64fe5404958..9d5d0ac465bf6 100644
> > --- a/arch/x86/kvm/vmx/main.c
> > +++ b/arch/x86/kvm/vmx/main.c
> > @@ -979,6 +979,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> > return tdx_vcpu_ioctl(vcpu, argp);
> > }
> >
> > +static int vt_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > +{
> > + if (!is_td(kvm))
> > + return -ENOTTY;
> > +
> > + return tdx_vm_move_enc_context_from(kvm, source_fd);
> > +}
> > +
> > #define VMX_REQUIRED_APICV_INHIBITS \
> > ( \
> > BIT(APICV_INHIBIT_REASON_DISABLE)| \
> > @@ -1141,6 +1149,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> > .dev_mem_enc_ioctl = tdx_dev_ioctl,
> > .mem_enc_ioctl = vt_mem_enc_ioctl,
> > .vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
> > +
> > + .vm_move_enc_context_from = vt_move_enc_context_from,
> > };
> >
> > struct kvm_x86_init_ops vt_init_ops __initdata = {
> > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > index 8af7e4e81c860..0999a6d827c99 100644
> > --- a/arch/x86/kvm/vmx/tdx.c
> > +++ b/arch/x86/kvm/vmx/tdx.c
> > @@ -2826,3 +2826,59 @@ int __init tdx_init(void)
> > INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, cpu));
> > return 0;
> > }
> > +
> > +static __always_inline bool tdx_guest(struct kvm *kvm)
> > +{
> > + struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
> > +
> > + return tdx_kvm->finalized;
> > +}
> return is_td_finalized()?
> > +
> > +static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
> > +{
> > + return -EINVAL;
> > +}
> > +
> > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
> > +{
> > + struct kvm_tdx *dst_tdx = to_kvm_tdx(kvm);
> > + struct file *src_kvm_file;
> > + struct kvm_tdx *src_tdx;
> > + struct kvm *src_kvm;
> > + int ret;
> > +
> > + src_kvm_file = fget(source_fd);
> > + if (!file_is_kvm(src_kvm_file)) {
> > + ret = -EBADF;
> > + goto out_fput;
> > + }
> > + src_kvm = src_kvm_file->private_data;
> > + src_tdx = to_kvm_tdx(src_kvm);
> > +
> > + ret = pre_move_enc_context_from(kvm, src_kvm,
> > + &dst_tdx->migration_in_progress,
> > + &src_tdx->migration_in_progress);
> > + if (ret)
> > + goto out_fput;
> > +
> > + if (tdx_guest(kvm) || !tdx_guest(src_kvm)) {
> > + ret = -EINVAL;
> > + goto out_post;
> > + }
> > +
> > + ret = tdx_migrate_from(kvm, src_kvm);
> > + if (ret)
> > + goto out_post;
> > +
> > + kvm_vm_dead(src_kvm);
> > + ret = 0;
> > +
> > +out_post:
> > + post_move_enc_context_from(kvm, src_kvm,
> > + &dst_tdx->migration_in_progress,
> > + &src_tdx->migration_in_progress);
> > +out_fput:
> > + if (src_kvm_file)
> > + fput(src_kvm_file);
> > + return ret;
> > +}
> > diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> > index 71818c5001862..21b7e710be1fd 100644
> > --- a/arch/x86/kvm/vmx/tdx.h
> > +++ b/arch/x86/kvm/vmx/tdx.h
> > @@ -24,6 +24,8 @@ struct kvm_tdx {
> > atomic_t tdh_mem_track;
> >
> > u64 tsc_offset;
> > +
> > + atomic_t migration_in_progress;
> > };
> >
> > union tdx_exit_reason {
> > diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> > index d049e0c72ed0c..275f5d75e9bf1 100644
> > --- a/arch/x86/kvm/vmx/x86_ops.h
> > +++ b/arch/x86/kvm/vmx/x86_ops.h
> > @@ -187,6 +187,8 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> > void tdx_flush_tlb(struct kvm_vcpu *vcpu);
> > int tdx_sept_tlb_remote_flush(struct kvm *kvm);
> > void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> > +
> > +int tdx_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
> > #else
> > static inline int tdx_init(void) { return 0; };
> > static inline int tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -ENOSYS; }
> > @@ -241,6 +243,9 @@ static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { ret
> > static inline void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
> > static inline int tdx_sept_tlb_remote_flush(struct kvm *kvm) { return 0; }
> > static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
> > +
> > +static inline int tdx_vm_move_enc_context_from(struct kvm *kvm,
> > +                                               unsigned int source_fd) { return -EOPNOTSUPP; }
> > #endif
> >
> > #if defined(CONFIG_INTEL_TDX_HOST) && defined(CONFIG_KVM_SMM)
>
Powered by blists - more mailing lists