Message-ID: <3846c1be-73cd-17d6-5e2a-9f7edde2ef9c@redhat.com>
Date: Fri, 15 Apr 2022 16:14:36 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: isaku.yamahata@...el.com, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...il.com, Jim Mattson <jmattson@...gle.com>,
erdemaktas@...gle.com, Connor Kuehl <ckuehl@...hat.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: Re: [RFC PATCH v5 072/104] KVM: TDX: handle vcpu migration over
logical processor
On 3/4/22 20:49, isaku.yamahata@...el.com wrote:
> From: Isaku Yamahata <isaku.yamahata@...el.com>
>
> For vCPU migration, in the case of VMX, the VMCS is flushed on the source
> pCPU and loaded on the target pCPU. There are corresponding TDX SEAMCALL
> APIs; call them on vCPU migration. The logic is mostly the same as for
> VMX, except that the TDX SEAMCALLs are used.
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> ---
>  arch/x86/kvm/vmx/main.c    | 20 +++++++++++++--
>  arch/x86/kvm/vmx/tdx.c     | 51 ++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/x86_ops.h |  2 ++
>  3 files changed, 71 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index f9d43f2de145..2cd5ba0e8788 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -121,6 +121,14 @@ static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu)
>  	return vmx_vcpu_run(vcpu);
>  }
>
> +static void vt_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	if (is_td_vcpu(vcpu))
> +		return tdx_vcpu_load(vcpu, cpu);
> +
> +	return vmx_vcpu_load(vcpu, cpu);
> +}
> +
>  static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
>  {
>  	if (is_td_vcpu(vcpu))
> @@ -162,6 +170,14 @@ static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
>  	vmx_load_mmu_pgd(vcpu, root_hpa, pgd_level);
>  }
>
> +static void vt_sched_in(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	if (is_td_vcpu(vcpu))
> +		return;
> +
> +	vmx_sched_in(vcpu, cpu);
> +}
> +
>  static int vt_mem_enc_op(struct kvm *kvm, void __user *argp)
>  {
>  	if (!is_td(kvm))
> @@ -199,7 +215,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>  	.vcpu_reset = vt_vcpu_reset,
>
>  	.prepare_guest_switch = vt_prepare_switch_to_guest,
> -	.vcpu_load = vmx_vcpu_load,
> +	.vcpu_load = vt_vcpu_load,
>  	.vcpu_put = vt_vcpu_put,
>
>  	.update_exception_bitmap = vmx_update_exception_bitmap,
> @@ -285,7 +301,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>
>  	.request_immediate_exit = vmx_request_immediate_exit,
>
> -	.sched_in = vmx_sched_in,
> +	.sched_in = vt_sched_in,
>
>  	.cpu_dirty_log_size = PML_ENTITY_NUM,
>  	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 37cf7d43435d..a6b1a8ce888d 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -85,6 +85,18 @@ static inline bool is_td_finalized(struct kvm_tdx *kvm_tdx)
>  	return kvm_tdx->finalized;
>  }
>
> +static inline void tdx_disassociate_vp(struct kvm_vcpu *vcpu)
> +{
> +	/*
> +	 * Ensure tdx->cpu_list is updated before setting vcpu->cpu to -1,
> +	 * otherwise a different CPU can see vcpu->cpu == -1 and add the vCPU
> +	 * to its list before it is deleted from this CPU's list.
> +	 */
> +	smp_wmb();
> +
> +	vcpu->cpu = -1;
> +}
> +
>  static void tdx_clear_page(unsigned long page)
>  {
>  	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
> @@ -155,6 +167,39 @@ static void tdx_reclaim_td_page(struct tdx_td_page *page)
>  	free_page(page->va);
>  }
>
> +static void tdx_flush_vp(void *arg)
> +{
> +	struct kvm_vcpu *vcpu = arg;
> +	u64 err;
> +
> +	/* Task migration can race with CPU offlining. */
> +	if (vcpu->cpu != raw_smp_processor_id())
> +		return;
> +
> +	/*
> +	 * No need to do TDH_VP_FLUSH if the vCPU hasn't been initialized. The
> +	 * list tracking still needs to be updated so that it's correct if/when
> +	 * the vCPU does get initialized.
> +	 */
> +	if (is_td_vcpu_created(to_tdx(vcpu))) {
> +		err = tdh_vp_flush(to_tdx(vcpu)->tdvpr.pa);
> +		if (unlikely(err && err != TDX_VCPU_NOT_ASSOCIATED)) {
> +			if (WARN_ON_ONCE(err))
> +				pr_tdx_error(TDH_VP_FLUSH, err, NULL);
> +		}
> +	}
> +
> +	tdx_disassociate_vp(vcpu);
> +}
> +
> +static void tdx_flush_vp_on_cpu(struct kvm_vcpu *vcpu)
> +{
> +	if (unlikely(vcpu->cpu == -1))
> +		return;
> +
> +	smp_call_function_single(vcpu->cpu, tdx_flush_vp, vcpu, 1);
> +}
> +
>  static int tdx_do_tdh_phymem_cache_wb(void *param)
>  {
>  	u64 err = 0;
> @@ -425,6 +470,12 @@ int tdx_vcpu_create(struct kvm_vcpu *vcpu)
>  	return ret;
>  }
>
> +void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	if (vcpu->cpu != cpu)
> +		tdx_flush_vp_on_cpu(vcpu);
> +}
> +
>  void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_tdx *tdx = to_tdx(vcpu);
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index 8b871c5f52cf..ceafd6e18f4e 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -143,6 +143,7 @@ void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
>  fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu);
>  void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
>  void tdx_vcpu_put(struct kvm_vcpu *vcpu);
> +void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
>
>  int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
>  int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> @@ -166,6 +167,7 @@ static inline void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}
>  static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu) { return EXIT_FASTPATH_NONE; }
>  static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
>  static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
> +static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
>
>  static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
>  static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
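Just to spell out my understanding of the flow this adds, for a vCPU
migrating from pCPU A to pCPU B (vcpu->cpu itself is updated to B by
kvm_arch_vcpu_load() after the callback returns):

	vcpu_load() on B
	  vt_vcpu_load() -> tdx_vcpu_load(vcpu, B)	/* vcpu->cpu == A */
	    tdx_flush_vp_on_cpu(vcpu)
	      smp_call_function_single(A, tdx_flush_vp, vcpu, 1)
	        tdh_vp_flush()			/* TDH.VP.FLUSH on A */
	        tdx_disassociate_vp()		/* vcpu->cpu = -1 */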
This patch and the next one might even be squashed together: the comment
in tdx_disassociate_vp() already refers to tdx->cpu_list, which nothing
in this patch touches.
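In fact, the smp_wmb() in tdx_disassociate_vp() can only pair with an
smp_rmb() on the associate side, which I suppose the next patch adds to
tdx_vcpu_load() together with the actual list manipulation. Something
along these lines (just a sketch of what I expect, not code from the
series; the per-CPU list name "associated_tdvcpus" is made up here):

	void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	{
		struct vcpu_tdx *tdx = to_tdx(vcpu);

		if (vcpu->cpu == cpu)
			return;

		/* Synchronous IPI; on return, vcpu->cpu == -1. */
		tdx_flush_vp_on_cpu(vcpu);

		local_irq_disable();
		/*
		 * Pairs with the smp_wmb() in tdx_disassociate_vp(): the
		 * deletion from the old pCPU's list must be visible before
		 * the vCPU is added to this pCPU's list, so that the vCPU
		 * is never on two lists at once.
		 */
		smp_rmb();
		list_add(&tdx->cpu_list, &per_cpu(associated_tdvcpus, cpu));
		local_irq_enable();
	}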
Otherwise,
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>