Message-ID: <4dfb1a64-e33f-4c87-a02c-753f918aa9d4@intel.com>
Date: Mon, 10 Mar 2025 15:25:48 +0800
From: Xiaoyao Li <xiaoyao.li@...el.com>
To: Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: adrian.hunter@...el.com, seanjc@...gle.com, rick.p.edgecombe@...el.com,
Isaku Yamahata <isaku.yamahata@...el.com>,
Tony Lindgren <tony.lindgren@...ux.intel.com>
Subject: Re: [PATCH v3 07/10] KVM: TDX: restore user ret MSRs
On 3/8/2025 5:20 AM, Paolo Bonzini wrote:
> From: Isaku Yamahata <isaku.yamahata@...el.com>
>
> Several user-return MSRs are clobbered on TD exit. Ensure the MSR cache is
> updated on vcpu_put, and that the MSRs themselves are restored before
> returning to ring 3.
Reviewed-by: Xiaoyao Li <xiaoyao.li@...el.com>
> Co-developed-by: Tony Lindgren <tony.lindgren@...ux.intel.com>
> Signed-off-by: Tony Lindgren <tony.lindgren@...ux.intel.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> Signed-off-by: Adrian Hunter <adrian.hunter@...el.com>
> Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
> Message-ID: <20250129095902.16391-10-adrian.hunter@...el.com>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/x86/kvm/vmx/tdx.c | 51 +++++++++++++++++++++++++++++++++++++++++-
> arch/x86/kvm/vmx/tdx.h | 1 +
> 2 files changed, 51 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index b2948318cd8b..5819ed926166 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -646,9 +646,32 @@ void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
> vt->guest_state_loaded = true;
> }
>
> +struct tdx_uret_msr {
> + u32 msr;
> + unsigned int slot;
> + u64 defval;
> +};
> +
> +static struct tdx_uret_msr tdx_uret_msrs[] = {
> + {.msr = MSR_SYSCALL_MASK, .defval = 0x20200 },
> + {.msr = MSR_STAR,},
> + {.msr = MSR_LSTAR,},
> + {.msr = MSR_TSC_AUX,},
> +};
> +
> +static void tdx_user_return_msr_update_cache(void)
> +{
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
> + kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
> + tdx_uret_msrs[i].defval);
> +}
> +
> static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
> {
> struct vcpu_vt *vt = to_vt(vcpu);
> + struct vcpu_tdx *tdx = to_tdx(vcpu);
>
> if (!vt->guest_state_loaded)
> return;
> @@ -656,6 +679,11 @@ static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
> ++vcpu->stat.host_state_reload;
> wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
>
> + if (tdx->guest_entered) {
> + tdx_user_return_msr_update_cache();
> + tdx->guest_entered = false;
> + }
> +
> vt->guest_state_loaded = false;
> }
>
> @@ -762,6 +790,8 @@ EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
>
> fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
> {
> + struct vcpu_tdx *tdx = to_tdx(vcpu);
> +
> /*
> * force_immediate_exit requires vCPU entry for event injection with
> * an immediate exit to follow. But the TDX module doesn't guarantee
> @@ -777,6 +807,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
> tdx_vcpu_enter_exit(vcpu);
>
> tdx_load_host_xsave_state(vcpu);
> + tdx->guest_entered = true;
>
> vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET;
>
> @@ -2236,7 +2267,25 @@ static int __init __do_tdx_bringup(void)
> static int __init __tdx_bringup(void)
> {
> const struct tdx_sys_info_td_conf *td_conf;
> - int r;
> + int r, i;
> +
> + for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++) {
> + /*
> + * Check if MSRs (tdx_uret_msrs) can be saved/restored
> + * before returning to user space.
> + *
> + * this_cpu_ptr(user_return_msrs)->registered isn't checked
> + * because the registration is done at vcpu runtime by
> + * tdx_user_return_msr_update_cache().
> + */
> + tdx_uret_msrs[i].slot = kvm_find_user_return_msr(tdx_uret_msrs[i].msr);
> + if (tdx_uret_msrs[i].slot == -1) {
> + /* If any MSR isn't supported, it is a KVM bug */
> + pr_err("MSR %x isn't included by kvm_find_user_return_msr\n",
> + tdx_uret_msrs[i].msr);
> + return -EIO;
> + }
> + }
>
> /*
> * Enabling TDX requires enabling hardware virtualization first,
> diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> index 6eb24bbacccc..55af3d866ff6 100644
> --- a/arch/x86/kvm/vmx/tdx.h
> +++ b/arch/x86/kvm/vmx/tdx.h
> @@ -56,6 +56,7 @@ struct vcpu_tdx {
> u64 vp_enter_ret;
>
> enum vcpu_tdx_state state;
> + bool guest_entered;
> };
>
> void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err);
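
Just to spell out my understanding of the flow this patch relies on: a TD exit
leaves these user-return MSRs at fixed values, so on vcpu_put only KVM's
user-return MSR cache is refreshed, and the existing user-return notifier
writes the host values back lazily before going to ring 3. Below is a small
userspace-only model of that idea (not kernel code; the host values and the
fake_wrmsr() helper are made up purely for illustration):

/*
 * Toy model (plain userspace C) of the user-return MSR caching scheme.
 * Names loosely mirror the patch; nothing here is the real KVM code.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct uret_msr {
	uint32_t msr;
	uint64_t host_val;   /* value the host expects on return to ring 3 */
	uint64_t curr_val;   /* what "KVM" believes is currently in the MSR */
	bool dirty;          /* curr_val diverged from the host value?      */
};

/* Stand-ins for MSR_SYSCALL_MASK, MSR_STAR, MSR_LSTAR, MSR_TSC_AUX. */
static struct uret_msr msrs[] = {
	{ .msr = 0xc0000084, .host_val = 0x47700, .curr_val = 0x47700 },
	{ .msr = 0xc0000081, .host_val = 0x11111, .curr_val = 0x11111 },
	{ .msr = 0xc0000082, .host_val = 0x22222, .curr_val = 0x22222 },
	{ .msr = 0xc0000103, .host_val = 0x00001, .curr_val = 0x00001 },
};

/* Pretend hardware write. */
static void fake_wrmsr(uint32_t msr, uint64_t val)
{
	printf("wrmsr(0x%" PRIx32 ") = 0x%" PRIx64 "\n", msr, val);
}

/*
 * Analogue of tdx_user_return_msr_update_cache(): after a TD exit the MSR
 * values are already known, so only the cache is updated -- no wrmsr yet.
 * "defval" in the patch plays the role of new_val here.
 */
static void update_cache(struct uret_msr *m, uint64_t new_val)
{
	m->curr_val = new_val;
	m->dirty = true;
}

/*
 * Analogue of the user-return notifier: restore host values lazily, right
 * before going back to userspace, and only where they actually differ.
 */
static void on_return_to_user(void)
{
	for (size_t i = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++) {
		if (msrs[i].dirty && msrs[i].curr_val != msrs[i].host_val) {
			fake_wrmsr(msrs[i].msr, msrs[i].host_val);
			msrs[i].curr_val = msrs[i].host_val;
		}
		msrs[i].dirty = false;
	}
}

int main(void)
{
	/* TD exit clobbered the MSRs; record that in the cache only. */
	update_cache(&msrs[0], 0x20200);  /* SYSCALL_MASK reset value   */
	update_cache(&msrs[1], 0);        /* STAR, LSTAR, TSC_AUX -> 0  */
	update_cache(&msrs[2], 0);
	update_cache(&msrs[3], 0);

	/* Host values are written back only on the way out to ring 3. */
	on_return_to_user();
	return 0;
}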