Message-ID: <aH+lx0vJE5KA7ifd@intel.com>
Date: Tue, 22 Jul 2025 22:52:55 +0800
From: Chao Gao <chao.gao@...el.com>
To: Kai Huang <kai.huang@...el.com>
CC: <dave.hansen@...el.com>, <bp@...en8.de>, <tglx@...utronix.de>,
<peterz@...radead.org>, <mingo@...hat.com>, <hpa@...or.com>,
<thomas.lendacky@....com>, <x86@...nel.org>, <kas@...nel.org>,
<rick.p.edgecombe@...el.com>, <dwmw@...zon.co.uk>,
<linux-kernel@...r.kernel.org>, <pbonzini@...hat.com>, <seanjc@...gle.com>,
<kvm@...r.kernel.org>, <reinette.chatre@...el.com>,
<isaku.yamahata@...el.com>, <dan.j.williams@...el.com>,
<ashish.kalra@....com>, <nik.borisov@...e.com>, <sagis@...gle.com>, "Farrah
Chen" <farrah.chen@...el.com>
Subject: Re: [PATCH v4 3/7] x86/virt/tdx: Mark memory cache state incoherent
when making SEAMCALL
>+static __always_inline u64 do_seamcall(sc_func_t func, u64 fn,
>+                                       struct tdx_module_args *args)
>+{
>+        u64 ret;
>+
>+        lockdep_assert_preemption_disabled();
>+
>+        /*
>+         * SEAMCALLs are made to the TDX module and can generate dirty
>+         * cachelines of TDX private memory. Mark cache state incoherent
>+         * so that the cache can be flushed during kexec.
>+         *
>+         * This needs to be done before actually making the SEAMCALL,
>+         * because kexec-ing CPU could send NMI to stop remote CPUs,
>+         * in which case even disabling IRQ won't help here.
>+         */
>+        this_cpu_write(cache_state_incoherent, true);
>+
>+        ret = func(fn, args);
>+
>+        return ret;
@ret can be dropped here. Just

        return func(fn, args);

should work.
And tracking the cache-incoherent state at the per-CPU level seems to add
unnecessary complexity. It requires a new do_seamcall() wrapper, sets the
flag on every SEAMCALL rather than just the first one (I'm not concerned
about performance; it just feels silly), and forces
preempt_disable()/enable() around each call. In my view, per-CPU tracking
at most saves a WBINVD on a CPU that never ran any SEAMCALL before kexec,
which is quite marginal. Did I miss any other benefits?
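FWIW, what I have in mind is a single global flag set on the first
SEAMCALL, with the kexec path doing WBINVD on every CPU whenever the flag
is set. A rough sketch, completely untested, with the flag name made up
by me:

        /* hypothetical global flag; would be defined once in tdx.c */
        extern bool tdx_cache_incoherent;

        static __always_inline u64 do_seamcall(sc_func_t func, u64 fn,
                                               struct tdx_module_args *args)
        {
                /*
                 * Any SEAMCALL may generate dirty cachelines of TDX
                 * private memory. Once one SEAMCALL has been made
                 * anywhere, kexec must flush caches on all CPUs, so
                 * one global flag is enough and the store needs no
                 * preemption protection.
                 */
                if (!READ_ONCE(tdx_cache_incoherent))
                        WRITE_ONCE(tdx_cache_incoherent, true);

                return func(fn, args);
        }

This keeps the set-before-SEAMCALL ordering that the patch comment asks
for, while dropping both the per-CPU variable and the
preempt_disable()/enable() around each call in sc_retry().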
>+}
>+
> static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
>                                     struct tdx_module_args *args)
> {
>@@ -113,7 +138,9 @@ static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
>         u64 ret;
>
>         do {
>-                ret = func(fn, args);
>+                preempt_disable();
>+                ret = do_seamcall(func, fn, args);
>+                preempt_enable();
>         } while (ret == TDX_RND_NO_ENTROPY && --retry);
>
>         return ret;
>diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
>index c7a9a087ccaf..d6ee4e5a75d2 100644
>--- a/arch/x86/virt/vmx/tdx/tdx.c
>+++ b/arch/x86/virt/vmx/tdx/tdx.c
>@@ -1266,7 +1266,7 @@ static bool paddr_is_tdx_private(unsigned long phys)
>                 return false;
>
>         /* Get page type from the TDX module */
>-        sret = __seamcall_ret(TDH_PHYMEM_PAGE_RDMD, &args);
>+        sret = do_seamcall(__seamcall_ret, TDH_PHYMEM_PAGE_RDMD, &args);
>
>         /*
>          * The SEAMCALL will not return success unless there is a
>@@ -1522,7 +1522,7 @@ noinstr __flatten u64 tdh_vp_enter(struct tdx_vp *td, struct tdx_module_args *ar
> {
>         args->rcx = tdx_tdvpr_pa(td);
>
>-        return __seamcall_saved_ret(TDH_VP_ENTER, args);
>+        return do_seamcall(__seamcall_saved_ret, TDH_VP_ENTER, args);
> }
> EXPORT_SYMBOL_GPL(tdh_vp_enter);
>
>--
>2.50.0
>