Message-ID: <20251017003244.186495-21-seanjc@google.com>
Date: Thu, 16 Oct 2025 17:32:38 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
Tianrui Zhao <zhaotianrui@...ngson.cn>, Bibo Mao <maobibo@...ngson.cn>,
Huacai Chen <chenhuacai@...nel.org>, Madhavan Srinivasan <maddy@...ux.ibm.com>,
Anup Patel <anup@...infault.org>, Paul Walmsley <pjw@...nel.org>,
Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
Christian Borntraeger <borntraeger@...ux.ibm.com>, Janosch Frank <frankja@...ux.ibm.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>, Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>, "Kirill A. Shutemov" <kas@...nel.org>
Cc: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
kvm@...r.kernel.org, loongarch@...ts.linux.dev, linux-mips@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, kvm-riscv@...ts.infradead.org,
linux-riscv@...ts.infradead.org, x86@...nel.org, linux-coco@...ts.linux.dev,
linux-kernel@...r.kernel.org, Ira Weiny <ira.weiny@...el.com>,
Kai Huang <kai.huang@...el.com>, Michael Roth <michael.roth@....com>,
Yan Zhao <yan.y.zhao@...el.com>, Vishal Annapurve <vannapurve@...gle.com>,
Rick Edgecombe <rick.p.edgecombe@...el.com>, Ackerley Tng <ackerleytng@...gle.com>,
Binbin Wu <binbin.wu@...ux.intel.com>
Subject: [PATCH v3 20/25] KVM: TDX: Add macro to retry SEAMCALLs when forcing
vCPUs out of guest
Add a macro to handle kicking vCPUs out of the guest and retrying
SEAMCALLs on -EBUSY instead of providing small helpers to be used by each
SEAMCALL. Wrapping the SEAMCALLs in a macro makes it a little harder to
tease out which SEAMCALL is being made, but significantly reduces the
amount of copy+paste code and makes it all but impossible to leave
wait_for_sept_zap set after a retry, i.e. to fail to clear the flag.
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/vmx/tdx.c | 72 ++++++++++++++----------------------------
1 file changed, 23 insertions(+), 49 deletions(-)
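
As a quick reference for reviewers, the hand-expanded form of a converted
call site, e.g. tdx_track()'s use of tdh_mem_track(), looks roughly like
the sketch below (illustrative only; the macro added in the diff is the
authoritative form):

	u64 err;

	lockdep_assert_held_write(&kvm->mmu_lock);

	err = tdh_mem_track(&kvm_tdx->td);
	if (unlikely(tdx_operand_busy(err))) {
		/*
		 * Block guest re-entry (vCPU entry checks
		 * wait_for_sept_zap), kick all vCPUs out of the guest,
		 * and retry.  With no vCPUs able to contend the
		 * SEAMCALL, the retry is expected to succeed.
		 */
		WRITE_ONCE(kvm_tdx->wait_for_sept_zap, true);
		kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);

		err = tdh_mem_track(&kvm_tdx->td);

		WRITE_ONCE(kvm_tdx->wait_for_sept_zap, false);
	}
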
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index f6782b0ffa98..2e2dab89c98f 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -294,25 +294,24 @@ static inline void tdx_disassociate_vp(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}
-static void tdx_no_vcpus_enter_start(struct kvm *kvm)
-{
- struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
-
- lockdep_assert_held_write(&kvm->mmu_lock);
-
- WRITE_ONCE(kvm_tdx->wait_for_sept_zap, true);
-
- kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
-}
-
-static void tdx_no_vcpus_enter_stop(struct kvm *kvm)
-{
- struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
-
- lockdep_assert_held_write(&kvm->mmu_lock);
-
- WRITE_ONCE(kvm_tdx->wait_for_sept_zap, false);
-}
+#define tdh_do_no_vcpus(tdh_func, kvm, args...) \
+({ \
+ struct kvm_tdx *__kvm_tdx = to_kvm_tdx(kvm); \
+ u64 __err; \
+ \
+ lockdep_assert_held_write(&kvm->mmu_lock); \
+ \
+ __err = tdh_func(args); \
+ if (unlikely(tdx_operand_busy(__err))) { \
+ WRITE_ONCE(__kvm_tdx->wait_for_sept_zap, true); \
+ kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE); \
+ \
+ __err = tdh_func(args); \
+ \
+ WRITE_ONCE(__kvm_tdx->wait_for_sept_zap, false); \
+ } \
+ __err; \
+})
/* TDH.PHYMEM.PAGE.RECLAIM is allowed only when destroying the TD. */
static int __tdx_reclaim_page(struct page *page)
@@ -1711,14 +1710,7 @@ static void tdx_track(struct kvm *kvm)
if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
return;
- err = tdh_mem_track(&kvm_tdx->td);
- if (unlikely(tdx_operand_busy(err))) {
- /* After no vCPUs enter, the second retry is expected to succeed */
- tdx_no_vcpus_enter_start(kvm);
- err = tdh_mem_track(&kvm_tdx->td);
- tdx_no_vcpus_enter_stop(kvm);
- }
-
+ err = tdh_do_no_vcpus(tdh_mem_track, kvm, &kvm_tdx->td);
TDX_BUG_ON(err, TDH_MEM_TRACK, kvm);
kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
@@ -1770,14 +1762,8 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
return;
- err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
- if (unlikely(tdx_operand_busy(err))) {
- /* After no vCPUs enter, the second retry is expected to succeed */
- tdx_no_vcpus_enter_start(kvm);
- err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
- tdx_no_vcpus_enter_stop(kvm);
- }
-
+ err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
+ tdx_level, &entry, &level_state);
if (TDX_BUG_ON_2(err, TDH_MEM_RANGE_BLOCK, entry, level_state, kvm))
return;
@@ -1792,20 +1778,8 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
* with other vcpu sept operation.
* Race with TDH.VP.ENTER due to (0-step mitigation) and Guest TDCALLs.
*/
- err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
- &level_state);
-
- if (unlikely(tdx_operand_busy(err))) {
- /*
- * The second retry is expected to succeed after kicking off all
- * other vCPUs and prevent them from invoking TDH.VP.ENTER.
- */
- tdx_no_vcpus_enter_start(kvm);
- err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
- &level_state);
- tdx_no_vcpus_enter_stop(kvm);
- }
-
+ err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa,
+ tdx_level, &entry, &level_state);
if (TDX_BUG_ON_2(err, TDH_MEM_PAGE_REMOVE, entry, level_state, kvm))
return;
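
Note: because tdh_do_no_vcpus() expands tdh_func(args) twice, the
arguments are evaluated a second time on the busy-retry path.  All of the
call sites converted above pass plain pointers and scalars, so this is
safe; arguments with side effects would not be.  For instance, this
hypothetical misuse would advance 'gpa' between the first attempt and the
retry:

	/* Hypothetical misuse: gpa++ is re-evaluated on the retry. */
	err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa++,
			      tdx_level, &entry, &level_state);
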
--
2.51.0.858.gf9c4a03a3a-goog