Message-ID: <20260129011517.3545883-35-seanjc@google.com>
Date: Wed, 28 Jan 2026 17:15:06 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Thomas Gleixner <tglx@...nel.org>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
Kiryl Shutsemau <kas@...nel.org>, Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-coco@...ts.linux.dev,
kvm@...r.kernel.org, Kai Huang <kai.huang@...el.com>,
Rick Edgecombe <rick.p.edgecombe@...el.com>, Yan Zhao <yan.y.zhao@...el.com>,
Vishal Annapurve <vannapurve@...gle.com>, Ackerley Tng <ackerleytng@...gle.com>,
Sagi Shahar <sagis@...gle.com>, Binbin Wu <binbin.wu@...ux.intel.com>,
Xiaoyao Li <xiaoyao.li@...el.com>, Isaku Yamahata <isaku.yamahata@...el.com>
Subject: [RFC PATCH v5 34/45] KVM: TDX: Handle removal of leaf SPTEs in .set_private_spte()
Drop kvm_x86_ops.remove_external_spte(), and instead handle the removal
of leaf SPTEs in the S-EPT (a.k.a. external root) in .set_private_spte().
This will allow extending tdx_sept_set_private_spte() to support splitting
a huge S-EPT entry without needing yet another kvm_x86_ops hook.
Bug the VM if the callback fails, as redundant KVM_BUG_ON() calls are
benign (the WARN will fire if and only if the VM isn't already bugged) and
handle_changed_spte() is most definitely not prepared to handle failure.
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
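Purely illustrative sketch of where this is headed, NOT part of this patch:
tdx_sept_split_private_spte() is hypothetical, but folding removal into the
set_external_spte() hook means a future huge S-EPT split could be handled by
the same dispatch at the top of tdx_sept_set_private_spte(), e.g.:

	if (is_shadow_present_pte(old_spte)) {
		/*
		 * Hypothetical: replacing a present leaf with a present
		 * non-leaf SPTE would be a huge page split (demote).
		 */
		if (is_shadow_present_pte(new_spte) && !is_last_spte(new_spte, level))
			return tdx_sept_split_private_spte(kvm, gfn, level, old_spte);

		return tdx_sept_remove_private_spte(kvm, gfn, level, old_spte);
	}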
arch/x86/include/asm/kvm-x86-ops.h | 1 -
arch/x86/include/asm/kvm_host.h | 2 --
arch/x86/kvm/mmu/tdp_mmu.c | 20 +++++++++++---------
arch/x86/kvm/vmx/tdx.c | 21 ++++++++++++---------
4 files changed, 23 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 394dc29483a7..3ca56fe6b951 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -97,7 +97,6 @@ KVM_X86_OP(load_mmu_pgd)
KVM_X86_OP_OPTIONAL(alloc_external_sp)
KVM_X86_OP_OPTIONAL(free_external_sp)
KVM_X86_OP_OPTIONAL_RET0(set_external_spte)
-KVM_X86_OP_OPTIONAL(remove_external_spte)
KVM_X86_OP_OPTIONAL(reclaim_external_sp)
KVM_X86_OP_OPTIONAL_RET0(topup_external_cache)
KVM_X86_OP(has_wbinvd_exit)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 67deec8e205e..385f1cf32d70 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1861,8 +1861,6 @@ struct kvm_x86_ops {
u64 new_spte, enum pg_level level);
void (*reclaim_external_sp)(struct kvm *kvm, gfn_t gfn,
struct kvm_mmu_page *sp);
- void (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
- u64 mirror_spte);
int (*topup_external_cache)(struct kvm_vcpu *vcpu, int min);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 271dd6f875a6..d49aecba18d8 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -559,20 +559,22 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
* SPTE being converted to a hugepage (leaf) or being zapped. Shadow
* pages are kernel allocations and should never be migrated.
*
- * When removing leaf entries from a mirror, immediately propagate the
- * changes to the external page tables. Note, non-leaf mirror entries
- * are handled by handle_removed_pt(), as TDX requires that all leaf
- * entries are removed before the owning page table. Note #2, writes
- * to make mirror PTEs shadow-present are propagated to external page
- * tables by __tdp_mmu_set_spte_atomic(), as KVM needs to ensure the
- * external page table was successfully updated before marking the
- * mirror SPTE present.
+ * When modifying leaf entries in mirrored page tables, propagate the
+ * changes to the external SPTE. Bug the VM on failure, as callers
+ * aren't prepared to handle errors, e.g. due to lock contention in the
+ * TDX-Module. Note, changes to non-leaf mirror SPTEs are handled by
+ * handle_removed_pt() (the TDX-Module requires that child entries are
+ * removed before the parent SPTE), and changes to non-present mirror
+ * SPTEs are handled by __tdp_mmu_set_spte_atomic() (KVM needs to set
+ * the external SPTE while the mirror SPTE is frozen so that installing
+ * a new SPTE is effectively an atomic operation).
*/
if (was_present && !was_leaf &&
(is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
else if (was_leaf && is_mirror_sptep(sptep) && !is_leaf)
- kvm_x86_call(remove_external_spte)(kvm, gfn, level, old_spte);
+ KVM_BUG_ON(kvm_x86_call(set_external_spte)(kvm, gfn, old_spte,
+ new_spte, level), kvm);
}
static inline int __must_check __tdp_mmu_set_spte_atomic(struct kvm *kvm,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 0f3d27699a3d..9f7789c5f0a7 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1751,11 +1751,11 @@ static int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
return 0;
}
-static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, u64 mirror_spte)
+static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, u64 old_spte)
{
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
- kvm_pfn_t pfn = spte_to_pfn(mirror_spte);
+ kvm_pfn_t pfn = spte_to_pfn(old_spte);
gpa_t gpa = gfn_to_gpa(gfn);
u64 err, entry, level_state;
@@ -1767,16 +1767,16 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
* there can't be anything populated in the private EPT.
*/
if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
- return;
+ return -EIO;
/* TODO: handle large pages. */
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
- return;
+ return -EIO;
err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
level, &entry, &level_state);
if (TDX_BUG_ON_2(err, TDH_MEM_RANGE_BLOCK, entry, level_state, kvm))
- return;
+ return -EIO;
/*
* TDX requires TLB tracking before dropping private page. Do
@@ -1792,14 +1792,15 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa,
level, &entry, &level_state);
if (TDX_BUG_ON_2(err, TDH_MEM_PAGE_REMOVE, entry, level_state, kvm))
- return;
+ return -EIO;
err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, pfn, level);
if (TDX_BUG_ON(err, TDH_PHYMEM_PAGE_WBINVD, kvm))
- return;
+ return -EIO;
__tdx_quirk_reset_page(pfn, level);
tdx_pamt_put(pfn, level);
+ return 0;
}
static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
@@ -1811,6 +1812,9 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
struct vcpu_tdx *tdx = to_tdx(vcpu);
int ret;
+ if (is_shadow_present_pte(old_spte))
+ return tdx_sept_remove_private_spte(kvm, gfn, level, old_spte);
+
if (KVM_BUG_ON(!vcpu, kvm))
return -EINVAL;
@@ -3639,7 +3643,6 @@ void __init tdx_hardware_setup(void)
vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
vt_x86_ops.reclaim_external_sp = tdx_sept_reclaim_private_sp;
- vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
/*
* FIXME: Wire up the PAMT hook iff DPAMT is supported, once VMXON is
--
2.53.0.rc1.217.geba53bf80e-goog