Message-ID: <20260106102108.25074-1-yan.y.zhao@intel.com>
Date: Tue, 6 Jan 2026 18:21:08 +0800
From: Yan Zhao <yan.y.zhao@...el.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: linux-kernel@...r.kernel.org,
kvm@...r.kernel.org,
x86@...nel.org,
rick.p.edgecombe@...el.com,
dave.hansen@...el.com,
kas@...nel.org,
tabba@...gle.com,
ackerleytng@...gle.com,
michael.roth@....com,
david@...nel.org,
vannapurve@...gle.com,
sagis@...gle.com,
vbabka@...e.cz,
thomas.lendacky@....com,
nik.borisov@...e.com,
pgonda@...gle.com,
fan.du@...el.com,
jun.miao@...el.com,
francescolavra.fl@...il.com,
jgross@...e.com,
ira.weiny@...el.com,
isaku.yamahata@...el.com,
xiaoyao.li@...el.com,
kai.huang@...el.com,
binbin.wu@...ux.intel.com,
chao.p.peng@...el.com,
chao.gao@...el.com,
yan.y.zhao@...el.com
Subject: [PATCH v3 09/24] KVM: x86: Reject splitting huge pages under shared mmu_lock in TDX
Allow propagating SPTE splitting changes from the mirror page table to the
external page table in the fault path under shared mmu_lock, while
rejecting this splitting request in TDX's implementation of
kvm_x86_ops.split_external_spte().
Allow tdp_mmu_split_huge_page() to be invoked for the mirror page table in
the fault path by removing the KVM_BUG_ON() immediately before it.
set_external_spte_present() is invoked in the fault path under shared
mmu_lock to propagate transitions from the mirror page table to the
external page table when the new SPTE is present. Add "splitting" as a
valid transition case in set_external_spte_present() and invoke the helper
split_external_spte() to perform the propagation.
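For clarity, the transition check added here boils down to the following
standalone sketch. is_shadow_present_pte() and is_last_spte() are the
existing KVM helpers; the wrapper function name itself is made up for
illustration and is not part of the patch:

	/*
	 * Illustration only: the transitions set_external_spte_present()
	 * accepts after this patch; anything else trips the KVM_BUG_ON().
	 */
	static bool valid_external_spte_transition(u64 old_spte, u64 new_spte,
						   int level)
	{
		bool was_present = is_shadow_present_pte(old_spte);
		bool is_present = is_shadow_present_pte(new_spte);
		bool was_leaf = was_present && is_last_spte(old_spte, level);
		bool is_leaf = is_present && is_last_spte(new_spte, level);

		/* The caller guarantees the new SPTE is present. */
		if (!is_present)
			return false;

		/* !present -> present leaf or non-leaf: map or link. */
		if (!was_present)
			return true;

		/* present leaf -> non-leaf: splitting (demote). */
		return was_leaf && !is_leaf;
	}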
Pass shared mmu_lock information to kvm_x86_ops.split_external_spte() and
reject the splitting request in TDX's implementation of
kvm_x86_ops.split_external_spte() when under shared mmu_lock.
This is because TDX requires different handling for splitting under shared
versus exclusive mmu_lock: under shared mmu_lock, TDX cannot kick all vCPUs
out of guest mode to avoid BUSY errors from DEMOTE. The current TDX module
(i.e., one without the NON-BLOCKING-RESIZE feature) requires
BLOCK/TRACK/kicking vCPUs before each DEMOTE, so a BUSY error from DEMOTE
leaves the range blocked and TDX must call UNBLOCK before returning the
error to the KVM MMU core, which then rolls back to the old SPTE and
retries the fault. However, UNBLOCK itself may also fail due to contention.
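For reference, the exclusive-lock demote flow required by such a TDX module
looks roughly like the sketch below. The helpers are hypothetical stand-ins
for the wrappers around the named SEAMCALLs (the series invokes
tdh_mem_range_block() via tdh_do_no_vcpus(), for example); the point is the
UNBLOCK rollback that a BUSY DEMOTE forces, which has no reliable
equivalent under shared mmu_lock:

	/* Illustration only; not part of the patch. */
	static int demote_under_exclusive_lock_sketch(struct kvm *kvm,
						      gfn_t gfn, int level)
	{
		int err;

		/*
		 * 1. TDH.MEM.RANGE.BLOCK + TDH.MEM.TRACK, then kick all
		 *    vCPUs out of guest mode.
		 */
		err = block_and_track_private_range(kvm, gfn, level);
		if (err)
			return err;

		/* 2. TDH.MEM.PAGE.DEMOTE splits the huge mapping. */
		err = demote_private_page(kvm, gfn, level);
		if (err != -EBUSY)
			return err;

		/*
		 * 3. On BUSY, TDH.MEM.RANGE.UNBLOCK must make the old huge
		 *    mapping usable again before the error is returned to
		 *    the KVM MMU core, which rolls back the SPTE and lets
		 *    the guest retry.  UNBLOCK itself can hit contention.
		 */
		if (unblock_private_range(kvm, gfn, level))
			return -EIO;

		return -EBUSY;
	}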
Rejecting the splitting of private huge pages under shared mmu_lock in TDX,
rather than with a KVM_BUG_ON() in the KVM MMU core, allows splitting under
shared mmu_lock to be enabled once the TDX module supports the
NON-BLOCKING-RESIZE feature, keeping the KVM MMU core framework stable
across TDX module implementation changes.
Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
---
v3:
- Rebased on top of Sean's cleanup series.
- split_external_spte --> kvm_x86_ops.split_external_spte(). (Kai)
RFC v2:
- WARN_ON_ONCE() and return an error in tdx_sept_split_private_spt() if
  it's invoked under shared mmu_lock (rather than increasing the next
  fault's max_level in the current vCPU via tdx->violation_gfn_start/end
  and tdx->violation_request_level).
- TODO: Perform the real implementation of demote under shared mmu_lock
  once a version of the TDX module supporting non-blocking demote is
  available.
RFC v1:
- New patch.
---
arch/x86/include/asm/kvm_host.h | 3 +-
arch/x86/kvm/mmu/tdp_mmu.c | 51 +++++++++++++++++++++------------
arch/x86/kvm/vmx/tdx.c | 9 +++++-
3 files changed, 42 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 56089d6b9b51..315ffb23e9d8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1850,7 +1850,8 @@ struct kvm_x86_ops {
/* Split a huge mapping into smaller mappings in external page table */
int (*split_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
- u64 old_mirror_spte, void *new_external_spt);
+ u64 old_mirror_spte, void *new_external_spt,
+ bool mmu_lock_shared);
/* Allocation a pages from the external page cache. */
void *(*alloc_external_fault_cache)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 977914b2627f..9b45ffb8585f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -509,7 +509,7 @@ static void *get_external_spt(gfn_t gfn, u64 new_spte, int level)
}
static int split_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
- u64 new_spte, int level)
+ u64 new_spte, int level, bool shared)
{
void *new_external_spt = get_external_spt(gfn, new_spte, level);
int ret;
@@ -517,7 +517,7 @@ static int split_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
KVM_BUG_ON(!new_external_spt, kvm);
ret = kvm_x86_call(split_external_spte)(kvm, gfn, level, old_spte,
- new_external_spt);
+ new_external_spt, shared);
return ret;
}
@@ -527,10 +527,20 @@ static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sp
{
bool was_present = is_shadow_present_pte(old_spte);
bool is_present = is_shadow_present_pte(new_spte);
+ bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
int ret = 0;
- KVM_BUG_ON(was_present, kvm);
+ /*
+ * The caller __tdp_mmu_set_spte_atomic() has ensured new_spte must be
+ * present.
+ *
+ * Current valid transitions:
+ * - leaf to non-leaf (demote)
+ * - !present to present leaf
+ * - !present to present non-leaf
+ */
+ KVM_BUG_ON(!(!was_present || (was_leaf && !is_leaf)), kvm);
lockdep_assert_held(&kvm->mmu_lock);
/*
@@ -541,18 +551,24 @@ static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sp
if (!try_cmpxchg64(rcu_dereference(sptep), &old_spte, FROZEN_SPTE))
return -EBUSY;
- /*
- * Use different call to either set up middle level
- * external page table, or leaf.
- */
- if (is_leaf) {
- ret = kvm_x86_call(set_external_spte)(kvm, gfn, level, new_spte);
- } else {
- void *external_spt = get_external_spt(gfn, new_spte, level);
+ if (!was_present) {
+ /*
+ * Use different call to either set up middle level external
+ * page table, or leaf.
+ */
+ if (is_leaf) {
+ ret = kvm_x86_call(set_external_spte)(kvm, gfn, level, new_spte);
+ } else {
+ void *external_spt = get_external_spt(gfn, new_spte, level);
- KVM_BUG_ON(!external_spt, kvm);
- ret = kvm_x86_call(link_external_spt)(kvm, gfn, level, external_spt);
+ KVM_BUG_ON(!external_spt, kvm);
+ ret = kvm_x86_call(link_external_spt)(kvm, gfn, level, external_spt);
+ }
+ } else if (was_leaf && !is_leaf) {
+ /* splitting */
+ ret = split_external_spte(kvm, gfn, old_spte, new_spte, level, true);
}
+
if (ret)
__kvm_tdp_mmu_write_spte(sptep, old_spte);
else
@@ -782,7 +798,7 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
if (!is_shadow_present_pte(new_spte))
remove_external_spte(kvm, gfn, old_spte, level);
else if (is_last_spte(old_spte, level) && !is_last_spte(new_spte, level))
- split_external_spte(kvm, gfn, old_spte, new_spte, level);
+ split_external_spte(kvm, gfn, old_spte, new_spte, level, false);
else
KVM_BUG_ON(1, kvm);
}
@@ -1331,13 +1347,10 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
- if (is_shadow_present_pte(iter.old_spte)) {
- /* Don't support large page for mirrored roots (TDX) */
- KVM_BUG_ON(is_mirror_sptep(iter.sptep), vcpu->kvm);
+ if (is_shadow_present_pte(iter.old_spte))
r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
- } else {
+ else
r = tdp_mmu_link_sp(kvm, &iter, sp, true);
- }
/*
* Force the guest to retry if installing an upper level SPTE
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index b41793402769..1e29722abb36 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1926,7 +1926,8 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
* BUSY.
*/
static int tdx_sept_split_private_spte(struct kvm *kvm, gfn_t gfn, enum pg_level level,
- u64 old_mirror_spte, void *new_private_spt)
+ u64 old_mirror_spte, void *new_private_spt,
+ bool mmu_lock_shared)
{
struct page *new_sept_page = virt_to_page(new_private_spt);
int tdx_level = pg_level_to_tdx_sept_level(level);
@@ -1938,6 +1939,12 @@ static int tdx_sept_split_private_spte(struct kvm *kvm, gfn_t gfn, enum pg_level
level != PG_LEVEL_2M, kvm))
return -EIO;
+ if (WARN_ON_ONCE(mmu_lock_shared)) {
+ pr_warn_once("Splitting of GFN %llx level %d under shared lock occurs when KVM does not support it yet\n",
+ gfn, level);
+ return -EOPNOTSUPP;
+ }
+
err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
tdx_level, &entry, &level_state);
if (TDX_BUG_ON_2(err, TDH_MEM_RANGE_BLOCK, entry, level_state, kvm))
--
2.43.2