Message-ID: <20250502130828.4071412-9-kirill.shutemov@linux.intel.com>
Date: Fri, 2 May 2025 16:08:24 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: rick.p.edgecombe@...el.com,
isaku.yamahata@...el.com,
kai.huang@...el.com,
yan.y.zhao@...el.com,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
kvm@...r.kernel.org,
x86@...nel.org,
linux-coco@...ts.linux.dev,
linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [RFC, PATCH 08/12] KVM: x86/tdp_mmu: Add phys_prepare() and phys_cleanup() to kvm_x86_ops

The functions kvm_x86_ops::link_external_spt() and
kvm_x86_ops::set_external_spte() are used to assign new memory to a VM.
When using TDX with Dynamic PAMT enabled, the assigned memory must be
covered by PAMT (Physical Address Metadata Table).

The new function kvm_x86_ops::phys_prepare() is called before
link_external_spt() and set_external_spte() to ensure that the memory is
ready to be assigned to the virtual machine. In the case of TDX, it
makes sure that the memory is covered by PAMT.

kvm_x86_ops::phys_prepare() is called in a context where struct kvm_vcpu
is available, allowing the implementation to allocate memory from a
per-VCPU pool.
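
As an illustration only, a TDX backend could implement the hook along
these lines. The to_tdx() accessor, the pamt_page_cache field and the
tdx_pamt_get() helper are assumed names for this sketch, not something
this patch adds:

	/*
	 * Hypothetical sketch: make sure the PAMT memory covering @pfn
	 * is in place before the page is handed to the TDX module.
	 */
	static int tdx_phys_prepare(struct kvm_vcpu *vcpu, kvm_pfn_t pfn)
	{
		struct vcpu_tdx *tdx = to_tdx(vcpu);	/* assumed accessor */
		int r;

		/* Refill the per-vCPU pool while it is safe to sleep. */
		r = kvm_mmu_topup_memory_cache(&tdx->pamt_page_cache, 2);
		if (r)
			return r;

		/* Take a reference on the PAMT pages backing @pfn. */
		return tdx_pamt_get(pfn_to_page(pfn), &tdx->pamt_page_cache);
	}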
The function kvm_x86_ops::phys_cleanup() frees the PAMT memory set up by
phys_prepare() when installing the SPTE fails afterwards.
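
On failure, the cleanup side only needs to drop whatever phys_prepare()
set up; tdx_pamt_put() is again an assumed helper name:

	/* Hypothetical sketch: undo the PAMT reference from phys_prepare(). */
	static void tdx_phys_cleanup(kvm_pfn_t pfn)
	{
		tdx_pamt_put(pfn_to_page(pfn));
	}
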
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
arch/x86/include/asm/kvm-x86-ops.h | 2 ++
arch/x86/include/asm/kvm_host.h | 3 ++
arch/x86/kvm/mmu/tdp_mmu.c | 47 +++++++++++++++++++++++++++---
3 files changed, 48 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 79406bf07a1c..37081d04e82f 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -99,6 +99,8 @@ KVM_X86_OP_OPTIONAL(link_external_spt)
KVM_X86_OP_OPTIONAL(set_external_spte)
KVM_X86_OP_OPTIONAL(free_external_spt)
KVM_X86_OP_OPTIONAL(remove_external_spte)
+KVM_X86_OP_OPTIONAL(phys_prepare)
+KVM_X86_OP_OPTIONAL(phys_cleanup)
KVM_X86_OP(has_wbinvd_exit)
KVM_X86_OP(get_l2_tsc_offset)
KVM_X86_OP(get_l2_tsc_multiplier)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6c06f3d6e081..91958c55f918 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1813,6 +1813,9 @@ struct kvm_x86_ops {
int (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
kvm_pfn_t pfn_for_gfn);

+ int (*phys_prepare)(struct kvm_vcpu *vcpu, kvm_pfn_t pfn);
+ void (*phys_cleanup)(kvm_pfn_t pfn);
+
bool (*has_wbinvd_exit)(void);

u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 405874f4d088..f6c836b2e6fc 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1137,6 +1137,26 @@ void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
}
}

+static int tdp_mmu_install_spte(struct kvm_vcpu *vcpu,
+ struct tdp_iter *iter,
+ u64 spte)
+{
+ kvm_pfn_t pfn = 0;
+ int ret = 0;
+
+ if (is_mirror_sptep(iter->sptep) && !is_frozen_spte(spte)) {
+ pfn = spte_to_pfn(spte);
+ ret = static_call(kvm_x86_phys_prepare)(vcpu, pfn);
+ }
+ if (ret)
+ return ret;
+ ret = tdp_mmu_set_spte_atomic(vcpu->kvm, iter, spte);
+ if (pfn && ret)
+ static_call(kvm_x86_phys_cleanup)(pfn);
+
+ return ret;
+}
+
/*
* Installs a last-level SPTE to handle a TDP page fault.
* (NPT/EPT violation/misconfiguration)
@@ -1170,7 +1190,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,

if (new_spte == iter->old_spte)
ret = RET_PF_SPURIOUS;
- else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
+ else if (tdp_mmu_install_spte(vcpu, iter, new_spte))
return RET_PF_RETRY;
else if (is_shadow_present_pte(iter->old_spte) &&
(!is_last_spte(iter->old_spte, iter->level) ||
@@ -1211,7 +1231,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
* Returns: 0 if the new page table was installed. Non-0 if the page table
* could not be installed (e.g. the atomic compare-exchange failed).
*/
-static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
+static int __tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
struct kvm_mmu_page *sp, bool shared)
{
u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled);
@@ -1230,6 +1250,25 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
return 0;
}

+static int tdp_mmu_link_sp(struct kvm_vcpu *vcpu, struct tdp_iter *iter,
+ struct kvm_mmu_page *sp, bool shared)
+{
+ kvm_pfn_t pfn = 0;
+ int ret = 0;
+
+ if (sp->external_spt) {
+ pfn = __pa(sp->external_spt) >> PAGE_SHIFT;
+ ret = static_call(kvm_x86_phys_prepare)(vcpu, pfn);
+ if (ret)
+ return ret;
+ }
+ ret = __tdp_mmu_link_sp(vcpu->kvm, iter, sp, shared);
+ if (pfn && ret)
+ static_call(kvm_x86_phys_cleanup)(pfn);
+
+ return ret;
+}
+
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
struct kvm_mmu_page *sp, bool shared);
@@ -1288,7 +1327,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
KVM_BUG_ON(is_mirror_sptep(iter.sptep), vcpu->kvm);
r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
} else {
- r = tdp_mmu_link_sp(kvm, &iter, sp, true);
+ r = tdp_mmu_link_sp(vcpu, &iter, sp, true);
}

/*
@@ -1514,7 +1553,7 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
* correctness standpoint since the translation will be the same either
* way.
*/
- ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
+ ret = __tdp_mmu_link_sp(kvm, iter, sp, shared);
if (ret)
goto out;
--
2.47.2