>From ca798b2e1de4d0975ee808108c7514fe738f0898 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 5 Aug 2025 15:58:13 -0700 Subject: [PATCH 5/5] KVM: VMX: Sketch in possible framework for eliding TLB flushes on pCPU migration Not-Signed-off-by: Sean Christopherson (anyone that makes this work deserves full credit) --- arch/x86/kvm/mmu/mmu.c | 3 +++ arch/x86/kvm/mmu/tdp_mmu.c | 2 ++ arch/x86/kvm/vmx/vmx.c | 21 ++++++++++++++------- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6e838cb6c9e1..925efbaae9b9 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3854,6 +3854,9 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, sp = kvm_mmu_get_shadow_page(vcpu, gfn, role); ++sp->root_count; + if (level >= PT64_ROOT_4LEVEL) + kvm_x86_call(alloc_root_cpu_mask)(root); + return __pa(sp->spt); } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 7f3d7229b2c1..bf4b0b9a7816 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -293,6 +293,8 @@ void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool mirror) root = tdp_mmu_alloc_sp(vcpu); tdp_mmu_init_sp(root, NULL, 0, role); + kvm_x86_call(alloc_root_cpu_mask)(root); + /* * TDP MMU roots are kept until they are explicitly invalidated, either * by a memslot update or by the destruction of the VM. 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b42747e2293d..e85830189cfc 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1395,7 +1395,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu) } } -static void vmx_flush_ept_on_pcpu_migration(struct kvm_mmu *mmu); +static void vmx_flush_ept_on_pcpu_migration(struct kvm_mmu *mmu, int cpu); void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) { @@ -1434,8 +1434,8 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) * TLB entries from its previous association with the vCPU. */ if (enable_ept) { - vmx_flush_ept_on_pcpu_migration(&vcpu->arch.root_mmu); - vmx_flush_ept_on_pcpu_migration(&vcpu->arch.guest_mmu); + vmx_flush_ept_on_pcpu_migration(&vcpu->arch.root_mmu, cpu); + vmx_flush_ept_on_pcpu_migration(&vcpu->arch.guest_mmu, cpu); } else { kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } @@ -3261,22 +3261,29 @@ void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) vpid_sync_context(vmx_get_current_vpid(vcpu)); } -static void __vmx_flush_ept_on_pcpu_migration(hpa_t root_hpa) +static void __vmx_flush_ept_on_pcpu_migration(hpa_t root_hpa, int cpu) { + struct kvm_mmu_page *root; + if (!VALID_PAGE(root_hpa)) return; + root = root_to_sp(root_hpa); + if (!WARN_ON_ONCE(!root) && + test_and_set_bit(cpu, root->cpu_flushed_mask)) + return; + vmx_flush_tlb_ept_root(root_hpa); } -static void vmx_flush_ept_on_pcpu_migration(struct kvm_mmu *mmu) +static void vmx_flush_ept_on_pcpu_migration(struct kvm_mmu *mmu, int cpu) { int i; - __vmx_flush_ept_on_pcpu_migration(mmu->root.hpa); + __vmx_flush_ept_on_pcpu_migration(mmu->root.hpa, cpu); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - __vmx_flush_ept_on_pcpu_migration(mmu->prev_roots[i].hpa); + __vmx_flush_ept_on_pcpu_migration(mmu->prev_roots[i].hpa, cpu); } void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) -- 2.50.1.565.gc32cd1483b-goog