Date:   Tue,  1 Sep 2020 19:55:42 +0800
From:   yulei.kernel@...il.com
To:     pbonzini@...hat.com
Cc:     kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
        sean.j.christopherson@...el.com, jmattson@...gle.com,
        junaids@...gle.com, bgardon@...gle.com, vkuznets@...hat.com,
        xiaoguangrong.eric@...il.com, kernellwp@...il.com,
        lihaiwei.kernel@...il.com, Yulei Zhang <yulei.kernel@...il.com>,
        Yulei Zhang <yuleixzhang@...cent.com>
Subject: [RFC V2 4/9] Add release function for direct build EPT when guest VM exits

From: Yulei Zhang <yulei.kernel@...il.com>

Release the pre-pinned memory backing the direct-build EPT when the
guest VM exits. With direct-build TDP the page table is a global,
per-VM structure rather than per-vCPU state, so kvm_mmu_new_pgd() and
kvm_mmu_unload() only need to invalidate the vCPU's root_hpa; the
global root itself is torn down through the new
kvm_direct_tdp_release_global_root() helper, which walks the table
down from the root via __kvm_walk_global_page().

Signed-off-by: Yulei Zhang <yuleixzhang@...cent.com>
---
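Note for reviewers (illustration, not part of the change itself): the
helper added below only walks and frees the global root; it still has
to be invoked from the VM teardown path. A minimal sketch of the
expected call site follows -- the placement inside
kvm_arch_destroy_vm() is an assumption for illustration, and the
actual hook may land elsewhere in this series:

	/*
	 * Illustrative only, not part of this patch.  Assuming the
	 * series invokes the new helper during x86 VM teardown:
	 */
	void kvm_arch_destroy_vm(struct kvm *kvm)
	{
		...
		kvm_mmu_uninit_vm(kvm);
		/*
		 * Drop the global, pre-pinned direct-build EPT root and
		 * every page-table page below it (a no-op when
		 * global_root_hpa was never set up).
		 */
		kvm_direct_tdp_release_global_root(kvm);
		...
	}
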
 arch/x86/kvm/mmu/mmu.c | 35 ++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 03c5e73b96cb..f2124f52b286 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4309,8 +4309,11 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
 		     bool skip_mmu_sync)
 {
-	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
-			  skip_tlb_flush, skip_mmu_sync);
+	if (!vcpu->arch.direct_build_tdp)
+		__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
+				  skip_tlb_flush, skip_mmu_sync);
+	else
+		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
@@ -5207,10 +5210,14 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
 
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
-	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
-	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
-	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
-	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
+	if (!vcpu->arch.direct_build_tdp) {
+		kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
+		WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
+		kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+		WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
+	}
+	vcpu->arch.direct_build_tdp = false;
+	vcpu->arch.mmu->root_hpa = INVALID_PAGE;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
 
@@ -6538,6 +6545,12 @@ void kvm_direct_tdp_remove_page_table(struct kvm *kvm, struct kvm_memory_slot *s
 	kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_direct_tdp_release_global_root(struct kvm *kvm)
+{
+	if (kvm->arch.global_root_hpa)
+		__kvm_walk_global_page(kvm, kvm->arch.global_root_hpa, max_tdp_level);
+}
+
 /*
  * Calculate mmu pages needed for kvm.
  */
@@ -6564,9 +6579,13 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvm_mmu_unload(vcpu);
-	free_mmu_pages(&vcpu->arch.root_mmu);
-	free_mmu_pages(&vcpu->arch.guest_mmu);
+	if (vcpu->arch.direct_build_tdp) {
+		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
+	} else {
+		kvm_mmu_unload(vcpu);
+		free_mmu_pages(&vcpu->arch.root_mmu);
+		free_mmu_pages(&vcpu->arch.guest_mmu);
+	}
 	mmu_free_memory_caches(vcpu);
 }
 
-- 
2.17.1
