Date:   Mon,  6 Mar 2023 14:41:13 -0800
From:   Vipin Sharma <vipinsh@...gle.com>
To:     seanjc@...gle.com, pbonzini@...hat.com, bgardon@...gle.com,
        dmatlack@...gle.com
Cc:     jmattson@...gle.com, mizhang@...gle.com, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org, Vipin Sharma <vipinsh@...gle.com>
Subject: [Patch v4 04/18] KVM: x86/mmu: Shrink shadow page caches via MMU shrinker

Shrink the shadow page caches via the MMU shrinker, based on the
kvm_total_unused_cached_pages count. Traverse each vCPU of every VM,
empty the caches, and exit the shrinker once enough pages have been
freed. Also, move each processed VM to the end of vm_list so that
other VMs are targeted first on the next shrinker invocation.

Signed-off-by: Vipin Sharma <vipinsh@...gle.com>
---
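Note for reviewers (illustrative only, not part of the patch): the
sketch below shows roughly how the mm core drives the two callbacks
wired up here. The real driver is do_shrink_slab() in mm/vmscan.c,
which also handles deferred work and per-memcg accounting; the helper
name drive_shrinker() and the simplified batching are invented for
this sketch. It assumes the definitions in <linux/shrinker.h>.

static unsigned long drive_shrinker(struct shrinker *shrinker,
				    struct shrink_control *sc)
{
	/* 128 mirrors the default batch size (SHRINK_BATCH). */
	unsigned long batch = shrinker->batch ? shrinker->batch : 128;
	unsigned long freeable, freed = 0, ret;

	/* First ask the shrinker how many objects it could free. */
	freeable = shrinker->count_objects(shrinker, sc);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return 0;	/* e.g. no unused cached pages */

	while (freed < freeable) {
		sc->nr_to_scan = min(freeable - freed, batch);
		ret = shrinker->scan_objects(shrinker, sc);
		/* SHRINK_STOP (e.g. lock contention) aborts the scan. */
		if (ret == SHRINK_STOP)
			break;
		freed += ret;
	}
	return freed;
}
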
 arch/x86/kvm/mmu/mmu.c   | 56 +++++++++++++++++++++++++++++++++++-----
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      |  6 ++++-
 3 files changed, 55 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 13f41b7ac280..df8dcb7e5de7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6693,16 +6693,58 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	}
 }
 
-static unsigned long
-mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
-	return SHRINK_STOP;
+static unsigned long mmu_shrink_scan(struct shrinker *shrink,
+				     struct shrink_control *sc)
+{
+	struct kvm *kvm, *next_kvm, *first_kvm = NULL;
+	struct kvm_mmu_memory_cache *cache;
+	unsigned long i, freed = 0;
+	struct mutex *cache_lock;
+	struct kvm_vcpu *vcpu;
+
+	mutex_lock(&kvm_lock);
+	list_for_each_entry_safe(kvm, next_kvm, &vm_list, vm_list) {
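+		/* A full pass is complete when the first VM visited reappears. */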
+		if (first_kvm == kvm)
+			break;
+
+		if (!first_kvm)
+			first_kvm = kvm;
+
+		list_move_tail(&kvm->vm_list, &vm_list);
+
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			cache = &vcpu->arch.mmu_shadow_page_cache;
+			cache_lock = &vcpu->arch.mmu_shadow_page_cache_lock;
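+			/* Skip a contended cache rather than stall the vCPU holding it. */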
+			if (mutex_trylock(cache_lock)) {
+				if (cache->nobjs) {
+					freed += cache->nobjs;
+					kvm_mmu_empty_memory_cache(cache);
+				}
+				mutex_unlock(cache_lock);
+				if (freed >= sc->nr_to_scan)
+					goto out;
+			}
+		}
+	}
+out:
+	mutex_unlock(&kvm_lock);
+	if (freed) {
+		percpu_counter_sub(&kvm_total_unused_cached_pages, freed);
+		return freed;
+	} else {
+		return SHRINK_STOP;
+	}
 }
 
-static unsigned long
-mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long mmu_shrink_count(struct shrinker *shrink,
+				      struct shrink_control *sc)
 {
-	return SHRINK_EMPTY;
+	s64 count = percpu_counter_sum(&kvm_total_unused_cached_pages);
+
+	WARN_ON(count < 0);
+	return count <= 0 ? SHRINK_EMPTY : count;
 }
 
 static struct shrinker mmu_shrinker = {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8ada23756b0e..5cfa42c130e0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1361,6 +1361,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_empty_memory_cache(struct kvm_mmu_memory_cache *mc);
 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d255964ec331..536d8ab6e61f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -430,7 +430,7 @@ int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 	return mc->nobjs;
 }
 
-void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+void kvm_mmu_empty_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
 	while (mc->nobjs) {
 		if (mc->kmem_cache)
@@ -438,7 +438,11 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 		else
 			free_page((unsigned long)mc->objects[--mc->nobjs]);
 	}
+}
 
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	kvm_mmu_empty_memory_cache(mc);
 	kvfree(mc->objects);
 
 	mc->objects = NULL;
-- 
2.40.0.rc0.216.gc4246ad0f0-goog
