Message-ID: <20230309180135.000043fe@gmail.com>
Date: Thu, 9 Mar 2023 18:01:35 +0200
From: Zhi Wang <zhi.wang.linux@...il.com>
To: Vipin Sharma <vipinsh@...gle.com>
Cc: seanjc@...gle.com, pbonzini@...hat.com, bgardon@...gle.com,
dmatlack@...gle.com, jmattson@...gle.com, mizhang@...gle.com,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [Patch v4 06/18] KVM: x86/mmu: Shrink split_shadow_page_cache via MMU shrinker
On Mon, 6 Mar 2023 14:41:15 -0800
Vipin Sharma <vipinsh@...gle.com> wrote:
> Use MMU shrinker to free unused pages in split_shadow_page_cache.
> Refactor the code and make a common function to try emptying the page cache.
>
> Signed-off-by: Vipin Sharma <vipinsh@...gle.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 34 +++++++++++++++++++++-------------
> 1 file changed, 21 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 0ebb8a2eaf47..73a0ac9c11ce 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6696,13 +6696,24 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
> }
> }
>
After adding the lock to kvm_mmu_memory_cache itself, cache_lock no longer
needs to be passed here or in mmu_shrink_scan(); rough sketches below.
> +static int mmu_memory_cache_try_empty(struct kvm_mmu_memory_cache *cache,
> + struct mutex *cache_lock)
> +{
> + int freed = 0;
> +
> + if (mutex_trylock(cache_lock)) {
> + freed = cache->nobjs;
> + kvm_mmu_empty_memory_cache(cache);
> + mutex_unlock(cache_lock);
> + }
> + return freed;
> +}
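
Something like this (only a rough sketch, assuming the mutex becomes a
member of struct kvm_mmu_memory_cache; the "lock" field name is just for
illustration):

static int mmu_memory_cache_try_empty(struct kvm_mmu_memory_cache *cache)
{
	int freed = 0;

	/* "lock" is the hypothetical mutex embedded in the cache itself. */
	if (mutex_trylock(&cache->lock)) {
		freed = cache->nobjs;
		kvm_mmu_empty_memory_cache(cache);
		mutex_unlock(&cache->lock);
	}
	return freed;
}
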
> +
> static unsigned long mmu_shrink_scan(struct shrinker *shrink,
> struct shrink_control *sc)
> {
> struct kvm *kvm, *next_kvm, *first_kvm = NULL;
> - struct kvm_mmu_memory_cache *cache;
> unsigned long i, freed = 0;
> - struct mutex *cache_lock;
> struct kvm_vcpu *vcpu;
>
> mutex_lock(&kvm_lock);
> @@ -6716,18 +6727,15 @@ static unsigned long mmu_shrink_scan(struct shrinker *shrink,
> list_move_tail(&kvm->vm_list, &vm_list);
>
> kvm_for_each_vcpu(i, vcpu, kvm) {
> - cache = &vcpu->arch.mmu_shadow_page_cache;
> - cache_lock = &vcpu->arch.mmu_shadow_page_cache_lock;
> - if (mutex_trylock(cache_lock)) {
> - if (cache->nobjs) {
> - freed += cache->nobjs;
> - kvm_mmu_empty_memory_cache(cache);
> - }
> - mutex_unlock(cache_lock);
> - if (freed >= sc->nr_to_scan)
> - goto out;
> - }
> + freed += mmu_memory_cache_try_empty(&vcpu->arch.mmu_shadow_page_cache,
> + &vcpu->arch.mmu_shadow_page_cache_lock);
> + if (freed >= sc->nr_to_scan)
> + goto out;
> }
> + freed += mmu_memory_cache_try_empty(&kvm->arch.split_shadow_page_cache,
> + &kvm->slots_lock);
> + if (freed >= sc->nr_to_scan)
> + goto out;
> }
> out:
> mutex_unlock(&kvm_lock);
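
The call sites in mmu_shrink_scan() would then reduce to something like
(again just a sketch; split_shadow_page_cache would be guarded by its own
embedded lock rather than kvm->slots_lock, which would also avoid
trylocking slots_lock from the shrinker path just to empty a cache):

		kvm_for_each_vcpu(i, vcpu, kvm) {
			freed += mmu_memory_cache_try_empty(&vcpu->arch.mmu_shadow_page_cache);
			if (freed >= sc->nr_to_scan)
				goto out;
		}
		freed += mmu_memory_cache_try_empty(&kvm->arch.split_shadow_page_cache);
		if (freed >= sc->nr_to_scan)
			goto out;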