Message-ID: <YtsPk5+hZNMEwT0c@google.com>
Date: Fri, 22 Jul 2022 20:58:59 +0000
From: Mingwei Zhang <mizhang@...gle.com>
To: Yosry Ahmed <yosryahmed@...gle.com>
Cc: Tejun Heo <tj@...nel.org>, Johannes Weiner <hannes@...xchg.org>,
Zefan Li <lizefan.x@...edance.com>,
Marc Zyngier <maz@...nel.org>,
James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeelb@...gle.com>,
Oliver Upton <oupton@...gle.com>, cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.cs.columbia.edu, kvm@...r.kernel.org,
linux-mm@...ck.org
Subject: Re: [PATCH v4 3/4] KVM: x86/mmu: count KVM mmu usage in secondary
pagetable stats.
On Fri, Apr 29, 2022, Yosry Ahmed wrote:
> Count the pages used by the KVM mmu on x86 in the secondary pagetable stats.
>
> For the legacy mmu, accounting pagetable stats is combined with KVM's
> existing accounting of mmu pages in the newly introduced
> kvm_[un]account_mmu_page() helpers.
>
> For the tdp mmu, introduce new tdp_[un]account_mmu_page() helpers that
> combine accounting pagetable stats with the tdp_mmu_pages counter
> accounting.
>
> The tdp_mmu_pages counter was introduced in the series at [1]. This
> patch was rebased on top of the first two patches in that series.
>
> [1] https://lore.kernel.org/lkml/20220401063636.2414200-1-mizhang@google.com/
>
> Signed-off-by: Yosry Ahmed <yosryahmed@...gle.com>
> ---
It looks like there are two metrics for the mmu on x86: one for the
shadow mmu and the other for the TDP mmu. Is there any plan to merge them?
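
For context, if I'm reading the code right, the two paths account
differently today. A rough sketch (the body of kvm_mod_used_mmu_pages()
is from memory; only its percpu_counter_add() line is visible in the
hunk below):

	/* Shadow MMU: per-VM count plus a global percpu counter. */
	static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
	{
		kvm->arch.n_used_mmu_pages += nr;
		percpu_counter_add(&kvm_total_used_mmu_pages, nr);
	}

	/* TDP MMU: a plain per-VM atomic64, no global counter. */
	atomic64_inc(&kvm->arch.tdp_mmu_pages);

Merging them would presumably mean settling on one scheme for both MMUs.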
> arch/x86/kvm/mmu/mmu.c | 16 ++++++++++++++--
> arch/x86/kvm/mmu/tdp_mmu.c | 16 ++++++++++++++--
> 2 files changed, 28 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 78d8e1d8fb99..e5b0e826445d 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1679,6 +1679,18 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
> percpu_counter_add(&kvm_total_used_mmu_pages, nr);
> }
>
> +static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
> +{
> + kvm_mod_used_mmu_pages(kvm, +1);
> + kvm_account_pgtable_pages((void *)sp->spt, +1);
> +}
> +
> +static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
> +{
> + kvm_mod_used_mmu_pages(kvm, -1);
> + kvm_account_pgtable_pages((void *)sp->spt, -1);
> +}
> +
> static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
> {
> MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
> @@ -1734,7 +1746,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
> */
> sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
> list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
> - kvm_mod_used_mmu_pages(vcpu->kvm, +1);
> + kvm_account_mmu_page(vcpu->kvm, sp);
> return sp;
> }
>
> @@ -2363,7 +2375,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
> list_add(&sp->link, invalid_list);
> else
> list_move(&sp->link, invalid_list);
> - kvm_mod_used_mmu_pages(kvm, -1);
> + kvm_unaccount_mmu_page(kvm, sp);
> } else {
> /*
> * Remove the active root from the active page list, the root
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 3456277ade18..6295c4da5dee 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -371,6 +371,18 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
> }
> }
>
> +static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
> +{
> + atomic64_inc(&kvm->arch.tdp_mmu_pages);
> + kvm_account_pgtable_pages((void *)sp->spt, +1);
> +}
> +
> +static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
> +{
> + atomic64_dec(&kvm->arch.tdp_mmu_pages);
> + kvm_account_pgtable_pages((void *)sp->spt, -1);
> +}
> +
> /**
> * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
> *
> @@ -383,7 +395,7 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
> static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
> bool shared)
> {
> - atomic64_dec(&kvm->arch.tdp_mmu_pages);
> + tdp_unaccount_mmu_page(kvm, sp);
>
> if (!sp->lpage_disallowed)
> return;
> @@ -1121,7 +1133,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
> tdp_mmu_set_spte(kvm, iter, spte);
> }
>
> - atomic64_inc(&kvm->arch.tdp_mmu_pages);
> + tdp_account_mmu_page(kvm, sp);
>
> return 0;
> }
> --
> 2.36.0.464.gb9c8b46e94-goog
>
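
For anyone following the thread: if I understand the series correctly,
kvm_account_pgtable_pages() is a thin wrapper over the new
NR_SECONDARY_PAGETABLE node stat, something like:

	static inline void kvm_account_pgtable_pages(void *virt, int nr)
	{
		mod_lruvec_page_state(virt_to_page(virt),
				      NR_SECONDARY_PAGETABLE, nr);
	}

so each helper above updates KVM's own bookkeeping and the memcg-aware
stat in one place.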