Message-ID: <12ad52ec-f5cc-43ff-9051-040fceeed68b@redhat.com>
Date: Thu, 17 Oct 2024 18:52:33 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Yan Zhao <yan.y.zhao@...el.com>, Sagi Shahar <sagis@...gle.com>,
Alex Bennée <alex.bennee@...aro.org>,
David Matlack <dmatlack@...gle.com>, James Houghton <jthoughton@...gle.com>
Subject: Re: [PATCH 14/18] KVM: x86/mmu: Stop processing TDP MMU roots for
test_age if young SPTE found
On 10/11/24 04:10, Sean Christopherson wrote:
> Return immediately if a young SPTE is found when testing, but not updating,
> SPTEs. The return value is a boolean, i.e. whether there is one young SPTE
> or fifty is irrelevant (ignoring the fact that it's impossible for there to
> be fifty SPTEs, as KVM has a hard limit on the number of valid TDP MMU
> roots).
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> arch/x86/kvm/mmu/tdp_mmu.c | 84 ++++++++++++++++++--------------------
> 1 file changed, 40 insertions(+), 44 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index e8c061bf94ec..f412bca206c5 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -1192,35 +1192,6 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
> return flush;
> }
>
> -typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
> - struct kvm_gfn_range *range);
> -
> -static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
> - struct kvm_gfn_range *range,
> - tdp_handler_t handler)
> -{
> - struct kvm_mmu_page *root;
> - struct tdp_iter iter;
> - bool ret = false;
> -
> - /*
> - * Don't support rescheduling, none of the MMU notifiers that funnel
> - * into this helper allow blocking; it'd be dead, wasteful code. Note,
> - * this helper must NOT be used to unmap GFNs, as it processes only
> - * valid roots!
> - */
> - for_each_valid_tdp_mmu_root(kvm, root, range->slot->as_id) {
> - rcu_read_lock();
> -
> - tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
> - ret |= handler(kvm, &iter, range);
> -
> - rcu_read_unlock();
> - }
> -
> - return ret;
> -}
> -
> /*
> * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
> * if any of the GFNs in the range have been accessed.
> @@ -1229,15 +1200,10 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
> * from the clear_young() or clear_flush_young() notifier, which uses the
> * return value to determine if the page has been accessed.
> */
> -static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
> - struct kvm_gfn_range *range)
> +static void kvm_tdp_mmu_age_spte(struct tdp_iter *iter)
> {
> u64 new_spte;
>
> - /* If we have a non-accessed entry we don't need to change the pte. */
> - if (!is_accessed_spte(iter->old_spte))
> - return false;
> -
> if (spte_ad_enabled(iter->old_spte)) {
> iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
> iter->old_spte,
> @@ -1253,23 +1219,53 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
>
> trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
> iter->old_spte, new_spte);
> - return true;
> +}
> +
> +static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
> + struct kvm_gfn_range *range,
> + bool test_only)
> +{
> + struct kvm_mmu_page *root;
> + struct tdp_iter iter;
> + bool ret = false;
> +
> + /*
> + * Don't support rescheduling, none of the MMU notifiers that funnel
> + * into this helper allow blocking; it'd be dead, wasteful code. Note,
> + * this helper must NOT be used to unmap GFNs, as it processes only
> + * valid roots!
> + */
> + for_each_valid_tdp_mmu_root(kvm, root, range->slot->as_id) {
> + rcu_read_lock();
> +
> + tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) {
> + if (!is_accessed_spte(iter.old_spte))
> + continue;
> +
> + ret = true;
> + if (test_only)
> + break;
> +
> + kvm_tdp_mmu_age_spte(&iter);
> + }
> +
> + rcu_read_unlock();
> +
> + if (ret && test_only)
> + break;
> + }
> +
> + return ret;
> }
If you use guard(rcu)(), you can avoid the repeated breaks:
	for_each_valid_tdp_mmu_root(kvm, root, range->slot->as_id) {
		guard(rcu)();

		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) {
			if (!is_accessed_spte(iter.old_spte))
				continue;

			ret = true;
			if (test_only)
				return ret;

			kvm_tdp_mmu_age_spte(&iter);
		}
	}

	return ret;
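
(For context: guard(rcu)() is the scope-based cleanup helper built on
DEFINE_LOCK_GUARD_0() in <linux/rcupdate.h> and the __cleanup__-based
machinery in <linux/cleanup.h>, so rcu_read_unlock() runs automatically
on every scope exit, including the early "return ret" above.  A minimal
sketch of the underlying idea, using made-up example_* names rather
than the real kernel macros:

	/*
	 * Simplified illustration only -- the real guard(rcu)() comes
	 * from DEFINE_LOCK_GUARD_0(rcu, ...); the names below are
	 * invented for this sketch.
	 */
	static inline void example_rcu_unlock(int *unused)
	{
		rcu_read_unlock();
	}

	#define example_rcu_guard()					\
		int example_rcu_guard_var				\
		__attribute__((__cleanup__(example_rcu_unlock))) =	\
		({ rcu_read_lock(); 0; })

	static bool example_scan(void)
	{
		example_rcu_guard();

		if (example_found_young())	/* hypothetical predicate */
			return true;		/* rcu_read_unlock() runs here */

		return false;			/* ...and here as well */
	}

The destructor fires whether the scope is left by falling off the end,
by break, or by return, which is what makes the early return in the
loop above safe.)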
Paolo