Message-ID: <ZivazWQw1oCU8VBC@google.com>
Date: Fri, 26 Apr 2024 09:49:53 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Rick P Edgecombe <rick.p.edgecombe@...el.com>
Cc: Tina Zhang <tina.zhang@...el.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>, Hang Yuan <hang.yuan@...el.com>,
Kai Huang <kai.huang@...el.com>,
"binbin.wu@...ux.intel.com" <binbin.wu@...ux.intel.com>, "sagis@...gle.com" <sagis@...gle.com>,
Bo Chen <chen.bo@...el.com>,
"isaku.yamahata@...ux.intel.com" <isaku.yamahata@...ux.intel.com>, "tabba@...gle.com" <tabba@...gle.com>,
Erdem Aktas <erdemaktas@...gle.com>,
"isaku.yamahata@...il.com" <isaku.yamahata@...il.com>, "kvm@...r.kernel.org" <kvm@...r.kernel.org>,
Isaku Yamahata <isaku.yamahata@...el.com>, "pbonzini@...hat.com" <pbonzini@...hat.com>
Subject: Re: [PATCH v19 011/130] KVM: Add new members to struct kvm_gfn_range
to operate on
On Fri, Apr 26, 2024, Rick P Edgecombe wrote:
> On Fri, 2024-04-26 at 08:28 -0700, Sean Christopherson wrote:
> > If the choice is between an enum and exclude_*, I would strongly prefer the
> > enum. Using exclude_* results in inverted polarity for the code that
> > triggers invalidations.
>
> Right, the awkwardness lands in that code.
>
> The processing code looks nice though:
> https://lore.kernel.org/kvm/5210e6e6e2eb73b04cb7039084015612479ae2fe.camel@intel.com/
Heh, where's your bitmask abuse spirit? It's a little evil (and by "evil" I mean
awesome), but the need to process different roots is another good argument for an
enum+bitmask.
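To spell out the polarity gripe on the invalidation side (field names here are purely
illustrative, not from the series): with exclude_*, callers have to say what they do
_not_ want touched, i.e.

        /* Zap only the private mappings, expressed as a double negative. */
        range.exclude_shared = true;
        range.exclude_private = false;

whereas a process/enum flag states it directly:

        /* Zap only the private mappings. */
        range.process = KVM_PROCESS_PRIVATE;
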
enum tdp_mmu_root_types {
        KVM_SHARED_ROOTS = KVM_PROCESS_SHARED,
        KVM_PRIVATE_ROOTS = KVM_PROCESS_PRIVATE,
        KVM_VALID_ROOTS = BIT(2),

        KVM_ANY_VALID_ROOT = KVM_SHARED_ROOTS | KVM_PRIVATE_ROOTS | KVM_VALID_ROOTS,
        KVM_ANY_ROOT = KVM_SHARED_ROOTS | KVM_PRIVATE_ROOTS,
};

static_assert(!(KVM_SHARED_ROOTS & KVM_VALID_ROOTS));
static_assert(!(KVM_PRIVATE_ROOTS & KVM_VALID_ROOTS));
static_assert(KVM_PRIVATE_ROOTS == (KVM_SHARED_ROOTS << 1));

/*
* Returns the next root after @prev_root (or the first root if @prev_root is
* NULL). A reference to the returned root is acquired, and the reference to
* @prev_root is released (the caller obviously must hold a reference to
* @prev_root if it's non-NULL).
*
* Returns NULL if the end of tdp_mmu_roots was reached.
*/
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                              struct kvm_mmu_page *prev_root,
                                              enum tdp_mmu_root_types types)
{
        bool only_valid = types & KVM_VALID_ROOTS;
        struct kvm_mmu_page *next_root;

        /*
         * While the roots themselves are RCU-protected, fields such as
         * role.invalid are protected by mmu_lock.
         */
        lockdep_assert_held(&kvm->mmu_lock);

        rcu_read_lock();

        if (prev_root)
                next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                  &prev_root->link,
                                                  typeof(*prev_root), link);
        else
                next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                   typeof(*next_root), link);

        while (next_root) {
                if ((!only_valid || !next_root->role.invalid) &&
                    (types & (KVM_SHARED_ROOTS << is_private_sp(next_root))) &&
                    kvm_tdp_mmu_get_root(next_root))
                        break;

                next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                &next_root->link, typeof(*next_root), link);
        }

        rcu_read_unlock();

        if (prev_root)
                kvm_tdp_mmu_put_root(kvm, prev_root);

        return next_root;
}

#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types) \
        for (_root = tdp_mmu_next_root(_kvm, NULL, _types); \
             ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
             _root = tdp_mmu_next_root(_kvm, _root, _types)) \
                if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) { \
                } else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
        __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_ANY_VALID_ROOT)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
        for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ANY_ROOT); \
             ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
             _root = tdp_mmu_next_root(_kvm, _root, KVM_ANY_ROOT))
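
And the triggering side can then plumb the kvm_gfn_range flags straight into the
iterator, since the flag values double as root types.  Completely untested sketch;
the kvm_process enum name, the "process" field on kvm_gfn_range, and the zap helper's
exact signature are placeholders for illustration:

static enum tdp_mmu_root_types kvm_process_to_root_types(enum kvm_process process)
{
        /* KVM_PROCESS_* and KVM_*_ROOTS share values, so conversion is a cast. */
        return (enum tdp_mmu_root_types)process;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
                                 bool flush)
{
        enum tdp_mmu_root_types types = kvm_process_to_root_types(range->process);
        struct kvm_mmu_page *root;

        __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, types)
                flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
                                          range->may_block, flush);

        return flush;
}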