Date: Fri, 26 Apr 2024 17:01:41 +0000
From: "Edgecombe, Rick P" <rick.p.edgecombe@...el.com>
To: "seanjc@...gle.com" <seanjc@...gle.com>
CC: "Zhang, Tina" <tina.zhang@...el.com>, "Yuan, Hang" <hang.yuan@...el.com>,
	"Huang, Kai" <kai.huang@...el.com>, "binbin.wu@...ux.intel.com"
	<binbin.wu@...ux.intel.com>, "Chen, Bo2" <chen.bo@...el.com>,
	"sagis@...gle.com" <sagis@...gle.com>, "linux-kernel@...r.kernel.org"
	<linux-kernel@...r.kernel.org>, "isaku.yamahata@...ux.intel.com"
	<isaku.yamahata@...ux.intel.com>, "tabba@...gle.com" <tabba@...gle.com>,
	"isaku.yamahata@...il.com" <isaku.yamahata@...il.com>, "Aktas, Erdem"
	<erdemaktas@...gle.com>, "Yamahata, Isaku" <isaku.yamahata@...el.com>,
	"kvm@...r.kernel.org" <kvm@...r.kernel.org>, "pbonzini@...hat.com"
	<pbonzini@...hat.com>
Subject: Re: [PATCH v19 011/130] KVM: Add new members to struct kvm_gfn_range
 to operate on

On Fri, 2024-04-26 at 09:49 -0700, Sean Christopherson wrote:
> 
> Heh, where's your bitmask abuse spirit?  It's a little evil (and by "evil" I
> mean awesome), but the need to process different roots is another good
> argument for an enum+bitmask.

Haha. There seems to be some special love for bit math in KVM. I was just
relaying my struggle to understand permission_fault() the other day.

> 
> enum tdp_mmu_root_types {
>         KVM_SHARED_ROOTS = KVM_PROCESS_SHARED,
>         KVM_PRIVATE_ROOTS = KVM_PROCESS_PRIVATE,
>         KVM_VALID_ROOTS = BIT(2),
>         KVM_ANY_VALID_ROOT = KVM_SHARED_ROOTS | KVM_PRIVATE_ROOTS |
>                              KVM_VALID_ROOTS,
>         KVM_ANY_ROOT = KVM_SHARED_ROOTS | KVM_PRIVATE_ROOTS,
> };
> static_assert(!(KVM_SHARED_ROOTS & KVM_VALID_ROOTS));
> static_assert(!(KVM_PRIVATE_ROOTS & KVM_VALID_ROOTS));
> static_assert(KVM_PRIVATE_ROOTS == (KVM_SHARED_ROOTS << 1));
> 
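Side note, mostly to check my own reading of that last static_assert: it's what
lets the per-root type check below be a single shift. A minimal sketch of how I
picture it, with tdp_mmu_root_match() being a name I'm making up here:

/*
 * Hypothetical helper: because KVM_PRIVATE_ROOTS == (KVM_SHARED_ROOTS << 1),
 * shifting the shared bit left by is_private_sp(root) selects the bit that
 * matches this root's type, so a single AND filters both kinds of roots.
 */
static bool tdp_mmu_root_match(struct kvm_mmu_page *root,
			       enum tdp_mmu_root_types types)
{
	return types & (KVM_SHARED_ROOTS << is_private_sp(root));
}
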
> /*
>  * Returns the next root after @prev_root (or the first root if @prev_root is
>  * NULL).  A reference to the returned root is acquired, and the reference to
>  * @prev_root is released (the caller obviously must hold a reference to
>  * @prev_root if it's non-NULL).
>  *
>  * Returns NULL if the end of tdp_mmu_roots was reached.
>  */
> static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
>                                               struct kvm_mmu_page *prev_root,
>                                               enum tdp_mmu_root_types types)
> {
>         bool only_valid = types & KVM_VALID_ROOTS;
>         struct kvm_mmu_page *next_root;
> 
>         /*
>          * While the roots themselves are RCU-protected, fields such as
>          * role.invalid are protected by mmu_lock.
>          */
>         lockdep_assert_held(&kvm->mmu_lock);
> 
>         rcu_read_lock();
> 
>         if (prev_root)
>                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
>                                                   &prev_root->link,
>                                                   typeof(*prev_root), link);
>         else
>                 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
>                                                    typeof(*next_root), link);
> 
>         while (next_root) {
>                 if ((!only_valid || !next_root->role.invalid) &&
>                     (types & (KVM_SHARED_ROOTS << is_private_sp(next_root))) &&
>                     kvm_tdp_mmu_get_root(next_root))
>                         break;
> 
>                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
>                                 &next_root->link, typeof(*next_root), link);
>         }
> 
>         rcu_read_unlock();
> 
>         if (prev_root)
>                 kvm_tdp_mmu_put_root(kvm, prev_root);
> 
>         return next_root;
> }
> 
> #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types)       \
>         for (_root = tdp_mmu_next_root(_kvm, NULL, _types);                    \
>              ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;             \
>              _root = tdp_mmu_next_root(_kvm, _root, _types))                   \
>                 if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {      \
>                 } else
> 
> #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)            \
>         __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_ANY_VALID_ROOT)
> 
> #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                          \
>         for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ANY_ROOT);              \
>              ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;             \
>              _root = tdp_mmu_next_root(_kvm, _root, KVM_ANY_ROOT))

Ohh, yes move it into the iterators. I like it a lot.
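
For my own mental model, the TDX-side callers would then look roughly like the
below. Just a sketch: range->process and handle_gfn_range_on_root() are
placeholder names for whatever the real patch wires up, I only mean to show how
the KVM_PROCESS_* flags feed straight into the root types.

static bool kvm_tdp_mmu_handle_gfn_range(struct kvm *kvm,
					 struct kvm_gfn_range *range)
{
	/*
	 * KVM_SHARED_ROOTS/KVM_PRIVATE_ROOTS alias KVM_PROCESS_* by
	 * definition, so the range's filter (placeholder member name) can be
	 * used as-is; OR in KVM_VALID_ROOTS to skip invalid roots.
	 */
	enum tdp_mmu_root_types types = range->process | KVM_VALID_ROOTS;
	struct kvm_mmu_page *root;
	bool ret = false;

	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, types)
		ret |= handle_gfn_range_on_root(kvm, root, range);

	return ret;
}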
