Message-ID: <s4ln7kgzj3jlpaei2e7j2n3w3xq3bgeed4kgm3xwadlxu56b5b@tgmxd4psomda>
Date: Thu, 22 Aug 2024 13:00:50 -0400
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Peng Zhang <zhangpeng.00@...edance.com>
Cc: pbonzini@...hat.com, chao.p.peng@...ux.intel.com, seanjc@...gle.com,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
maple-tree@...ts.infradead.org
Subject: Re: [PATCH RFC] KVM: Use maple tree to manage memory attributes.
* Peng Zhang <zhangpeng.00@...edance.com> [240822 02:56]:
> Currently, an xarray is used to manage memory attributes. Memory
> attribute management is fundamentally an interval problem, but the
> xarray is not well suited to intervals: it can waste memory and is
> inefficient. Switching to a maple tree is more elegant and brings
> three advantages:
> 1. Less memory overhead.
> 2. More efficient interval operations.
> 3. Simpler code.
>
> This is the first user of the maple tree interface mas_find_range(),
> which does not have any test cases yet, so its stability is unclear.
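For anyone unfamiliar with the two structures: the xarray stores one
entry per gfn, while the maple tree stores one entry per contiguous
range, which is where the memory saving comes from.  A minimal sketch
of the same store in both APIs (illustrative only, not from this
patch; the helpers are made up):

	/* xarray: one slot per index in [start, end) */
	static int set_attrs_xa(struct xarray *xa, unsigned long start,
				unsigned long end, unsigned long attrs)
	{
		unsigned long i;
		void *old;

		for (i = start; i < end; i++) {
			old = xa_store(xa, i, xa_mk_value(attrs),
				       GFP_KERNEL);
			if (xa_is_err(old))
				return xa_err(old);
		}
		return 0;
	}

	/* maple tree: a single entry covering [start, end - 1] */
	static int set_attrs_mt(struct maple_tree *mt, unsigned long start,
				unsigned long end, unsigned long attrs)
	{
		return mtree_store_range(mt, start, end - 1,
					 xa_mk_value(attrs), GFP_KERNEL);
	}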
>
> Signed-off-by: Peng Zhang <zhangpeng.00@...edance.com>
> ---
> include/linux/kvm_host.h | 5 +++--
> virt/kvm/kvm_main.c | 47 ++++++++++++++--------------------------
> 2 files changed, 19 insertions(+), 33 deletions(-)
>
> I haven't tested this code yet, and I'm not very familiar with KVM, so
> I'd be happy if someone could help test it. This is just an RFC for
> now; any comments are welcome.
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 79a6b1a63027..9b3351d88d64 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -35,6 +35,7 @@
> #include <linux/interval_tree.h>
> #include <linux/rbtree.h>
> #include <linux/xarray.h>
> +#include <linux/maple_tree.h>
> #include <asm/signal.h>
>
> #include <linux/kvm.h>
> @@ -839,7 +840,7 @@ struct kvm {
> #endif
> #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
> /* Protected by slots_locks (for writes) and RCU (for reads) */
> - struct xarray mem_attr_array;
> + struct maple_tree mem_attr_mtree;
> #endif
> char stats_id[KVM_STATS_NAME_SIZE];
> };
> @@ -2410,7 +2411,7 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
> #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
> static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
> {
> - return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
> + return xa_to_value(mtree_load(&kvm->mem_attr_mtree, gfn));
> }
>
> bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 92901656a0d4..9a99c334f4af 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -10,6 +10,7 @@
> * Yaniv Kamay <yaniv@...ranet.com>
> */
>
> +#include "linux/maple_tree.h"
> #include <kvm/iodev.h>
>
> #include <linux/kvm_host.h>
> @@ -1159,7 +1160,8 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
> rcuwait_init(&kvm->mn_memslots_update_rcuwait);
> xa_init(&kvm->vcpu_array);
> #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
> - xa_init(&kvm->mem_attr_array);
> + mt_init_flags(&kvm->mem_attr_mtree, MT_FLAGS_LOCK_EXTERN);
> + mt_set_external_lock(&kvm->mem_attr_mtree, &kvm->slots_lock);
> #endif
>
> INIT_LIST_HEAD(&kvm->gpc_list);
> @@ -1356,7 +1358,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
> cleanup_srcu_struct(&kvm->irq_srcu);
> cleanup_srcu_struct(&kvm->srcu);
> #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
> - xa_destroy(&kvm->mem_attr_array);
> + mutex_lock(&kvm->slots_lock);
> + __mt_destroy(&kvm->mem_attr_mtree);
> + mutex_unlock(&kvm->slots_lock);
> #endif
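Side note: if you switch to the tree's default internal lock (see my
comment further down), this lock/destroy/unlock dance collapses to a
single call, something like:

	mtree_destroy(&kvm->mem_attr_mtree);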
> kvm_arch_free_vm(kvm);
> preempt_notifier_dec();
> @@ -2413,30 +2417,20 @@ static u64 kvm_supported_mem_attributes(struct kvm *kvm)
> bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
> unsigned long mask, unsigned long attrs)
> {
> - XA_STATE(xas, &kvm->mem_attr_array, start);
> - unsigned long index;
> + MA_STATE(mas, &kvm->mem_attr_mtree, start, start);
> void *entry;
>
> mask &= kvm_supported_mem_attributes(kvm);
> if (attrs & ~mask)
> return false;
>
> - if (end == start + 1)
> - return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
> -
> guard(rcu)();
> - if (!attrs)
> - return !xas_find(&xas, end - 1);
> -
> - for (index = start; index < end; index++) {
> - do {
> - entry = xas_next(&xas);
> - } while (xas_retry(&xas, entry));
>
> - if (xas.xa_index != index ||
> - (xa_to_value(entry) & mask) != attrs)
> + do {
> + entry = mas_find_range(&mas, end - 1);
> + if ((xa_to_value(entry) & mask) != attrs)
> return false;
> - }
> + } while (mas.last < end - 1);
Oh, a contiguous iterator. This is what mas_find_range() was written
for.

This should work with the guard(rcu)() call if you use the internal
lock.
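For anyone following along: each call to mas_find_range() returns the
entry covering the current index (NULL for a gap, which mas_find()
would skip) and moves mas.index/mas.last to that range's bounds, so a
contiguity check over [first, last] looks roughly like this (untested
sketch over a generic tree, not KVM code):

	static bool all_match(struct maple_tree *mt, unsigned long first,
			      unsigned long last, unsigned long attrs)
	{
		MA_STATE(mas, mt, first, first);
		void *entry;
		bool ret = true;

		rcu_read_lock();
		do {
			/* NULL ranges decode to 0 via xa_to_value() */
			entry = mas_find_range(&mas, last);
			if (xa_to_value(entry) != attrs) {
				ret = false;
				break;
			}
		} while (mas.last < last);
		rcu_read_unlock();

		return ret;
	}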
>
> return true;
> }
> @@ -2524,9 +2518,9 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
> .on_lock = kvm_mmu_invalidate_end,
> .may_block = true,
> };
> - unsigned long i;
> void *entry;
> int r = 0;
> + MA_STATE(mas, &kvm->mem_attr_mtree, start, end - 1);
>
> entry = attributes ? xa_mk_value(attributes) : NULL;
>
> @@ -2540,20 +2534,11 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
> * Reserve memory ahead of time to avoid having to deal with failures
> * partway through setting the new attributes.
> */
> - for (i = start; i < end; i++) {
> - r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
> - if (r)
> - goto out_unlock;
> - }
> -
> + r = mas_preallocate(&mas, entry, GFP_KERNEL_ACCOUNT);
> + if (r)
> + goto out_unlock;
> kvm_handle_gfn_range(kvm, &pre_set_range);
> -
> - for (i = start; i < end; i++) {
> - r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
> - GFP_KERNEL_ACCOUNT));
> - KVM_BUG_ON(r, kvm);
> - }
> -
> + mas_store_prealloc(&mas, entry);
I think you can just keep the spinlock and take it around this
preallocate/store?
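Something like the below, maybe (untested sketch, not a drop-in: it
assumes the tree is initialized with its default internal spinlock
plus MT_FLAGS_USE_RCU for the RCU readers, and it leaves the
preallocation outside the spinlock because GFP_KERNEL_ACCOUNT can
sleep; that should be safe here since slots_lock already serializes
writers, although lockdep may still want rcu_read_lock() around the
walk in mas_preallocate()):

	/* kvm_create_vm(): internal spinlock, RCU-safe reads */
	mt_init_flags(&kvm->mem_attr_mtree, MT_FLAGS_USE_RCU);

	/* kvm_vm_set_mem_attributes(), under slots_lock: */
	r = mas_preallocate(&mas, entry, GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_unlock;

	kvm_handle_gfn_range(kvm, &pre_set_range);

	mas_lock(&mas);			/* the internal spinlock */
	mas_store_prealloc(&mas, entry);	/* cannot fail after prealloc */
	mas_unlock(&mas);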
> kvm_handle_gfn_range(kvm, &post_set_range);
>
> out_unlock:
> --
> 2.20.1
>
>