Message-ID: <e4692910-489f-f824-104c-8ad5d99e8f08@redhat.com>
Date: Wed, 28 Sep 2022 19:11:59 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Emanuele Giuseppe Esposito <eesposit@...hat.com>,
kvm@...r.kernel.org
Cc: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
Maxim Levitsky <mlevitsk@...hat.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH 5/9] kvm_main.c: split __kvm_set_memory_region logic
in kvm_check_mem and kvm_prepare_batch
On 9/9/22 12:45, Emanuele Giuseppe Esposito wrote:
> Just a function split. No functional change intended,
> except for the fact that kvm_prepare_batch() does not
> immediately call kvm_set_memslot() if batch->change is
> KVM_MR_DELETE, but delegates that to the caller
> (__kvm_set_memory_region).
>
> Signed-off-by: Emanuele Giuseppe Esposito <eesposit@...hat.com>
> ---
> virt/kvm/kvm_main.c | 120 +++++++++++++++++++++++++++++---------------
> 1 file changed, 79 insertions(+), 41 deletions(-)
>
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 17f07546d591..9d917af30593 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1927,19 +1927,9 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
> return false;
> }
>
> -/*
> - * Allocate some memory and give it an address in the guest physical address
> - * space.
> - *
> - * Discontiguous memory is allowed, mostly for framebuffers.
> - * This function takes also care of initializing batch->new/old/invalid/change
> - * fields.
> - *
> - * Must be called holding kvm->slots_lock for write.
> - */
> -int __kvm_set_memory_region(struct kvm *kvm,
> - const struct kvm_userspace_memory_region *mem,
> - struct kvm_internal_memory_region_list *batch)
> +static int kvm_prepare_batch(struct kvm *kvm,
> + const struct kvm_userspace_memory_region *mem,
> + struct kvm_internal_memory_region_list *batch)
> {
> struct kvm_memory_slot *old, *new;
> struct kvm_memslots *slots;
> @@ -1947,34 +1937,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
> unsigned long npages;
> gfn_t base_gfn;
> int as_id, id;
> - int r;
> -
> - r = check_memory_region_flags(mem);
> - if (r)
> - return r;
>
> as_id = mem->slot >> 16;
> id = (u16)mem->slot;
>
> - /* General sanity checks */
> - if ((mem->memory_size & (PAGE_SIZE - 1)) ||
> - (mem->memory_size != (unsigned long)mem->memory_size))
> - return -EINVAL;
> - if (mem->guest_phys_addr & (PAGE_SIZE - 1))
> - return -EINVAL;
> - /* We can read the guest memory with __xxx_user() later on. */
> - if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
> - (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
> - !access_ok((void __user *)(unsigned long)mem->userspace_addr,
> - mem->memory_size))
> - return -EINVAL;
> - if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
> - return -EINVAL;
> - if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
> - return -EINVAL;
> - if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
> - return -EINVAL;
> -
> slots = __kvm_memslots(kvm, as_id);
>
> /*
> @@ -1993,7 +1959,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
>
> batch->change = KVM_MR_DELETE;
> batch->new = NULL;
> - return kvm_set_memslot(kvm, batch);
> + return 0;
> }
>
> base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
> @@ -2020,7 +1986,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
> else if (mem->flags != old->flags)
> change = KVM_MR_FLAGS_ONLY;
> else /* Nothing to change. */
> - return 0;
> + return 1;
> }
>
> if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
> @@ -2041,12 +2007,81 @@ int __kvm_set_memory_region(struct kvm *kvm,
>
> batch->new = new;
> batch->change = change;
> + return 0;
> +}
> +
> +static int kvm_check_mem(const struct kvm_userspace_memory_region *mem)
> +{
> + int as_id, id;
> +
> + as_id = mem->slot >> 16;
> + id = (u16)mem->slot;
> +
> + /* General sanity checks */
> + if ((mem->memory_size & (PAGE_SIZE - 1)) ||
> + (mem->memory_size != (unsigned long)mem->memory_size))
> + return -EINVAL;
> + if (mem->guest_phys_addr & (PAGE_SIZE - 1))
> + return -EINVAL;
> + /* We can read the guest memory with __xxx_user() later on. */
> + if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
> + (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
> + !access_ok((void __user *)(unsigned long)mem->userspace_addr,
> + mem->memory_size))
> + return -EINVAL;
> + if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
> + return -EINVAL;
> + if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
> + return -EINVAL;
> + if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
> + return -EINVAL;
> +
> + return 0;
> +}
> +
> +static int kvm_check_memory_region(struct kvm *kvm,
> + const struct kvm_userspace_memory_region *mem,
> + struct kvm_internal_memory_region_list *batch)
> +{
> + int r;
> +
> + r = check_memory_region_flags(mem);
> + if (r)
> + return r;
>
> - r = kvm_set_memslot(kvm, batch);
> + r = kvm_check_mem(mem);
> if (r)
> - kfree(new);
> + return r;
> +
> + r = kvm_prepare_batch(kvm, mem, batch);
> + if (r && batch->new)
> + kfree(batch->new);
> +
> return r;
> }
> +
> +/*
> + * Allocate some memory and give it an address in the guest physical address
> + * space.
> + *
> + * Discontiguous memory is allowed, mostly for framebuffers.
> + * This function takes also care of initializing batch->new/old/invalid/change
> + * fields.
> + *
> + * Must be called holding kvm->slots_lock for write.
> + */
> +int __kvm_set_memory_region(struct kvm *kvm,
> + const struct kvm_userspace_memory_region *mem,
> + struct kvm_internal_memory_region_list *batch)
> +{
> + int r;
> +
> + r = kvm_check_memory_region(kvm, mem, batch);
> + if (r)
> + return r;
> +
> + return kvm_set_memslot(kvm, batch);
> +}
> EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
>
> static int kvm_set_memory_region(struct kvm *kvm,
> @@ -2061,6 +2096,9 @@ static int kvm_set_memory_region(struct kvm *kvm,
> mutex_lock(&kvm->slots_lock);
> r = __kvm_set_memory_region(kvm, mem, batch);
> mutex_unlock(&kvm->slots_lock);
> + /* r == 1 means empty request, nothing to do but no error */
> + if (r == 1)
> + r = 0;
This is weird... I think you have the order of the split slightly
messed up. Introducing this "r == 1 means nothing to do" check earlier
in the series, roughly at the same time as the introduction of the new
struct, would be clearer.
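For example, the magic value could be absorbed in
__kvm_set_memory_region() itself, so that no caller ever sees it;
something like this (untested sketch, reusing the names from this
patch):

	r = kvm_check_memory_region(kvm, mem, batch);
	if (r)
		return r == 1 ? 0 : r;	/* 1 == nothing to change, not an error */

	return kvm_set_memslot(kvm, batch);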
After having reviewed more of the code, I do think you should
disaggregate __kvm_set_memory_region() into separate functions (check,
build entry, prepare, commit) completely: kvm_set_memory_region() calls
them, and __kvm_set_memory_region() disappears entirely.
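Something along these lines (completely untested, just to sketch the
idea; kvm_build_batch(), kvm_prepare_memslots() and
kvm_commit_memslots() are placeholder names for the split I have in
mind, not functions that exist today):

	static int kvm_set_memory_region(struct kvm *kvm,
					 const struct kvm_userspace_memory_region *mem,
					 struct kvm_internal_memory_region_list *batch)
	{
		int r;

		mutex_lock(&kvm->slots_lock);

		/* check: mem->flags and general sanity of *mem */
		r = check_memory_region_flags(mem);
		if (!r)
			r = kvm_check_mem(mem);
		if (r)
			goto out;

		/* build entry: fill batch->old/new/invalid/change */
		r = kvm_build_batch(kvm, mem, batch);
		if (r)
			goto out;

		/* prepare: invalidate/replace the old memslot as needed */
		r = kvm_prepare_memslots(kvm, batch);
		if (r)
			goto out;

		/* commit: activate the new memslot and notify arch code */
		r = kvm_commit_memslots(kvm, batch);
	out:
		mutex_unlock(&kvm->slots_lock);
		/* 1 == empty request, nothing to do but no error */
		return r == 1 ? 0 : r;
	}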
Paolo
> return r;
> }
>