Message-ID: <20200408074645.dr6dgybrwwjz2tcp@kamzik.brq.redhat.com>
Date: Wed, 8 Apr 2020 09:46:45 +0200
From: Andrew Jones <drjones@...hat.com>
To: Wainer dos Santos Moschetta <wainersm@...hat.com>
Cc: pbonzini@...hat.com, kvm@...r.kernel.org, david@...hat.com,
linux-kernel@...r.kernel.org, linux-kselftest@...r.kernel.org
Subject: Re: [PATCH v3 2/2] selftests: kvm: Add mem_slot_test test
On Tue, Apr 07, 2020 at 12:37:31PM -0300, Wainer dos Santos Moschetta wrote:
> This patch introduces the mem_slot_test test, which checks that
> a VM can have memory slots added up to the limit defined in
> KVM_CAP_NR_MEMSLOTS. It then attempts to add one more slot to
> verify that it fails as expected.
>
> Signed-off-by: Wainer dos Santos Moschetta <wainersm@...hat.com>
> ---
> tools/testing/selftests/kvm/.gitignore | 1 +
> tools/testing/selftests/kvm/Makefile | 3 +
> tools/testing/selftests/kvm/mem_slot_test.c | 85 +++++++++++++++++++++
> 3 files changed, 89 insertions(+)
> create mode 100644 tools/testing/selftests/kvm/mem_slot_test.c
>
> diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
> index 16877c3daabf..127d27188427 100644
> --- a/tools/testing/selftests/kvm/.gitignore
> +++ b/tools/testing/selftests/kvm/.gitignore
> @@ -21,4 +21,5 @@
> /demand_paging_test
> /dirty_log_test
> /kvm_create_max_vcpus
> +/mem_slot_test
> /steal_time
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index 712a2ddd2a27..338b6cdce1a0 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -32,12 +32,14 @@ TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
> TEST_GEN_PROGS_x86_64 += demand_paging_test
> TEST_GEN_PROGS_x86_64 += dirty_log_test
> TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
> +TEST_GEN_PROGS_x86_64 += mem_slot_test
> TEST_GEN_PROGS_x86_64 += steal_time
>
> TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
> TEST_GEN_PROGS_aarch64 += demand_paging_test
> TEST_GEN_PROGS_aarch64 += dirty_log_test
> TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
> +TEST_GEN_PROGS_aarch64 += mem_slot_test
> TEST_GEN_PROGS_aarch64 += steal_time
>
> TEST_GEN_PROGS_s390x = s390x/memop
> @@ -46,6 +48,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
> TEST_GEN_PROGS_s390x += demand_paging_test
> TEST_GEN_PROGS_s390x += dirty_log_test
> TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
> +TEST_GEN_PROGS_s390x += mem_slot_test
>
> TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
> LIBKVM += $(LIBKVM_$(UNAME_M))
> diff --git a/tools/testing/selftests/kvm/mem_slot_test.c b/tools/testing/selftests/kvm/mem_slot_test.c
> new file mode 100644
> index 000000000000..0588dc2e8e01
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/mem_slot_test.c
> @@ -0,0 +1,85 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * mem_slot_test
> + *
> + * Copyright (C) 2020, Red Hat, Inc.
> + *
> + * Test suite for memory region operations.
> + */
> +#define _GNU_SOURCE /* for program_invocation_short_name */
> +#include <linux/kvm.h>
> +#include <sys/mman.h>
> +
> +#include "test_util.h"
> +#include "kvm_util.h"
> +
> +/*
> + * Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and
> + * that any attempt to add further slots fails.
> + */
> +static void test_add_max_slots(void)
> +{
> + int ret;
> + struct kvm_userspace_memory_region *kvm_region;
nit: this doesn't need to be a pointer, requiring the malloc.
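For example (untested), just declaring it on the stack:

    struct kvm_userspace_memory_region kvm_region;

avoids the malloc()/free() pair (though, as noted further down, a
compound literal would remove the variable altogether).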
> + struct kvm_vm *vm;
> + uint32_t max_mem_slots;
> + uint32_t mem_reg_flags;
> + uint32_t slot;
> + uint64_t guest_addr;
> + uint64_t mem_reg_npages;
> + uint64_t mem_reg_size;
> +
> + max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
> + TEST_ASSERT(max_mem_slots > 0,
> + "KVM_CAP_NR_MEMSLOTS should be greater than 0");
> + pr_info("Allowed number of memory slots: %i\n", max_mem_slots);
> +
> + vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
> +
> + /*
> + * Use a 1MB sized/aligned memory region, since that is the minimum
> + * required on s390x.
> + */
> + mem_reg_size = 0x100000;
> + mem_reg_npages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, mem_reg_size);
> + mem_reg_flags = 0;
nit: don't really need mem_reg_flags anymore.
> +
> + guest_addr = 0x0;
> +
> + /* Check that memory slots can be added up to the maximum allowed */
> + pr_info("Adding slots 0..%i, each memory region with %ldK size\n",
> + (max_mem_slots - 1), mem_reg_size >> 10);
> + for (slot = 0; slot < max_mem_slots; slot++) {
> + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
> + guest_addr, slot, mem_reg_npages,
> + mem_reg_flags);
> + guest_addr += mem_reg_size;
> + }
> +
> + /* Check that no more memory slots can be added beyond the limit */
> + void *mem = mmap(NULL, mem_reg_size, PROT_READ | PROT_WRITE,
> + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
nit: 'mem' is another middle-of-block declaration that I didn't notice before.
> + TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
> +
> + kvm_region = malloc(sizeof(struct kvm_userspace_memory_region));
> + TEST_ASSERT(kvm_region,
> + "Failed to malloc() kvm_userspace_memory_region");
> + kvm_region->slot = slot;
> + kvm_region->flags = mem_reg_flags;
> + kvm_region->guest_phys_addr = guest_addr;
> + kvm_region->userspace_addr = (uint64_t) mem;
You're missing memory_size here, and it's not even guaranteed to be zero,
since we malloc'ed and didn't zero initialize.
Actually, I'd probably just drop kvm_region and use a compound literal
in the ioctl.
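Untested, but something along these lines:

    ret = ioctl(vm_get_fd(vm), KVM_SET_USER_MEMORY_REGION,
                &(struct kvm_userspace_memory_region) {
                    .slot = slot,                   /* one past the max */
                    .guest_phys_addr = guest_addr,
                    .memory_size = mem_reg_size,
                    .userspace_addr = (uint64_t) mem,
                });

Members not named in the initializer ('flags' here) are guaranteed to
be zero-initialized, and memory_size now gets set explicitly.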
> +
> + ret = ioctl(vm_get_fd(vm), KVM_SET_USER_MEMORY_REGION, kvm_region);
> + TEST_ASSERT(ret == -1 && errno == EINVAL,
> + "Adding one more memory slot should fail with EINVAL");
> +
> + munmap(mem, mem_reg_size);
> + free(kvm_region);
> + kvm_vm_free(vm);
> +}
> +
> +int main(int argc, char *argv[])
> +{
> + test_add_max_slots();
> + return 0;
> +}
> --
> 2.17.2
>
Thanks,
drew