Message-ID: <fml27mxsuif3zbnbpperflml24ftis7uhdu5jymrajb3hroqts@wlxrnzxaigrs>
Date: Wed, 26 Nov 2025 10:23:38 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Mike Rapoport <rppt@...nel.org>
Cc: linux-mm@...ck.org, Andrea Arcangeli <aarcange@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
David Hildenbrand <david@...hat.com>, Hugh Dickins <hughd@...gle.com>,
James Houghton <jthoughton@...gle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Michal Hocko <mhocko@...e.com>, Nikita Kalyazin <kalyazin@...zon.com>,
Paolo Bonzini <pbonzini@...hat.com>, Peter Xu <peterx@...hat.com>,
Sean Christopherson <seanjc@...gle.com>, Shuah Khan <shuah@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, linux-kselftest@...r.kernel.org
Subject: Re: [PATCH v2 5/5] KVM: selftests: test userfaultfd minor for
guest_memfd
* Mike Rapoport <rppt@...nel.org> [251125 13:39]:
> From: Nikita Kalyazin <kalyazin@...zon.com>
>
> The test demonstrates that a minor userfaultfd event in guest_memfd can
> be resolved via a memcpy followed by a UFFDIO_CONTINUE ioctl.
>
> Signed-off-by: Nikita Kalyazin <kalyazin@...zon.com>
> Co-developed-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
Acked-by: Liam R. Howlett <Liam.Howlett@...cle.com>
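
For readers skimming the series: the flow the commit message describes - fill the
page through a second, non-registered mapping of the same guest_memfd, then issue
UFFDIO_CONTINUE - boils down to roughly the sketch below.  This is a simplified
distillation of the test, not the test itself: error paths and the guest_memfd
creation are omitted, resolve_minor_fault() is a hypothetical helper name, and it
assumes the UFFD_FEATURE_MINOR_GENERIC feature added earlier in this series.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

/*
 * Sketch: resolve one minor fault on a guest_memfd-backed mapping.
 * 'fd' is an mmapable guest_memfd, 'size' its length, 'page_size' the
 * system page size.  Returns 0 on success, -1 on any failure.
 */
static int resolve_minor_fault(int fd, size_t size, size_t page_size)
{
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_MINOR_GENERIC, /* added by this series */
	};
	struct uffdio_register reg;
	struct uffdio_continue cont;
	struct uffd_msg msg;
	void *mem, *mem_nofault;
	unsigned long addr;
	int uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return -1;

	/* One mapping takes the faults, the other is used to fill pages. */
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	mem_nofault = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED || mem_nofault == MAP_FAILED)
		return -1;

	reg.range.start = (unsigned long)mem;
	reg.range.len = size;
	reg.mode = UFFDIO_REGISTER_MODE_MINOR;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return -1;

	/* Another thread touches 'mem' here and blocks in the minor fault. */

	if (read(uffd, &msg, sizeof(msg)) < 0 ||
	    msg.event != UFFD_EVENT_PAGEFAULT)
		return -1;

	addr = msg.arg.pagefault.address & ~(page_size - 1);

	/* Fill the page through the non-registered alias (the test's memcpy)... */
	memset((char *)mem_nofault + (addr - (unsigned long)mem), 0xaa, page_size);

	/* ...then wake the faulting thread without copying again. */
	cont.range.start = addr;
	cont.range.len = page_size;
	cont.mode = 0;
	return ioctl(uffd, UFFDIO_CONTINUE, &cont) ? -1 : 0;
}

In the selftest the faulting access runs in a separate thread, so the read() above
actually observes a blocked minor fault; done single-threaded, the access would
block with nobody left to service it.
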
> ---
> .../testing/selftests/kvm/guest_memfd_test.c | 103 ++++++++++++++++++
> 1 file changed, 103 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
> index e7d9aeb418d3..a5d3ed21d7bb 100644
> --- a/tools/testing/selftests/kvm/guest_memfd_test.c
> +++ b/tools/testing/selftests/kvm/guest_memfd_test.c
> @@ -10,13 +10,17 @@
> #include <errno.h>
> #include <stdio.h>
> #include <fcntl.h>
> +#include <pthread.h>
>
> #include <linux/bitmap.h>
> #include <linux/falloc.h>
> #include <linux/sizes.h>
> +#include <linux/userfaultfd.h>
> #include <sys/mman.h>
> #include <sys/types.h>
> #include <sys/stat.h>
> +#include <sys/syscall.h>
> +#include <sys/ioctl.h>
>
> #include "kvm_util.h"
> #include "test_util.h"
> @@ -254,6 +258,104 @@ static void test_guest_memfd_flags(struct kvm_vm *vm)
> }
> }
>
> +struct fault_args {
> + char *addr;
> + volatile char value;
> +};
> +
> +static void *fault_thread_fn(void *arg)
> +{
> + struct fault_args *args = arg;
> +
> + /* Trigger page fault */
> + args->value = *args->addr;
> + return NULL;
> +}
> +
> +static void test_uffd_minor(int fd, size_t total_size)
> +{
> + struct uffdio_api uffdio_api = {
> + .api = UFFD_API,
> + .features = UFFD_FEATURE_MINOR_GENERIC,
> + };
> + struct uffdio_register uffd_reg;
> + struct uffdio_continue uffd_cont;
> + struct uffd_msg msg;
> + struct fault_args args;
> + pthread_t fault_thread;
> + void *mem, *mem_nofault, *buf = NULL;
> + int uffd, ret;
> + off_t offset = page_size;
> + void *fault_addr;
> +
> + ret = posix_memalign(&buf, page_size, total_size);
> + TEST_ASSERT_EQ(ret, 0);
> +
> + memset(buf, 0xaa, total_size);
> +
> + uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
> + TEST_ASSERT(uffd != -1, "userfaultfd creation should succeed");
> +
> + ret = ioctl(uffd, UFFDIO_API, &uffdio_api);
> + TEST_ASSERT(ret != -1, "ioctl(UFFDIO_API) should succeed");
> +
> + mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> + TEST_ASSERT(mem != MAP_FAILED, "mmap should succeed");
> +
> + mem_nofault = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
> + TEST_ASSERT(mem_nofault != MAP_FAILED, "mmap should succeed");
> +
> + uffd_reg.range.start = (unsigned long)mem;
> + uffd_reg.range.len = total_size;
> + uffd_reg.mode = UFFDIO_REGISTER_MODE_MINOR;
> + ret = ioctl(uffd, UFFDIO_REGISTER, &uffd_reg);
> + TEST_ASSERT(ret != -1, "ioctl(UFFDIO_REGISTER) should succeed");
> +
> + ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
> + offset, page_size);
> + TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed");
> +
> + fault_addr = mem + offset;
> + args.addr = fault_addr;
> +
> + ret = pthread_create(&fault_thread, NULL, fault_thread_fn, &args);
> + TEST_ASSERT(ret == 0, "pthread_create should succeed");
> +
> + ret = read(uffd, &msg, sizeof(msg));
> + TEST_ASSERT(ret != -1, "read from userfaultfd should succeed");
> + TEST_ASSERT(msg.event == UFFD_EVENT_PAGEFAULT, "event type should be pagefault");
> + TEST_ASSERT((void *)(msg.arg.pagefault.address & ~(page_size - 1)) == fault_addr,
> + "pagefault should occur at expected address");
> +
> + memcpy(mem_nofault + offset, buf + offset, page_size);
> +
> + uffd_cont.range.start = (unsigned long)fault_addr;
> + uffd_cont.range.len = page_size;
> + uffd_cont.mode = 0;
> + ret = ioctl(uffd, UFFDIO_CONTINUE, &uffd_cont);
> + TEST_ASSERT(ret != -1, "ioctl(UFFDIO_CONTINUE) should succeed");
> +
> + /*
> + * wait for fault_thread to finish to make sure fault happened and was
> + * resolved before we verify the values
> + */
> + ret = pthread_join(fault_thread, NULL);
> + TEST_ASSERT(ret == 0, "pthread_join should succeed");
> +
> + TEST_ASSERT(args.value == *(char *)(mem_nofault + offset),
> + "memory should contain the value that was copied");
> + TEST_ASSERT(args.value == *(char *)(mem + offset),
> + "no further fault is expected");
> +
> + ret = munmap(mem_nofault, total_size);
> + TEST_ASSERT(!ret, "munmap should succeed");
> +
> + ret = munmap(mem, total_size);
> + TEST_ASSERT(!ret, "munmap should succeed");
> + free(buf);
> + close(uffd);
> +}
> +
> #define gmem_test(__test, __vm, __flags) \
> do { \
> int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags); \
> @@ -273,6 +375,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
> if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
> gmem_test(mmap_supported, vm, flags);
> gmem_test(fault_overflow, vm, flags);
> + gmem_test(uffd_minor, vm, flags);
> } else {
> gmem_test(fault_private, vm, flags);
> }
> --
> 2.50.1
>