[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260127192936.1250096-8-rppt@kernel.org>
Date: Tue, 27 Jan 2026 21:29:26 +0200
From: Mike Rapoport <rppt@...nel.org>
To: linux-mm@...ck.org
Cc: Andrea Arcangeli <aarcange@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
David Hildenbrand <david@...hat.com>,
Hugh Dickins <hughd@...gle.com>,
James Houghton <jthoughton@...gle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Michal Hocko <mhocko@...e.com>,
Mike Rapoport <rppt@...nel.org>,
Muchun Song <muchun.song@...ux.dev>,
Nikita Kalyazin <kalyazin@...zon.com>,
Oscar Salvador <osalvador@...e.de>,
Paolo Bonzini <pbonzini@...hat.com>,
Peter Xu <peterx@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Shuah Khan <shuah@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>,
linux-kernel@...r.kernel.org,
kvm@...r.kernel.org,
linux-kselftest@...r.kernel.org
Subject: [PATCH RFC 07/17] userfaultfd: introduce vm_uffd_ops
From: "Mike Rapoport (Microsoft)" <rppt@...nel.org>
Current userfaultfd implementation works only with memory managed by
core MM: anonymous, shmem and hugetlb.
First, there is no fundamental reason to limit userfaultfd support only
to the core memory types and userfaults can be handled similarly to
regular page faults provided a VMA owner implements appropriate
callbacks.
Second, historically various code paths were conditioned on
vma_is_anonymous(), vma_is_shmem() and is_vm_hugetlb_page() and some of
these conditions can be expressed as operations implemented by a
particular memory type.
Introduce vm_uffd_ops extension to vm_operations_struct that will
delegate memory type specific operations to a VMA owner.
Operations for anonymous memory are handled internally in userfaultfd
using anon_uffd_ops, which is implicitly assigned to anonymous VMAs.
Start with a single operation, ->can_userfault(), which verifies that a
VMA meets the requirements for userfaultfd support at registration time.
Implement that method for anonymous, shmem and hugetlb and move relevant
parts of vma_can_userfault() into the new callbacks.
Signed-off-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
---
include/linux/mm.h | 5 +++++
include/linux/userfaultfd_k.h | 6 +++++
mm/hugetlb.c | 21 ++++++++++++++++++
mm/shmem.c | 23 ++++++++++++++++++++
mm/userfaultfd.c | 41 ++++++++++++++++++++++-------------
5 files changed, 81 insertions(+), 15 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15076261d0c2..3c2caff646c3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -732,6 +732,8 @@ struct vm_fault {
*/
};
+struct vm_uffd_ops;
+
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -817,6 +819,9 @@ struct vm_operations_struct {
struct page *(*find_normal_page)(struct vm_area_struct *vma,
unsigned long addr);
#endif /* CONFIG_FIND_NORMAL_PAGE */
+#ifdef CONFIG_USERFAULTFD
+ const struct vm_uffd_ops *uffd_ops;
+#endif
};
#ifdef CONFIG_NUMA_BALANCING
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index a49cf750e803..56e85ab166c7 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -80,6 +80,12 @@ struct userfaultfd_ctx {
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
+/* VMA userfaultfd operations */
+struct vm_uffd_ops {
+ /* Checks if a VMA can support userfaultfd */
+ bool (*can_userfault)(struct vm_area_struct *vma, vm_flags_t vm_flags);
+};
+
/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51273baec9e5..909131910c43 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4797,6 +4797,24 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
return 0;
}
+#ifdef CONFIG_USERFAULTFD
+static bool hugetlb_can_userfault(struct vm_area_struct *vma,
+ vm_flags_t vm_flags)
+{
+ /*
+ * If user requested uffd-wp but not enabled pte markers for
+ * uffd-wp, then hugetlb is not supported.
+ */
+ if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP))
+ return false;
+ return true;
+}
+
+static const struct vm_uffd_ops hugetlb_uffd_ops = {
+ .can_userfault = hugetlb_can_userfault,
+};
+#endif
+
/*
* When a new function is introduced to vm_operations_struct and added
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
@@ -4810,6 +4828,9 @@ const struct vm_operations_struct hugetlb_vm_ops = {
.close = hugetlb_vm_op_close,
.may_split = hugetlb_vm_op_split,
.pagesize = hugetlb_vm_op_pagesize,
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &hugetlb_uffd_ops,
+#endif
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
diff --git a/mm/shmem.c b/mm/shmem.c
index ec6c01378e9d..9b82cda271c4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -5290,6 +5290,23 @@ static const struct super_operations shmem_ops = {
#endif
};
+#ifdef CONFIG_USERFAULTFD
+static bool shmem_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+ /*
+ * If user requested uffd-wp but not enabled pte markers for
+ * uffd-wp, then shmem is not supported.
+ */
+ if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP))
+ return false;
+ return true;
+}
+
+static const struct vm_uffd_ops shmem_uffd_ops = {
+ .can_userfault = shmem_can_userfault,
+};
+#endif
+
static const struct vm_operations_struct shmem_vm_ops = {
.fault = shmem_fault,
.map_pages = filemap_map_pages,
@@ -5297,6 +5314,9 @@ static const struct vm_operations_struct shmem_vm_ops = {
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &shmem_uffd_ops,
+#endif
};
static const struct vm_operations_struct shmem_anon_vm_ops = {
@@ -5306,6 +5326,9 @@ static const struct vm_operations_struct shmem_anon_vm_ops = {
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
+#ifdef CONFIG_USERFAULTFD
+ .uffd_ops = &shmem_uffd_ops,
+#endif
};
int shmem_init_fs_context(struct fs_context *fc)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 786f0a245675..d035f5e17f07 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -34,6 +34,25 @@ struct mfill_state {
pmd_t *pmd;
};
+static bool anon_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+ /* anonymous memory does not support MINOR mode */
+ if (vm_flags & VM_UFFD_MINOR)
+ return false;
+ return true;
+}
+
+static const struct vm_uffd_ops anon_uffd_ops = {
+ .can_userfault = anon_can_userfault,
+};
+
+static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
+{
+ if (vma_is_anonymous(vma))
+ return &anon_uffd_ops;
+ return vma->vm_ops ? vma->vm_ops->uffd_ops : NULL;
+}
+
static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{
@@ -2019,13 +2038,15 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
bool wp_async)
{
- vm_flags &= __VM_UFFD_FLAGS;
+ const struct vm_uffd_ops *ops = vma_uffd_ops(vma);
- if (vma->vm_flags & VM_DROPPABLE)
+ /* only VMAs that implement vm_uffd_ops are supported */
+ if (!ops)
return false;
- if ((vm_flags & VM_UFFD_MINOR) &&
- (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
+ vm_flags &= __VM_UFFD_FLAGS;
+
+ if (vma->vm_flags & VM_DROPPABLE)
return false;
/*
@@ -2035,18 +2056,8 @@ bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
if (wp_async && (vm_flags == VM_UFFD_WP))
return true;
- /*
- * If user requested uffd-wp but not enabled pte markers for
- * uffd-wp, then shmem & hugetlbfs are not supported but only
- * anonymous.
- */
- if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) &&
- !vma_is_anonymous(vma))
- return false;
-
/* By default, allow any of anon|shmem|hugetlb */
- return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
- vma_is_shmem(vma);
+ return ops->can_userfault(vma, vm_flags);
}
static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
--
2.51.0
Powered by blists - more mailing lists