Message-ID: <c684d91464a438d6e31172c9450416a373f10649.1762795245.git.lorenzo.stoakes@oracle.com>
Date: Mon, 10 Nov 2025 17:22:57 +0000
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...nel.org>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>, Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
Jann Horn <jannh@...gle.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 1/2] mm: rename walk_page_range_mm()

Rename walk_page_range_mm() to walk_page_range_mm_unsafe() to make it
explicit that we are referencing an unsafe variant of this function.

This lays the foundation for exposing more such functions while
maintaining a consistent naming scheme.

As part of this change, rename check_ops_valid() to check_ops_safe()
for consistency.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
mm/internal.h | 2 +-
mm/madvise.c | 4 ++--
mm/pagewalk.c | 22 +++++++++++-----------
3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 0af87f6c2889..479234b39394 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1652,7 +1652,7 @@ static inline void accept_page(struct page *page)
#endif /* CONFIG_UNACCEPTED_MEMORY */

/* pagewalk.c */
-int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
+int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
diff --git a/mm/madvise.c b/mm/madvise.c
index de918b107cfc..7b938ff44be2 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1171,8 +1171,8 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
unsigned long nr_pages = 0;

/* Returns < 0 on error, == 0 if success, > 0 if zap needed. */
- err = walk_page_range_mm(vma->vm_mm, range->start, range->end,
- &guard_install_walk_ops, &nr_pages);
+ err = walk_page_range_mm_unsafe(vma->vm_mm, range->start,
+ range->end, &guard_install_walk_ops, &nr_pages);
if (err < 0)
return err;

diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 6cace2c8814a..ab29b16abd2c 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -452,7 +452,7 @@ static inline void process_vma_walk_lock(struct vm_area_struct *vma,
* We usually restrict the ability to install PTEs, but this functionality is
* available to internal memory management code and provided in mm/internal.h.
*/
-int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
+int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private)
{
@@ -518,10 +518,10 @@ int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
* This check is performed on all functions which are parameterised by walk
* operations and exposed in include/linux/pagewalk.h.
*
- * Internal memory management code can use the walk_page_range_mm() function to
- * be able to use all page walking operations.
+ * Internal memory management code can use *_unsafe() functions to be able to
+ * use all page walking operations.
*/
-static bool check_ops_valid(const struct mm_walk_ops *ops)
+static bool check_ops_safe(const struct mm_walk_ops *ops)
{
/*
* The installation of PTEs is solely under the control of memory
@@ -579,10 +579,10 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private)
{
- if (!check_ops_valid(ops))
+ if (!check_ops_safe(ops))
return -EINVAL;

- return walk_page_range_mm(mm, start, end, ops, private);
+ return walk_page_range_mm_unsafe(mm, start, end, ops, private);
}

/**
@@ -639,7 +639,7 @@ int walk_kernel_page_table_range_lockless(unsigned long start, unsigned long end

if (start >= end)
return -EINVAL;
- if (!check_ops_valid(ops))
+ if (!check_ops_safe(ops))
return -EINVAL;

return walk_pgd_range(start, end, &walk);
@@ -678,7 +678,7 @@ int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
pgd, private);

if (start >= end || !walk.mm)
return -EINVAL;
- if (!check_ops_valid(ops))
+ if (!check_ops_safe(ops))
return -EINVAL;
/*
@@ -709,7 +709,7 @@ int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
return -EINVAL;
if (start < vma->vm_start || end > vma->vm_end)
return -EINVAL;
- if (!check_ops_valid(ops))
+ if (!check_ops_safe(ops))
return -EINVAL;

process_mm_walk_lock(walk.mm, ops->walk_lock);
@@ -729,7 +729,7 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,

if (!walk.mm)
return -EINVAL;
- if (!check_ops_valid(ops))
+ if (!check_ops_safe(ops))
return -EINVAL;

process_mm_walk_lock(walk.mm, ops->walk_lock);
@@ -780,7 +780,7 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
unsigned long start_addr, end_addr;
int err = 0;

- if (!check_ops_valid(ops))
+ if (!check_ops_safe(ops))
return -EINVAL;

lockdep_assert_held(&mapping->i_mmap_rwsem);
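
For reviewers, a hypothetical sketch of an mm-internal caller under the
new naming (all example_* names are invented for illustration, modelled
on the guard_install_walk_ops usage above; ops->install_pte is the
operation that makes a walk unsafe to expose):

	/* Hypothetical: install a PTE of the caller's choosing at addr. */
	static int example_install_pte(unsigned long addr, unsigned long next,
				       pte_t *ptep, struct mm_walk *walk)
	{
		/* ...construct and set the desired PTE here... */
		return 0;
	}

	static const struct mm_walk_ops example_walk_ops = {
		.install_pte	= example_install_pte,
		.walk_lock	= PGWALK_WRLOCK,
	};

	/*
	 * walk_page_range() would reject these ops with -EINVAL, so
	 * internal code must call the unsafe variant:
	 */
	err = walk_page_range_mm_unsafe(mm, start, end, &example_walk_ops, NULL);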
--
2.51.0