Message-ID: <20260115182720.1691130-8-Liam.Howlett@oracle.com>
Date: Thu, 15 Jan 2026 13:27:17 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Suren Baghdasaryan <surenb@...gle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Pedro Falcato <pfalcato@...e.de>, David Hildenbrand <david@...hat.com>,
Vlastimil Babka <vbabka@...e.cz>, Michal Hocko <mhocko@...e.com>,
Jann Horn <jannh@...gle.com>, shikemeng@...weicloud.com,
kasong@...cent.com, nphamcs@...il.com, bhe@...hat.com,
baohua@...nel.org, chrisl@...nel.org,
Matthew Wilcox <willy@...radead.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: [PATCH v2 07/10] mm: Introduce unmap_desc struct to reduce function arguments

unmap_region() takes a number of arguments that could use better
documentation.  With the addition of a descriptor for the unmap
operation (struct unmap_desc), the arguments become self-documenting
and each field can be described once, in the struct declaration.

No functional change intended.

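To illustrate the pattern outside of this patch: this is the usual
"argument descriptor" refactor, where a long positional parameter list
becomes a single struct that call sites fill with designated
initializers.  A minimal stand-alone C sketch of the idea (all
identifiers below are illustrative only, not part of this patch or of
any kernel API):

	#include <stdio.h>

	/* Hypothetical descriptor: groups what were positional arguments. */
	struct range_desc {
		unsigned long start;	/* first address in the range */
		unsigned long end;	/* one past the last address */
		int locked;		/* whether the caller holds the lock */
	};

	/* One pointer instead of several easy-to-misorder scalars. */
	static void show_range(const struct range_desc *desc)
	{
		printf("range [%lu, %lu) locked=%d\n",
		       desc->start, desc->end, desc->locked);
	}

	int main(void)
	{
		/*
		 * Designated initializers name each value at the call site,
		 * much like UNMAP_STATE() does for struct unmap_desc below.
		 */
		struct range_desc desc = {
			.start = 0,
			.end = 4096,
			.locked = 1,
		};

		show_range(&desc);
		return 0;
	}
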
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Reviewed-by: Pedro Falcato <pfalcato@...e.de>
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
mm/mmap.c | 14 ++++++++++----
mm/vma.c | 25 +++++++++++--------------
mm/vma.h | 35 ++++++++++++++++++++++++++++++-----
3 files changed, 51 insertions(+), 23 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 48dae3d48e46f..4500e61a0d5e4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1876,11 +1876,17 @@ __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (end) {
vma_iter_set(&vmi, 0);
tmp = vma_next(&vmi);
+ UNMAP_STATE(unmap, &vmi, /* first = */ tmp,
+ /* vma_start = */ 0, /* vma_end = */ end,
+ /* prev = */ NULL, /* next = */ NULL);
+
+ /*
+ * Don't iterate over vmas beyond the failure point for
+ * both unmap_vmas() and free_pgtables().
+ */
+ unmap.tree_end = end;
flush_cache_mm(mm);
- unmap_region(&vmi.mas, /* vma = */ tmp,
- /* vma_start = */ 0, /* vma_end = */ end,
- /* pg_end = */ end, /* prev = */ NULL,
- /* next = */ NULL);
+ unmap_region(&unmap);
charge = tear_down_vmas(mm, &vmi, tmp, end);
vm_unacct_memory(charge);
}
diff --git a/mm/vma.c b/mm/vma.c
index b92383e5eebd1..75c68c74c062e 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -472,21 +472,19 @@ void remove_vma(struct vm_area_struct *vma)
*
* Called with the mm semaphore held.
*/
-void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
- unsigned long vma_start, unsigned long vma_end,
- unsigned long pg_max, struct vm_area_struct *prev,
- struct vm_area_struct *next)
+void unmap_region(struct unmap_desc *unmap)
{
- struct mm_struct *mm = vma->vm_mm;
+ struct mm_struct *mm = unmap->first->vm_mm;
+ struct ma_state *mas = unmap->mas;
struct mmu_gather tlb;
tlb_gather_mmu(&tlb, mm);
update_hiwater_rss(mm);
- unmap_vmas(&tlb, mas, vma, vma_start, vma_end, vma_end);
- mas_set(mas, vma->vm_end);
- free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
- pg_max, next ? next->vm_start : USER_PGTABLES_CEILING,
- /* mm_wr_locked = */ true);
+ unmap_vmas(&tlb, mas, unmap->first, unmap->vma_start, unmap->vma_end,
+ unmap->vma_end);
+ mas_set(mas, unmap->tree_reset);
+ free_pgtables(&tlb, mas, unmap->first, unmap->pg_start, unmap->pg_end,
+ unmap->tree_end, unmap->mm_wr_locked);
tlb_finish_mmu(&tlb);
}
@@ -2463,15 +2461,14 @@ static int __mmap_new_file_vma(struct mmap_state *map,
error = mmap_file(vma->vm_file, vma);
if (error) {
+ UNMAP_STATE(unmap, vmi, vma, vma->vm_start, vma->vm_end,
+ map->prev, map->next);
fput(vma->vm_file);
vma->vm_file = NULL;
vma_iter_set(vmi, vma->vm_end);
/* Undo any partial mapping done by a device driver. */
- unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
- map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
- map->prev, map->next);
-
+ unmap_region(&unmap);
return error;
}
diff --git a/mm/vma.h b/mm/vma.h
index 7c2c95fef240b..cca7553c7d641 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -155,6 +155,35 @@ struct vma_merge_struct {
};
+struct unmap_desc {
+ struct ma_state *mas; /* The maple state pointing to the first vma */
+ struct vm_area_struct *first; /* The first vma */
+ unsigned long pg_start; /* The first pagetable address to free (floor) */
+ unsigned long pg_end; /* The last pagetable address to free (ceiling) */
+ unsigned long vma_start; /* The min vma address */
+ unsigned long vma_end; /* The max vma address */
+ unsigned long tree_end; /* Maximum for the vma tree search */
+ unsigned long tree_reset; /* Where to reset the vma tree walk */
+ bool mm_wr_locked; /* Whether the mmap write lock is held */
+};
+
+#define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next) \
+ struct unmap_desc name = { \
+ .mas = &(_vmi)->mas, \
+ .first = _vma, \
+ .pg_start = _prev ? ((struct vm_area_struct *)_prev)->vm_end : \
+ FIRST_USER_ADDRESS, \
+ .pg_end = _next ? ((struct vm_area_struct *)_next)->vm_start : \
+ USER_PGTABLES_CEILING, \
+ .vma_start = _vma_start, \
+ .vma_end = _vma_end, \
+ .tree_end = _next ? \
+ ((struct vm_area_struct *)_next)->vm_start : \
+ USER_PGTABLES_CEILING, \
+ .tree_reset = _vma->vm_end, \
+ .mm_wr_locked = true, \
+ }
+
static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
return vmg->state == VMA_MERGE_ERROR_NOMEM;
@@ -262,11 +291,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
bool unlock);
void remove_vma(struct vm_area_struct *vma);
-
-void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
- unsigned long vma_start, unsigned long vma_end,
- unsigned long pg_max, struct vm_area_struct *prev,
- struct vm_area_struct *next);
+void unmap_region(struct unmap_desc *unmap);
/**
* vma_modify_flags() - Perform any necessary split/merge in preparation for
--
2.47.3