Message-ID: <20221129164352.3374638-27-Liam.Howlett@oracle.com>
Date: Tue, 29 Nov 2022 16:44:32 +0000
From: Liam Howlett <liam.howlett@...cle.com>
To: "maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>
CC: Liam Howlett <liam.howlett@...cle.com>
Subject: [PATCH 26/43] mmap: Convert __vma_adjust() to use vma iterator
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>

Use the vma iterator internally for __vma_adjust(). Avoid using the
maple tree interface directly; going through the vma iterator provides
type safety.

Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
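Note: the vma_iter_*() calls below are the thin wrappers introduced
earlier in this series. As a reminder of their semantics (a sketch
reconstructed from the replacements in this patch, not the exact
definitions; it assumes struct vma_iterator embeds the maple state as
->mas), each forwards to the same maple tree operations that the
deleted vma_mas_*() helpers performed:

	/* Sketch only; the real definitions were added in an earlier
	 * patch of this series. */
	static inline int vma_iter_prealloc(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
	{
		return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
	}

	static inline void vma_iter_clear(struct vma_iterator *vmi,
					  unsigned long start, unsigned long end)
	{
		/* Maple tree last indices are inclusive: end - 1. */
		mas_set_range(&vmi->mas, start, end - 1);
		mas_store_prealloc(&vmi->mas, NULL);
	}

	static inline void vma_iter_store(struct vma_iterator *vmi,
					  struct vm_area_struct *vma)
	{
		mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
		mas_store_prealloc(&vmi->mas, vma);
	}

	static inline void vma_iter_set(struct vma_iterator *vmi,
					unsigned long addr)
	{
		mas_set(&vmi->mas, addr);
	}

	static inline void vma_iter_free(struct vma_iterator *vmi)
	{
		mas_destroy(&vmi->mas);
	}

The type safety mentioned in the changelog comes from the wrapper type
itself: these helpers only accept a struct vma_iterator, so a maple
state for some unrelated tree can no longer be handed to a VMA helper
by mistake.
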
 include/linux/mm.h |  3 --
 mm/mmap.c          | 75 ++++++++--------------------------------------
 2 files changed, 13 insertions(+), 65 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2fa8aae4c4f2..3649acb9b0fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2717,9 +2717,6 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
 	bool *need_rmap_locks);
 extern void exit_mmap(struct mm_struct *);
 
-void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas);
-void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas);
-
 static inline int check_data_rlimit(unsigned long rlim,
 				    unsigned long new,
 				    unsigned long start,
diff --git a/mm/mmap.c b/mm/mmap.c
index 0c194a2a568d..71e8e9cfd704 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -492,56 +492,6 @@ static void __vma_link_file(struct vm_area_struct *vma,
 	flush_dcache_mmap_unlock(mapping);
 }
 
-/*
- * vma_mas_store() - Store a VMA in the maple tree.
- * @vma: The vm_area_struct
- * @mas: The maple state
- *
- * Efficient way to store a VMA in the maple tree when the @mas has already
- * walked to the correct location.
- *
- * Note: the end address is inclusive in the maple tree.
- */
-void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
-{
-	trace_vma_store(mas->tree, vma);
-	mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
-	mas_store_prealloc(mas, vma);
-}
-
-/*
- * vma_mas_remove() - Remove a VMA from the maple tree.
- * @vma: The vm_area_struct
- * @mas: The maple state
- *
- * Efficient way to remove a VMA from the maple tree when the @mas has already
- * been established and points to the correct location.
- * Note: the end address is inclusive in the maple tree.
- */
-void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
-{
-	trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1);
-	mas->index = vma->vm_start;
-	mas->last = vma->vm_end - 1;
-	mas_store_prealloc(mas, NULL);
-}
-
-/*
- * vma_mas_szero() - Set a given range to zero. Used when modifying a
- * vm_area_struct start or end.
- *
- * @mm: The struct_mm
- * @start: The start address to zero
- * @end: The end address to zero.
- */
-static inline void vma_mas_szero(struct ma_state *mas, unsigned long start,
-				 unsigned long end)
-{
-	trace_vma_mas_szero(mas->tree, start, end - 1);
-	mas_set_range(mas, start, end - 1);
-	mas_store_prealloc(mas, NULL);
-}
-
 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 {
 	VMA_ITERATOR(vmi, mm, 0);
@@ -701,7 +651,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	bool vma_changed = false;
 	long adjust_next = 0;
 	int remove_next = 0;
-	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	VMA_ITERATOR(vmi, mm, 0);
 	struct vm_area_struct *exporter = NULL, *importer = NULL;
 
 	if (next && !insert) {
@@ -786,7 +736,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		}
 	}
 
-	if (mas_preallocate(&mas, vma, GFP_KERNEL))
+	if (vma_iter_prealloc(&vmi, vma))
 		return -ENOMEM;
 
 	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
@@ -832,7 +782,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (start != vma->vm_start) {
 		if ((vma->vm_start < start) &&
 		    (!insert || (insert->vm_end != start))) {
-			vma_mas_szero(&mas, vma->vm_start, start);
+			vma_iter_clear(&vmi, vma->vm_start, start);
 			VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
 		} else {
 			vma_changed = true;
@@ -842,8 +792,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (end != vma->vm_end) {
 		if (vma->vm_end > end) {
 			if (!insert || (insert->vm_start != end)) {
-				vma_mas_szero(&mas, end, vma->vm_end);
-				mas_reset(&mas);
+				vma_iter_clear(&vmi, end, vma->vm_end);
+				vma_iter_set(&vmi, vma->vm_end);
 				VM_WARN_ON(insert &&
 					   insert->vm_end < vma->vm_end);
 			}
@@ -854,13 +804,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	}
 
 	if (vma_changed)
-		vma_mas_store(vma, &mas);
+		vma_iter_store(&vmi, vma);
 
 	vma->vm_pgoff = pgoff;
 	if (adjust_next) {
 		next->vm_start += adjust_next;
 		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-		vma_mas_store(next, &mas);
+		vma_iter_store(&vmi, next);
 	}
 
 	if (file) {
@@ -880,8 +830,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		 * us to insert it before dropping the locks
 		 * (it may either follow vma or precede it).
 		 */
-		mas_reset(&mas);
-		vma_mas_store(insert, &mas);
+		vma_iter_store(&vmi, insert);
 		mm->map_count++;
 	}
 
@@ -927,7 +876,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (insert && file)
 		uprobe_mmap(insert);
 
-	mas_destroy(&mas);
+	vma_iter_free(&vmi);
 	validate_mm(mm);
 
 	return 0;
@@ -2058,7 +2007,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				anon_vma_interval_tree_pre_update_vma(vma);
 				vma->vm_end = address;
 				/* Overwrite old entry in mtree. */
-				vma_mas_store(vma, &mas);
+				mas_set_range(&mas, vma->vm_start, address - 1);
+				mas_store_prealloc(&mas, vma);
 				anon_vma_interval_tree_post_update_vma(vma);
 				spin_unlock(&mm->page_table_lock);
 
@@ -2140,7 +2090,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 				vma->vm_start = address;
 				vma->vm_pgoff -= grow;
 				/* Overwrite old entry in mtree. */
-				vma_mas_store(vma, &mas);
+				mas_set_range(&mas, address, vma->vm_end - 1);
+				mas_store_prealloc(&mas, vma);
 				anon_vma_interval_tree_post_update_vma(vma);
 				spin_unlock(&mm->page_table_lock);
 
--
2.35.1
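
A note on the final two hunks: expand_upwards() and expand_downwards()
still declare a bare ma_state, so with vma_mas_store() gone the store
is open-coded there. The pattern is the one the deleted helper
comments warned about (a sketch; the preallocation and locking context
is elided):

	/*
	 * Sketch of the open-coded store. A VMA covers the half-open
	 * range [vm_start, vm_end), but a maple tree range is
	 * inclusive at both ends, so the last index is always
	 * vm_end - 1. mas_store_prealloc() does not fail here: the
	 * nodes were reserved by an earlier mas_preallocate() call
	 * before the lock was taken.
	 */
	mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&mas, vma);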