Message-ID: <20230117023335.1690727-29-Liam.Howlett@oracle.com>
Date: Tue, 17 Jan 2023 02:34:18 +0000
From: Liam Howlett <liam.howlett@...cle.com>
To: "linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>
CC: Liam Howlett <liam.howlett@...cle.com>
Subject: [PATCH v3 28/48] nommu: Pass through vma iterator to shrink_vma()
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>

Rename the function to vmi_shrink_vma() to indicate it takes the vma
iterator.  Use the iterator to preallocate and drop the delete function.
The maple tree can make this modification more easily than the linked
list and rbtree could, so just clear the necessary area in the tree.

add_vma_to_mm() is no longer used, so drop this function.

vmi_add_vma_to_mm() is now only used once, so inline this function into
do_mmap().
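
For illustration, the preallocate-then-clear pattern this change moves
to looks roughly like the sketch below.  It uses only the vma iterator
helpers visible in this patch (VMA_ITERATOR, vma_iter_prealloc,
vma_iter_clear); the wrapping function itself is hypothetical and not
part of the patch:

	/* Hypothetical sketch: trim one end off @vma.  Preallocate
	 * maple tree nodes up front so the later clear cannot fail,
	 * then clear the dropped range directly in the tree instead
	 * of deleting and re-adding the whole VMA. */
	static int shrink_sketch(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long from, unsigned long to)
	{
		VMA_ITERATOR(vmi, mm, vma->vm_start);

		if (vma_iter_prealloc(&vmi, vma))
			return -ENOMEM;	/* nothing modified yet */

		if (from > vma->vm_start) {
			/* keep the head, clear the tail */
			vma_iter_clear(&vmi, from, vma->vm_end);
			vma->vm_end = from;
		} else {
			/* keep the tail, clear the head */
			vma_iter_clear(&vmi, vma->vm_start, to);
			vma->vm_start = to;
		}
		return 0;
	}

Because the allocation happens before the tree is touched, a failure
leaves the VMA and the tree unchanged, which is what lets the old
delete-then-re-add sequence (and its second failure point) go away.
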
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
mm/nommu.c | 63 +++++++++++++++---------------------------------------
1 file changed, 17 insertions(+), 46 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 595f942c6101..f892af0a6ca3 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -560,44 +560,6 @@ static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
}
}
-/*
- * vmi_add_vma_to_mm() - VMA Iterator variant of add_vmi_to_mm().
- * @vmi: The VMA iterator
- * @mm: The mm_struct
- * @vma: The vma to add
- *
- */
-static void vmi_add_vma_to_mm(struct vma_iterator *vmi, struct mm_struct *mm,
- struct vm_area_struct *vma)
-{
- BUG_ON(!vma->vm_region);
-
- setup_vma_to_mm(vma, mm);
- mm->map_count++;
-
- /* add the VMA to the tree */
- vma_iter_store(vmi, vma);
-}
-
-/*
- * add a VMA into a process's mm_struct in the appropriate place in the list
- * and tree and add to the address space's page tree also if not an anonymous
- * page
- * - should be called with mm->mmap_lock held writelocked
- */
-static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
-{
- VMA_ITERATOR(vmi, mm, vma->vm_start);
-
- if (vma_iter_prealloc(&vmi, vma)) {
- pr_warn("Allocation of vma tree for process %d failed\n",
- current->pid);
- return -ENOMEM;
- }
- vmi_add_vma_to_mm(&vmi, mm, vma);
- return 0;
-}
-
static void cleanup_vma_from_mm(struct vm_area_struct *vma)
{
vma->vm_mm->map_count--;
@@ -1211,7 +1173,11 @@ unsigned long do_mmap(struct file *file,
current->mm->total_vm += len >> PAGE_SHIFT;
share:
- vmi_add_vma_to_mm(&vmi, current->mm, vma);
+ BUG_ON(!vma->vm_region);
+ setup_vma_to_mm(vma, current->mm);
+ current->mm->map_count++;
+ /* add the VMA to the tree */
+ vma_iter_store(&vmi, vma);
/* we flush the region from the icache only when the first executable
* mapping of it is made */
@@ -1396,7 +1362,7 @@ int vmi_split_vma(struct vma_iterator *vmi, struct mm_struct *mm,
* shrink a VMA by removing the specified chunk from either the beginning or
* the end
*/
-static int shrink_vma(struct mm_struct *mm,
+static int vmi_shrink_vma(struct vma_iterator *vmi,
struct vm_area_struct *vma,
unsigned long from, unsigned long to)
{
@@ -1404,14 +1370,19 @@ static int shrink_vma(struct mm_struct *mm,
/* adjust the VMA's pointers, which may reposition it in the MM's tree
* and list */
- if (delete_vma_from_mm(vma))
+ if (vma_iter_prealloc(vmi, vma)) {
+ pr_warn("Allocation of vma tree for process %d failed\n",
+ current->pid);
return -ENOMEM;
- if (from > vma->vm_start)
+ }
+
+ if (from > vma->vm_start) {
+ vma_iter_clear(vmi, from, vma->vm_end);
vma->vm_end = from;
- else
+ } else {
+ vma_iter_clear(vmi, vma->vm_start, to);
vma->vm_start = to;
- if (add_vma_to_mm(mm, vma))
- return -ENOMEM;
+ }
/* cut the backing region down to size */
region = vma->vm_region;
@@ -1488,7 +1459,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
if (ret < 0)
return ret;
}
- return shrink_vma(mm, vma, start, end);
+ return vmi_shrink_vma(&vmi, vma, start, end);
}
erase_whole_vma:
--
2.35.1