Message-ID: <20230117023335.1690727-33-Liam.Howlett@oracle.com>
Date: Tue, 17 Jan 2023 02:34:20 +0000
From: Liam Howlett <liam.howlett@...cle.com>
To: "linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>
CC: Liam Howlett <liam.howlett@...cle.com>
Subject: [PATCH v3 32/48] mm: Pass through vma iterator to __vma_adjust()
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Pass the vma iterator through to __vma_adjust() so the iterator state can
be updated as the vma is adjusted, instead of __vma_adjust() declaring a
local iterator of its own.
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
include/linux/mm.h | 6 ++++--
mm/mmap.c | 31 +++++++++++++++----------------
2 files changed, 19 insertions(+), 18 deletions(-)
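Not part of the patch, just an illustration for reviewers: below is a
minimal, self-contained userspace C sketch of the pattern this change
follows, i.e. threading a caller-owned iterator into a helper so any
repositioning the helper does stays visible to the caller, rather than
the helper declaring its own throwaway iterator (as the removed
VMA_ITERATOR(vmi, mm, 0) local did). The range_iter/adjust_range names
are hypothetical stand-ins for vma_iterator/__vma_adjust(); none of this
is kernel API.

/* Toy model of passing a caller-owned iterator into a helper.
 * Build with: cc -o iter_demo iter_demo.c
 */
#include <stdio.h>

struct range {
	unsigned long start;
	unsigned long end;
};

/* Stand-in for struct vma_iterator: remembers the caller's position. */
struct range_iter {
	struct range *ranges;
	int count;
	int index;
};

/* Stand-in for __vma_adjust(): operates through the caller's iterator,
 * so the caller's position remains valid after the adjustment. */
static int adjust_range(struct range_iter *ri, unsigned long start,
			unsigned long end)
{
	struct range *r = &ri->ranges[ri->index];

	r->start = start;
	r->end = end;
	return 0;
}

int main(void)
{
	struct range ranges[] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };
	struct range_iter ri = { ranges, 2, 1 };

	/* The caller keeps using the same iterator before and after. */
	adjust_range(&ri, 0x3000, 0x5000);
	printf("range %d now spans %#lx-%#lx\n", ri.index,
	       ranges[ri.index].start, ranges[ri.index].end);
	return 0;
}

In the real code the same idea applies: vma_merge() and the vma_adjust()
wrapper own the iterator, and __vma_adjust() only repositions it through
the vma_iter_*() helpers, as in the diff below.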
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 28973a3941a4..294894969cd9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2822,13 +2822,15 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
- return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+ VMA_ITERATOR(vmi, vma->vm_mm, start);
+
+ return __vma_adjust(&vmi, vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
diff --git a/mm/mmap.c b/mm/mmap.c
index b8e8d5edb5ed..4b4d7b611c3e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -579,9 +579,9 @@ inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
* are necessary. The "insert" vma (if any) is to be inserted
* before we drop the necessary locks.
*/
-int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand)
+int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *insert, struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next_next = NULL; /* uninit var warning */
@@ -594,7 +594,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
bool vma_changed = false;
long adjust_next = 0;
int remove_next = 0;
- VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *exporter = NULL, *importer = NULL;
if (next && !insert) {
@@ -679,7 +678,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
}
}
- if (vma_iter_prealloc(&vmi, vma))
+ if (vma_iter_prealloc(vmi, vma))
return -ENOMEM;
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
@@ -725,7 +724,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
if (start != vma->vm_start) {
if ((vma->vm_start < start) &&
(!insert || (insert->vm_end != start))) {
- vma_iter_clear(&vmi, vma->vm_start, start);
+ vma_iter_clear(vmi, vma->vm_start, start);
VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
} else {
vma_changed = true;
@@ -735,8 +734,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
if (end != vma->vm_end) {
if (vma->vm_end > end) {
if (!insert || (insert->vm_start != end)) {
- vma_iter_clear(&vmi, end, vma->vm_end);
- vma_iter_set(&vmi, vma->vm_end);
+ vma_iter_clear(vmi, end, vma->vm_end);
+ vma_iter_set(vmi, vma->vm_end);
VM_WARN_ON(insert &&
insert->vm_end < vma->vm_end);
}
@@ -747,13 +746,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
}
if (vma_changed)
- vma_iter_store(&vmi, vma);
+ vma_iter_store(vmi, vma);
vma->vm_pgoff = pgoff;
if (adjust_next) {
next->vm_start += adjust_next;
next->vm_pgoff += adjust_next >> PAGE_SHIFT;
- vma_iter_store(&vmi, next);
+ vma_iter_store(vmi, next);
}
if (file) {
@@ -773,7 +772,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
- vma_iter_store(&vmi, insert);
+ vma_iter_store(vmi, insert);
mm->map_count++;
}
@@ -819,7 +818,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
if (insert && file)
uprobe_mmap(insert);
- vma_iter_free(&vmi);
+ vma_iter_free(vmi);
validate_mm(mm);
return 0;
@@ -1013,20 +1012,20 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
if (merge_prev && merge_next &&
is_mergeable_anon_vma(prev->anon_vma,
next->anon_vma, NULL)) { /* cases 1, 6 */
- err = __vma_adjust(prev, prev->vm_start,
+ err = __vma_adjust(vmi, prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL,
prev);
res = prev;
} else if (merge_prev) { /* cases 2, 5, 7 */
- err = __vma_adjust(prev, prev->vm_start,
+ err = __vma_adjust(vmi, prev, prev->vm_start,
end, prev->vm_pgoff, NULL, prev);
res = prev;
} else if (merge_next) {
if (prev && addr < prev->vm_end) /* case 4 */
- err = __vma_adjust(prev, prev->vm_start,
+ err = __vma_adjust(vmi, prev, prev->vm_start,
addr, prev->vm_pgoff, NULL, next);
else /* cases 3, 8 */
- err = __vma_adjust(mid, addr, next->vm_end,
+ err = __vma_adjust(vmi, mid, addr, next->vm_end,
next->vm_pgoff - pglen, NULL, next);
res = next;
}
--
2.35.1