Message-Id: <f5e27d1bb9259b7ef7a45099a838e47fbaaad3ab.1679431180.git.lstoakes@gmail.com>
Date: Tue, 21 Mar 2023 20:45:58 +0000
From: Lorenzo Stoakes <lstoakes@...il.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
maple-tree@...ts.infradead.org, Vernon Yang <vernon2gm@...il.com>,
Lorenzo Stoakes <lstoakes@...il.com>
Subject: [PATCH v2 4/4] mm/mmap/vma_merge: init cleanup, be explicit about the non-mergeable case
Reorder the initial variables sensibly, and initialise vma_start and
vma_pgoff there rather than later, so that all initial values are set in
one place and need not be assigned again further down.
Rather than setting err = -1 and only resetting it if we hit merge cases,
explicitly check the non-mergeable case to make it abundantly clear that
we only proceed with the rest of the function if something is mergeable.
Default err to 0 and only update it where an error might actually occur.
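Concretely, the function now bails out explicitly when neither neighbour
is mergeable (condensed from the hunks below):

    int err = 0;

    ...

    if (!merge_prev && !merge_next)
            return NULL;        /* Not mergeable. */

With that guarantee in place, err is only ever set by the dup_anon_vma()
calls, so the final if (err) check can only mean an anon_vma clone
failure.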
Move the merge_prev and merge_next checks closer to the logic that
determines curr and next.
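They now sit immediately after next is looked up (condensed; the full
predicates appear in the second hunk below):

    /* Is there a VMA next to a hole (case 1 - 3) or prev (4)? */
    next = vma_lookup(mm, end);

    /* Can we merge the predecessor? */
    if (prev && addr == prev->vm_end && ...) {
            merge_prev = true;
            vma_prev(vmi);
    }

    /* Can we merge the successor? */
    merge_next = next && ...;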
This has no functional impact.
Signed-off-by: Lorenzo Stoakes <lstoakes@...il.com>
---
mm/mmap.c | 55 ++++++++++++++++++++++++++-----------------------------
1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 7aec49c3bc74..d60cb0b7ae15 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -909,18 +909,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
struct anon_vma_name *anon_name)
{
- pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
- pgoff_t vma_pgoff;
struct vm_area_struct *curr, *next, *res;
struct vm_area_struct *vma, *adjust, *remove, *remove2;
- int err = -1;
+ struct vma_prepare vp;
+ int err = 0;
bool merge_prev = false;
bool merge_next = false;
bool vma_expanded = false;
- struct vma_prepare vp;
+ unsigned long vma_start = prev ? prev->vm_start : addr;
unsigned long vma_end = end;
+ pgoff_t vma_pgoff = prev ? prev->vm_pgoff : 0;
+ pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
long adj_start = 0;
- unsigned long vma_start = addr;

validate_mm(mm);
/*
@@ -940,6 +940,23 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
/* Is there a VMA next to a hole (case 1 - 3) or prev (4)? */
next = vma_lookup(mm, end);

+ /* Can we merge the predecessor? */
+ if (prev && addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
+ && can_vma_merge_after(prev, vm_flags, anon_vma, file,
+ pgoff, vm_userfaultfd_ctx, anon_name)) {
+ merge_prev = true;
+ vma_prev(vmi);
+ }
+
+ /* Can we merge the successor? */
+ merge_next = next && mpol_equal(policy, vma_policy(next)) &&
+ can_vma_merge_before(next, vm_flags,
+ anon_vma, file, pgoff+pglen,
+ vm_userfaultfd_ctx, anon_name);
+
+ if (!merge_prev && !merge_next)
+ return NULL; /* Not mergeable. */
+
/*
* By default, we return prev. Cases 3, 4, 8 will instead return next
* and cases 3, 8 will also update vma to point at next.
@@ -951,26 +968,6 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
VM_WARN_ON(addr >= end);

- if (prev) {
- vma_start = prev->vm_start;
- vma_pgoff = prev->vm_pgoff;
- /* Can we merge the predecessor? */
- if (prev->vm_end == addr && mpol_equal(vma_policy(prev), policy)
- && can_vma_merge_after(prev, vm_flags, anon_vma, file,
- pgoff, vm_userfaultfd_ctx, anon_name)) {
- merge_prev = true;
- vma_prev(vmi);
- }
- }
-
- /* Can we merge the successor? */
- if (next && mpol_equal(policy, vma_policy(next)) &&
- can_vma_merge_before(next, vm_flags,
- anon_vma, file, pgoff+pglen,
- vm_userfaultfd_ctx, anon_name)) {
- merge_next = true;
- }
-
remove = remove2 = adjust = NULL;
/* Can we merge both the predecessor and the successor? */
if (merge_prev && merge_next &&
@@ -985,7 +982,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
err = dup_anon_vma(prev, curr);
}
} else if (merge_prev) {
- err = 0; /* case 2 */
+ /* case 2 */
if (curr) {
err = dup_anon_vma(prev, curr);
if (end == curr->vm_end) { /* case 7 */
@@ -995,7 +992,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
adj_start = (end - curr->vm_start);
}
}
- } else if (merge_next) {
+ } else { /* merge_next */
res = next;
if (prev && addr < prev->vm_end) { /* case 4 */
vma_end = addr;
@@ -1011,7 +1008,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_start = addr;
vma_end = next->vm_end;
vma_pgoff = next->vm_pgoff;
- err = 0;
+
if (curr) { /* case 8 */
vma_pgoff = curr->vm_pgoff;
remove = curr;
@@ -1020,7 +1017,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
}
}

- /* Cannot merge or error in anon_vma clone */
+ /* Error in anon_vma clone. */
if (err)
return NULL;

--
2.39.2