[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220218122019.130274-4-matenajakub@gmail.com>
Date: Fri, 18 Feb 2022 13:20:18 +0100
From: Jakub Matěna <matenajakub@...il.com>
To: linux-mm@...ck.org
Cc: patches@...ts.linux.dev, linux-kernel@...r.kernel.org,
vbabka@...e.cz, mhocko@...nel.org, mgorman@...hsingularity.net,
willy@...radead.org, liam.howlett@...cle.com, hughd@...gle.com,
kirill@...temov.name, riel@...riel.com, rostedt@...dmis.org,
peterz@...radead.org,
Jakub Matěna <matenajakub@...il.com>
Subject: [RFC PATCH 3/4] mm: enable merging of VMAs with different anon_vmas
Enable merging of a VMA even when it is linked to a different
anon_vma than the one it is being merged to, but only if the VMA
in question does not share any page with a parent or child process.
Every anonymous page stores a pointer to its anon_vma in its
mapping field, which is now updated as part of the merge process.
Signed-off-by: Jakub Matěna <matenajakub@...il.com>
---
include/linux/rmap.h | 17 ++++++++++++++++-
mm/mmap.c | 15 ++++++++++++++-
mm/rmap.c | 40 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 70 insertions(+), 2 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index e704b1a4c06c..c8508a4ebc46 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -137,10 +137,13 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
*/
void anon_vma_init(void); /* create anon_vma_cachep */
int __anon_vma_prepare(struct vm_area_struct *);
+void reconnect_pages(struct vm_area_struct *vma, struct vm_area_struct *next);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
+bool rbt_no_children(struct anon_vma *av);
+
static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
if (likely(vma->anon_vma))
@@ -149,10 +152,22 @@ static inline int anon_vma_prepare(struct vm_area_struct *vma)
return __anon_vma_prepare(vma);
}
+/**
+ * anon_vma_merge() - Merge anon_vmas of the given VMAs
+ * @vma: VMA being merged to
+ * @next: VMA being merged
+ */
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
{
- VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
+ struct anon_vma *anon_vma1 = vma->anon_vma;
+ struct anon_vma *anon_vma2 = next->anon_vma;
+
+ VM_BUG_ON_VMA(anon_vma1 && anon_vma2 && anon_vma1 != anon_vma2 &&
+ ((anon_vma2 != anon_vma2->root)
+ || !rbt_no_children(anon_vma2)), vma);
+
+ reconnect_pages(vma, next);
unlink_anon_vmas(next);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 8d253b46b349..ed91d0cd2111 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1065,7 +1065,20 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
if ((!anon_vma1 || !anon_vma2) && (!vma ||
list_is_singular(&vma->anon_vma_chain)))
return 1;
- return anon_vma1 == anon_vma2;
+ if (anon_vma1 == anon_vma2)
+ return 1;
+ /*
+ * Different anon_vma but not shared by several processes
+ */
+ else if ((anon_vma1 && anon_vma2) &&
+ (anon_vma1 == anon_vma1->root)
+ && (rbt_no_children(anon_vma1)))
+ return 1;
+ /*
+ * Different anon_vma and shared -> unmergeable
+ */
+ else
+ return 0;
}
/*
diff --git a/mm/rmap.c b/mm/rmap.c
index 6a1e8c7f6213..1093b518b0be 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -387,6 +387,46 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
return -ENOMEM;
}
+/**
+ * reconnect_pages() - Reconnect physical pages from old to vma
+ * @vma: VMA to newly contain all physical pages of old
+ * @old: old VMA being merged to vma
+ */
+void reconnect_pages(struct vm_area_struct *vma, struct vm_area_struct *old)
+{
+ struct anon_vma *anon_vma1 = vma->anon_vma;
+ struct anon_vma *anon_vma2 = old->anon_vma;
+ unsigned long pg_iter;
+ int pg_iters;
+
+ if (anon_vma1 == anon_vma2 || anon_vma1 == NULL || anon_vma2 == NULL)
+ return; /* Nothing to do */
+
+ /* Modify page->mapping for all pages in old */
+ pg_iter = 0;
+ pg_iters = (old->vm_end - old->vm_start) >> PAGE_SHIFT;
+
+ for (; pg_iter < pg_iters; ++pg_iter) {
+ /* Get the physical page */
+ unsigned long shift = pg_iter << PAGE_SHIFT;
+ struct page *phys_page = follow_page(old, old->vm_start + shift, FOLL_GET);
+ struct anon_vma *page_anon_vma;
+
+ /* Do some checks and lock the page */
+ if (phys_page == NULL)
+ continue; /* Virtual memory page is not mapped */
+ lock_page(phys_page);
+ page_anon_vma = page_get_anon_vma(phys_page);
+ if (page_anon_vma != NULL) { /* NULL in case of ZERO_PAGE */
+ VM_BUG_ON_VMA(page_anon_vma != old->anon_vma, old);
+ /* Update physical page's mapping */
+ page_move_anon_rmap(phys_page, vma);
+ }
+ unlock_page(phys_page);
+ put_page(phys_page);
+ }
+}
+
void unlink_anon_vmas(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc, *next;
--
2.34.1
Powered by blists - more mailing lists