Message-ID: <20230216051750.3125598-11-surenb@google.com>
Date: Wed, 15 Feb 2023 21:17:25 -0800
From: Suren Baghdasaryan <surenb@...gle.com>
To: akpm@...ux-foundation.org
Cc: michel@...pinasse.org, jglisse@...gle.com, mhocko@...e.com,
vbabka@...e.cz, hannes@...xchg.org, mgorman@...hsingularity.net,
dave@...olabs.net, willy@...radead.org, liam.howlett@...cle.com,
peterz@...radead.org, ldufour@...ux.ibm.com, paulmck@...nel.org,
mingo@...hat.com, will@...nel.org, luto@...nel.org,
songliubraving@...com, peterx@...hat.com, david@...hat.com,
dhowells@...hat.com, hughd@...gle.com, bigeasy@...utronix.de,
kent.overstreet@...ux.dev, punit.agrawal@...edance.com,
lstoakes@...il.com, peterjung1337@...il.com, rientjes@...gle.com,
chriscli@...gle.com, axelrasmussen@...gle.com, joelaf@...gle.com,
minchan@...gle.com, rppt@...nel.org, jannh@...gle.com,
shakeelb@...gle.com, tatashin@...gle.com, edumazet@...gle.com,
gthelen@...gle.com, gurua@...gle.com, arjunroy@...gle.com,
soheil@...gle.com, leewalsh@...gle.com, posk@...gle.com,
michalechner92@...glemail.com, linux-mm@...ck.org,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org, x86@...nel.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com,
Suren Baghdasaryan <surenb@...gle.com>
Subject: [PATCH v3 10/35] mm: rcu safe VMA freeing

From: Michel Lespinasse <michel@...pinasse.org>

This prepares for handling page faults under the VMA lock, looking up VMAs
under the protection of an RCU read lock instead of the usual mmap read lock.

Signed-off-by: Michel Lespinasse <michel@...pinasse.org>
Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
---
 include/linux/mm_types.h | 13 ++++++++++---
 kernel/fork.c            | 20 +++++++++++++++++++-
 2 files changed, 29 insertions(+), 4 deletions(-)

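Not part of the patch, but as an illustration of what the deferred freeing is
for: a rough sketch of the kind of fault-side lookup this series is building
towards. find_vma_under_rcu() and vma_try_read_lock() below are hypothetical
placeholders (the series adds its own helpers in later patches); MA_STATE(),
mas_walk() and the RCU primitives are the existing kernel APIs.

#include <linux/maple_tree.h>
#include <linux/mm_types.h>
#include <linux/rcupdate.h>

/* Sketch only: look up a VMA without taking the mmap read lock. */
static struct vm_area_struct *find_vma_under_rcu(struct mm_struct *mm,
						 unsigned long addr)
{
	MA_STATE(mas, &mm->mm_mt, addr, addr);
	struct vm_area_struct *vma;

	rcu_read_lock();
	vma = mas_walk(&mas);			/* no mmap_read_lock() here */
	if (vma && !vma_try_read_lock(vma))	/* hypothetical per-VMA lock */
		vma = NULL;
	rcu_read_unlock();

	return vma;
}

The point is only that, once vm_area_free() defers the actual free via
call_rcu(), the object returned by the tree walk cannot be freed out from
under the RCU read-side critical section.
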
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3fd5305dbbf9..fb4e2afad787 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -480,9 +480,16 @@ struct anon_vma_name {
 struct vm_area_struct {
 	/* The first cache line has the info for VMA tree walking. */
 
-	unsigned long vm_start;		/* Our start address within vm_mm. */
-	unsigned long vm_end;		/* The first byte after our end address
-					   within vm_mm. */
+	union {
+		struct {
+			/* VMA covers [vm_start; vm_end) addresses within mm */
+			unsigned long vm_start;
+			unsigned long vm_end;
+		};
+#ifdef CONFIG_PER_VMA_LOCK
+		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
+#endif
+	};
 
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
 
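An aside on the union above (illustration, not part of the diff): vm_start and
vm_end are dead once the VMA has been unlinked and queued for freeing, so
struct rcu_head can overlay them instead of growing the structure or pushing
members out of the first cache line. On the usual configurations where
pointers and longs have the same size the overlay is exact; a minimal
compile-time check of that assumption could look like:

#include <linux/build_bug.h>
#include <linux/types.h>	/* struct rcu_head (a.k.a. callback_head) */

/* struct rcu_head is two pointers, so it fits in the vm_start/vm_end words */
static_assert(sizeof(struct rcu_head) <= 2 * sizeof(unsigned long),
	      "rcu_head must not enlarge the vm_start/vm_end slot");
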
diff --git a/kernel/fork.c b/kernel/fork.c
index 5f23d5e03362..314d51eb91da 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -479,12 +479,30 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 	return new;
 }
 
-void vm_area_free(struct vm_area_struct *vma)
+static void __vm_area_free(struct vm_area_struct *vma)
 {
 	free_anon_vma_name(vma);
 	kmem_cache_free(vm_area_cachep, vma);
 }
 
+#ifdef CONFIG_PER_VMA_LOCK
+static void vm_area_free_rcu_cb(struct rcu_head *head)
+{
+	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
+						  vm_rcu);
+
+	__vm_area_free(vma);
+}
+#endif
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_PER_VMA_LOCK
+	call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
+#else
+	__vm_area_free(vma);
+#endif
+}
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
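
One behavioral note, again as an illustration rather than part of the patch:
with CONFIG_PER_VMA_LOCK, vm_area_free() now returns before the memory is
released, and call_rcu() immediately reuses vm_rcu, which shares storage with
vm_start/vm_end. delete_vma() below is a hypothetical caller on the unmap
path, sketched only to spell out the resulting ordering:

/* Hypothetical caller, for illustration only. */
static void delete_vma(struct vm_area_struct *vma)
{
	/* vma must already be unlinked from the mm's maple tree here */
	vm_area_free(vma);
	/*
	 * With CONFIG_PER_VMA_LOCK this only queued vm_area_free_rcu_cb();
	 * __vm_area_free() runs after an RCU grace period, i.e. once every
	 * rcu_read_lock() section that could still hold the old pointer has
	 * ended. Because vm_rcu overlays vm_start/vm_end, those fields are
	 * clobbered as soon as the callback is queued, so racing readers
	 * need a way to notice the VMA is going away before trusting them.
	 */
}
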
--
2.39.1