Message-ID: <20240710192250.4114783-7-Liam.Howlett@oracle.com>
Date: Wed, 10 Jul 2024 15:22:35 -0400
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>
Cc: Suren Baghdasaryan <surenb@...gle.com>, Vlastimil Babka <vbabka@...e.cz>,
Lorenzo Stoakes <lstoakes@...il.com>,
Matthew Wilcox <willy@...radead.org>, sidhartha.kumar@...cle.com,
"Paul E . McKenney" <paulmck@...nel.org>,
Bert Karwatzki <spasswolf@....de>, Jiri Olsa <olsajiri@...il.com>,
linux-kernel@...r.kernel.org, Kees Cook <kees@...nel.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Subject: [PATCH v4 06/21] mm/mmap: Change munmap to use vma_munmap_struct() for accounting and surrounding vmas
Clean up the code by changing the munmap operation to use a structure
for the accounting and munmap variables.

Since remove_mt() is only called in one location and its contents will
be reduced to almost nothing, the remains of the function can be
inlined into vms_complete_munmap_vmas().
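For readers who don't track mm/ closely, below is a minimal userspace
sketch of the pattern this change moves to (hypothetical names and
types, not the kernel API): the counters that remove_mt() used to
recompute while freeing vmas are instead gathered into one struct
during the vma walk and applied in a single step afterwards.

#include <stdio.h>

/* Hypothetical stand-ins for vm_area_struct and its flags. */
#define VM_LOCKED  0x1UL
#define VM_ACCOUNT 0x2UL

struct area {
	unsigned long npages;
	unsigned long flags;
};

/* Plays the role of vma_munmap_struct: one struct carries every
 * counter from the gather phase to the complete phase. */
struct munmap_stats {
	unsigned long nr_pages;
	unsigned long locked_vm;
	unsigned long nr_accounted;
};

/* Gather phase: classify each area's pages in a single walk. */
static void gather(struct munmap_stats *st, const struct area *a, int n)
{
	for (int i = 0; i < n; i++) {
		st->nr_pages += a[i].npages;
		if (a[i].flags & VM_LOCKED)
			st->locked_vm += a[i].npages;
		if (a[i].flags & VM_ACCOUNT)
			st->nr_accounted += a[i].npages;
	}
}

int main(void)
{
	struct area areas[] = {
		{ .npages = 16, .flags = VM_LOCKED },
		{ .npages = 8,  .flags = VM_ACCOUNT },
	};
	struct munmap_stats st = { 0 };

	gather(&st, areas, 2);
	/* Complete phase: apply all counters in one place. */
	printf("pages=%lu locked=%lu accounted=%lu\n",
	       st.nr_pages, st.locked_vm, st.nr_accounted);
	return 0;
}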
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Reviewed-by: Suren Baghdasaryan <surenb@...gle.com>
---
 mm/internal.h |  6 ++++
 mm/mmap.c     | 82 ++++++++++++++++++++++++++++----------------------------
 2 files changed, 46 insertions(+), 42 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 43b3c99c77ba..a22547125c13 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1488,12 +1488,18 @@ struct vma_munmap_struct {
 	struct vma_iterator *vmi;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;	/* The first vma to munmap */
+	struct vm_area_struct *prev;	/* vma before the munmap area */
+	struct vm_area_struct *next;	/* vma after the munmap area */
 	struct list_head *uf;		/* Userfaultfd list_head */
 	unsigned long start;		/* Aligned start addr (inclusive) */
 	unsigned long end;		/* Aligned end addr (exclusive) */
 	int vma_count;			/* Number of vmas that will be removed */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
+	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
+	unsigned long exec_vm;		/* Number of executable pages */
+	unsigned long stack_vm;		/* Number of stack pages */
+	unsigned long data_vm;		/* Number of data pages */
 	bool unlock;			/* Unlock after the munmap */
 };
diff --git a/mm/mmap.c b/mm/mmap.c
index 1ed0720c38c5..62ff7aa10004 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -523,7 +523,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->unlock = unlock;
 	vms->uf = uf;
 	vms->vma_count = 0;
-	vms->nr_pages = vms->locked_vm = 0;
+	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
 }
 /*
@@ -2388,30 +2389,6 @@ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
 	return vma;
 }
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
-	unsigned long nr_accounted = 0;
-	struct vm_area_struct *vma;
-
-	/* Update high watermark before we lower total_vm */
-	update_hiwater_vm(mm);
-	mas_for_each(mas, vma, ULONG_MAX) {
-		long nrpages = vma_pages(vma);
-
-		if (vma->vm_flags & VM_ACCOUNT)
-			nr_accounted += nrpages;
-		vm_stat_account(mm, vma->vm_flags, -nrpages);
-		remove_vma(vma, false);
-	}
-	vm_unacct_memory(nr_accounted);
-}
-
 /*
  * Get rid of page table information in the indicated region.
  *
@@ -2632,15 +2609,14 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
  * @vms: The vma munmap struct
  * @mas_detach: The maple state of the detached vmas
  *
- * This updates the mm_struct, unmaps the region, frees the resources
+ * This function updates the mm_struct, unmaps the region, frees the resources
  * used for the munmap() and may downgrade the lock - if requested. Everything
  * needed to be done once the vma maple tree is updated.
  */
-
 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach)
 {
-	struct vm_area_struct *prev, *next;
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;

 	mm = vms->mm;
@@ -2649,21 +2625,26 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	if (vms->unlock)
 		mmap_write_downgrade(mm);

-	prev = vma_iter_prev_range(vms->vmi);
-	next = vma_next(vms->vmi);
-	if (next)
-		vma_iter_prev_range(vms->vmi);
-
 	/*
 	 * We can free page tables without write-locking mmap_lock because VMAs
 	 * were isolated before we downgraded mmap_lock.
 	 */
 	mas_set(mas_detach, 1);
-	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
-		     vms->vma_count, !vms->unlock);
-	/* Statistics and freeing VMAs */
+	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
+		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+	/* Update high watermark before we lower total_vm */
+	update_hiwater_vm(mm);
+	/* Stat accounting */
+	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+	mm->exec_vm -= vms->exec_vm;
+	mm->stack_vm -= vms->stack_vm;
+	mm->data_vm -= vms->data_vm;
+	/* Remove and clean up vmas */
 	mas_set(mas_detach, 0);
-	remove_mt(mm, mas_detach);
+	mas_for_each(mas_detach, vma, ULONG_MAX)
+		remove_vma(vma, false);
+
+	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
 	if (vms->unlock)
 		mmap_read_unlock(mm);
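(Aside: the total_vm update above goes through READ_ONCE()/WRITE_ONCE()
because, once mmap_write_downgrade() has run, lockless readers such as
/proc can observe mm->total_vm concurrently, and the ONCE accessors
prevent torn loads and stores. A rough, self-contained userspace
approximation of the idiom follows; the kernel's real definitions live
in include/asm-generic/rwonce.h and are more involved.

#include <stdio.h>

/* Rough approximation of the kernel's ONCE accessors: the volatile
 * cast forces the compiler to emit exactly one, untorn load or store. */
#define READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static unsigned long total_vm = 100;

int main(void)
{
	/* Mirrors the total_vm update above: one load, one store. */
	WRITE_ONCE(total_vm, READ_ONCE(total_vm) - 42);
	printf("total_vm=%lu\n", READ_ONCE(total_vm));
	return 0;
}
)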
@@ -2711,13 +2692,14 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		if (error)
 			goto start_split_failed;
 	}
+	vms->prev = vma_prev(vms->vmi);

 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
 	 * it is always overwritten.
 	 */
-	next = vms->vma;
-	do {
+	for_each_vma_range(*(vms->vmi), next, vms->end) {
+		long nrpages;
 		/* Does it split the end? */
 		if (next->vm_end > vms->end) {
 			error = __split_vma(vms->vmi, next, vms->end, 0);
@@ -2731,6 +2713,20 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 			goto munmap_gather_failed;

 		vma_mark_detached(next, true);
+		nrpages = vma_pages(next);
+
+		vms->nr_pages += nrpages;
+		if (next->vm_flags & VM_LOCKED)
+			vms->locked_vm += nrpages;
+
+		if (next->vm_flags & VM_ACCOUNT)
+			vms->nr_accounted += nrpages;
+
+		if (is_exec_mapping(next->vm_flags))
+			vms->exec_vm += nrpages;
+		else if (is_stack_mapping(next->vm_flags))
+			vms->stack_vm += nrpages;
+		else if (is_data_mapping(next->vm_flags))
+			vms->data_vm += nrpages;

-		if (next->vm_flags & VM_LOCKED)
-			vms->locked_vm += vma_pages(next);
@@ -2754,7 +2750,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		BUG_ON(next->vm_start < vms->start);
 		BUG_ON(next->vm_start > vms->end);
 #endif
-	} for_each_vma_range(*(vms->vmi), next, vms->end);
+	}
+
+	vms->next = vma_next(vms->vmi);

 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
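(For reference, the is_exec_mapping()/is_stack_mapping()/is_data_mapping()
helpers used in the gather loop live in mm/internal.h. A self-contained
approximation of their logic follows; the flag values are placeholders
for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

/* Placeholder flag values for illustration only. */
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL
#define VM_STACK  0x100UL

/* Approximations of the mm/internal.h helpers: exec mappings are
 * executable but neither writable nor stack; stack mappings carry the
 * stack bits; data mappings are writable, private, and not stack. */
static bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

static bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

static bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

int main(void)
{
	printf("text: exec=%d\n", is_exec_mapping(VM_EXEC));
	printf("stack: stack=%d\n", is_stack_mapping(VM_STACK | VM_WRITE));
	printf("heap: data=%d\n", is_data_mapping(VM_WRITE));
	return 0;
}
)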
--
2.43.0