Message-ID: <20240820235730.2852400-20-Liam.Howlett@oracle.com>
Date: Tue, 20 Aug 2024 19:57:28 -0400
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Suren Baghdasaryan <surenb@...gle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Matthew Wilcox <willy@...radead.org>, Vlastimil Babka <vbabka@...e.cz>,
sidhartha.kumar@...cle.com, Bert Karwatzki <spasswolf@....de>,
Jiri Olsa <olsajiri@...il.com>, Kees Cook <kees@...nel.org>,
"Paul E . McKenney" <paulmck@...nel.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: [PATCH v6 19/20] mm: Move may_expand_vm() check in mmap_region()
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
The may_expand_vm() check requires the count of the pages within the
munmap range.  Since this count is needed for accounting and is
obtained later anyway, reordering may_expand_vm() to later in the call
stack, after the vma munmap struct (vms) is initialised and the gather
stage has potentially run, allows for a single loop over the vmas.  The
gather stage does not commit any work, so everything can be undone in
the case of a failure.
The MAP_FIXED page count is available after the vms_gather_munmap_vmas()
call, so use it instead of looping over the vmas twice.
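For reference, a minimal sketch of the resulting flow in mmap_region()
(illustration only, eliding the vms/vmi setup and the error paths shown
in the diff below; vms_gather_munmap_vmas(), mas_detach and
vms.nr_pages are the names used by this series):

	/* Find the first overlapping VMA */
	vma = vma_find(&vmi, end);
	if (vma) {
		/*
		 * Gather the vmas in the MAP_FIXED munmap range.
		 * Nothing is committed here, so a later failure can
		 * still be undone.
		 */
		error = vms_gather_munmap_vmas(&vms, &mas_detach);
		if (error)
			goto gather_failed;
	}

	/*
	 * vms.nr_pages was populated by the gather above, so the
	 * address space limit check no longer needs its own vma walk.
	 */
	if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
		goto abort_munmap;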
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
mm/mmap.c | 15 ++++-----------
mm/vma.c | 21 ---------------------
mm/vma.h | 3 ---
3 files changed, 4 insertions(+), 35 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 49d9e95f42f5..012b3495c266 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1376,17 +1376,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
pgoff_t vm_pgoff;
int error = -ENOMEM;
VMA_ITERATOR(vmi, mm, addr);
- unsigned long nr_pages, nr_accounted;
-
- nr_pages = count_vma_pages_range(mm, addr, end, &nr_accounted);
-
- /*
- * Check against address space limit.
- * MAP_FIXED may remove pages of mappings that intersects with requested
- * mapping. Account for the pages it would unmap.
- */
- if (!may_expand_vm(mm, vm_flags, pglen - nr_pages))
- return -ENOMEM;
/* Find the first overlapping VMA */
vma = vma_find(&vmi, end);
@@ -1409,6 +1398,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma_iter_next_range(&vmi);
}
+ /* Check against address space limit. */
+ if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
+ goto abort_munmap;
+
/*
* Private writable mapping: check memory availability
*/
diff --git a/mm/vma.c b/mm/vma.c
index 5b33f7460ab7..f277ab1b0175 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -1643,27 +1643,6 @@ bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
return vma_fs_can_writeback(vma);
}
-unsigned long count_vma_pages_range(struct mm_struct *mm,
- unsigned long addr, unsigned long end,
- unsigned long *nr_accounted)
-{
- VMA_ITERATOR(vmi, mm, addr);
- struct vm_area_struct *vma;
- unsigned long nr_pages = 0;
-
- *nr_accounted = 0;
- for_each_vma_range(vmi, vma, end) {
- unsigned long vm_start = max(addr, vma->vm_start);
- unsigned long vm_end = min(end, vma->vm_end);
-
- nr_pages += PHYS_PFN(vm_end - vm_start);
- if (vma->vm_flags & VM_ACCOUNT)
- *nr_accounted += PHYS_PFN(vm_end - vm_start);
- }
-
- return nr_pages;
-}
-
static DEFINE_MUTEX(mm_all_locks_mutex);
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
diff --git a/mm/vma.h b/mm/vma.h
index 7618ddbfd2b2..f8b4d3375a5b 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -305,9 +305,6 @@ bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);
-unsigned long count_vma_pages_range(struct mm_struct *mm,
- unsigned long addr, unsigned long end,
- unsigned long *nr_accounted);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
--
2.43.0