Message-ID: <20240822192543.3359552-19-Liam.Howlett@oracle.com>
Date: Thu, 22 Aug 2024 15:25:40 -0400
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Suren Baghdasaryan <surenb@...gle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Matthew Wilcox <willy@...radead.org>, Vlastimil Babka <vbabka@...e.cz>,
sidhartha.kumar@...cle.com, Bert Karwatzki <spasswolf@....de>,
Jiri Olsa <olsajiri@...il.com>, Kees Cook <kees@...nel.org>,
"Paul E . McKenney" <paulmck@...nel.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: [PATCH v7 18/21] ipc/shm, mm: Drop do_vma_munmap()
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
The do_vma_munmap() wrapper existed for callers that didn't have a vma
iterator and needed to check the vma mseal status prior to calling the
underlying munmap(). All callers now use a vma iterator, and since the
mseal check has been moved to do_vmi_align_munmap() and the vmas are
already aligned, do_vmi_align_munmap() can be called directly instead
of the wrapper.
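The conversion at each call site is mechanical. A minimal before/after
sketch (mirroring the ipc/shm hunks below; the wrapper only fetched the
mm from vma->vm_mm, which the caller now passes explicitly):

	/* Before: the wrapper derived the mm from the vma. */
	do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end, NULL, false);

	/* After: call the aligned variant directly, passing the mm. */
	do_vmi_align_munmap(&vmi, vma, vma->vm_mm, vma->vm_start,
			    vma->vm_end, NULL, false);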
do_vmi_align_munmap() can no longer be static, since ipc/shm now uses
it; it is exported via the mm.h header.
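For reference, a sketch of what out-of-mm callers such as ipc/shm now
see via the public header (this mirrors the mm.h hunk below and is not
itself part of the change):

	#include <linux/mm.h>	/* now carries the prototype */

	int do_vmi_align_munmap(struct vma_iterator *vmi,
				struct vm_area_struct *vma,
				struct mm_struct *mm, unsigned long start,
				unsigned long end, struct list_head *uf,
				bool unlock);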
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
 include/linux/mm.h |  6 +++---
 ipc/shm.c          |  8 ++++----
 mm/mmap.c          | 33 ++++++---------------------------
 mm/vma.c           | 12 ++++++------
 mm/vma.h           |  4 +---
 5 files changed, 20 insertions(+), 43 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b1eed30fdc06..6f1835e3b430 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3292,14 +3292,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
-extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
diff --git a/ipc/shm.c b/ipc/shm.c
index 3e3071252dac..99564c870084 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1778,8 +1778,8 @@ long ksys_shmdt(char __user *shmaddr)
*/
file = vma->vm_file;
size = i_size_read(file_inode(vma->vm_file));
- do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
- NULL, false);
+ do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
+ vma->vm_end, NULL, false);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
@@ -1803,8 +1803,8 @@ long ksys_shmdt(char __user *shmaddr)
if ((vma->vm_ops == &shm_vm_ops) &&
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
(vma->vm_file == file)) {
- do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
- NULL, false);
+ do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
+ vma->vm_end, NULL, false);
}
vma = vma_next(&vmi);
diff --git a/mm/mmap.c b/mm/mmap.c
index aa4aa49f3b97..51ab0bdb856c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -169,11 +169,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
- * do_vma_munmap() will drop the lock on success, so update it
- * before calling do_vma_munmap().
+ * do_vmi_align_munmap() will drop the lock on success, so
+ * update it before calling do_vmi_align_munmap().
*/
mm->brk = brk;
- if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
+ if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
+ /* unlock = */ true))
goto out;
goto success_unlocked;
@@ -1478,9 +1479,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma->vm_file = get_file(file);
/*
* call_mmap() may map PTE, so ensure there are no existing PTEs
- * call the vm_ops close function if one exists.
+ * and call the vm_ops close function if one exists.
*/
- vms_clean_up_area(&vms, &mas_detach, true);
+ vms_clean_up_area(&vms, &mas_detach);
error = call_mmap(file, vma);
if (error)
goto unmap_and_free_vma;
@@ -1742,28 +1743,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
return ret;
}
-/*
- * do_vma_munmap() - Unmap a full or partial vma.
- * @vmi: The vma iterator pointing at the vma
- * @vma: The first vma to be munmapped
- * @start: the start of the address to unmap
- * @end: The end of the address to unmap
- * @uf: The userfaultfd list_head
- * @unlock: Drop the lock on success
- *
- * unmaps a VMA mapping when the vma iterator is already in position.
- * Does not handle alignment.
- *
- * Return: 0 on success drops the lock of so directed, error on failure and will
- * still hold the lock.
- */
-int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, struct list_head *uf,
- bool unlock)
-{
- return do_vmi_align_munmap(vmi, vma, vma->vm_mm, start, end, uf, unlock);
-}
-
/*
* do_brk_flags() - Increase the brk vma if the flags match.
* @vmi: The vma iterator
diff --git a/mm/vma.c b/mm/vma.c
index 8dc60dcb6e8d..91b027eb9a38 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -658,8 +658,8 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
*/
mas_set(mas_detach, 1);
lru_add_drain();
- tlb_gather_mmu(&tlb, vms->mm);
- update_hiwater_rss(vms->mm);
+ tlb_gather_mmu(&tlb, vms->vma->vm_mm);
+ update_hiwater_rss(vms->vma->vm_mm);
unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
vms->vma_count, mm_wr_locked);
@@ -672,14 +672,14 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
}
void vms_clean_up_area(struct vma_munmap_struct *vms,
- struct ma_state *mas_detach, bool mm_wr_locked)
+ struct ma_state *mas_detach)
{
struct vm_area_struct *vma;
if (!vms->nr_pages)
return;
- vms_clear_ptes(vms, mas_detach, mm_wr_locked);
+ vms_clear_ptes(vms, mas_detach, true);
mas_set(mas_detach, 0);
mas_for_each(mas_detach, vma, ULONG_MAX)
if (vma->vm_ops && vma->vm_ops->close)
@@ -702,7 +702,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
struct vm_area_struct *vma;
struct mm_struct *mm;
- mm = vms->mm;
+ mm = current->mm;
mm->map_count -= vms->vma_count;
mm->locked_vm -= vms->locked_vm;
if (vms->unlock)
@@ -770,7 +770,7 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
* its limit temporarily, to help free resources as expected.
*/
if (vms->end < vms->vma->vm_end &&
- vms->mm->map_count >= sysctl_max_map_count)
+ vms->vma->vm_mm->map_count >= sysctl_max_map_count)
goto map_count_exceeded;
/* Don't bother splitting the VMA if we can't unmap it anyway */
diff --git a/mm/vma.h b/mm/vma.h
index f710812482a1..8ca32d7cb846 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -31,7 +31,6 @@ struct unlink_vma_file_batch {
*/
struct vma_munmap_struct {
struct vma_iterator *vmi;
- struct mm_struct *mm;
struct vm_area_struct *vma; /* The first vma to munmap */
struct vm_area_struct *prev; /* vma before the munmap area */
struct vm_area_struct *next; /* vma after the munmap area */
@@ -113,7 +112,6 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
unsigned long start, unsigned long end, struct list_head *uf,
bool unlock)
{
- vms->mm = current->mm;
vms->vmi = vmi;
vms->vma = vma;
if (vma) {
@@ -140,7 +138,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
struct ma_state *mas_detach);
void vms_clean_up_area(struct vma_munmap_struct *vms,
- struct ma_state *mas_detach, bool mm_wr_locked);
+ struct ma_state *mas_detach);
/*
* reattach_vmas() - Undo any munmap work and free resources
--
2.43.0