Message-Id: <20210112161240.2024684-25-Liam.Howlett@Oracle.com>
Date: Tue, 12 Jan 2021 11:11:54 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: maple-tree@...ts.infradead.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...gle.com>, Song Liu <songliubraving@...com>,
Davidlohr Bueso <dave@...olabs.net>,
"Paul E . McKenney" <paulmck@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Jerome Glisse <jglisse@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Rik van Riel <riel@...riel.com>,
Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH v2 24/70] mmap: Remove __do_munmap() in favour of do_mas_munmap()
Export the new interface, do_mas_munmap(), and use it in place of the old __do_munmap() interface.
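
For reference, a minimal sketch of the caller-side conversion (it mirrors the
__vm_munmap() hunk below; mm, start, len, uf and downgrade are placeholders
for whatever the real call site uses):

	/* Old interface: the maple state was built internally. */
	ret = __do_munmap(mm, start, len, &uf, downgrade);

	/*
	 * New interface: the caller declares a maple state spanning the
	 * start address and passes it in explicitly.
	 */
	MA_STATE(mas, &mm->mm_mt, start, start);
	ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);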
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
 include/linux/mm.h |  4 ++--
 mm/mmap.c          | 16 ++++------------
 mm/mremap.c        |  7 ++++---
 3 files changed, 10 insertions(+), 17 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 996353a057500..680dcfe07dbb6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2573,8 +2573,8 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
- struct list_head *uf, bool downgrade);
+extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
diff --git a/mm/mmap.c b/mm/mmap.c
index c0a64c4726b67..9be91b47db6b4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2555,13 +2555,6 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
return downgrade ? 1 : 0;
}
-int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
- struct list_head *uf, bool downgrade)
-{
- MA_STATE(mas, &mm->mm_mt, start, start);
- return do_mas_munmap(&mas, mm, start, len, uf, downgrade);
-}
-
/*
* do_mas_munmap() - munmap a given range.
* @mas: The maple state
@@ -2610,7 +2603,8 @@ int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{
- return __do_munmap(mm, start, len, uf, false);
+ MA_STATE(mas, &mm->mm_mt, start, start);
+ return do_mas_munmap(&mas, mm, start, len, uf, false);
}
unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2858,11 +2852,12 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
int ret;
struct mm_struct *mm = current->mm;
LIST_HEAD(uf);
+ MA_STATE(mas, &mm->mm_mt, start, start);
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = __do_munmap(mm, start, len, &uf, downgrade);
+ ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);
/*
* Returning 1 indicates mmap_lock is downgraded.
* But 1 is not legal return value of vm_munmap() and munmap(), reset
@@ -3014,9 +3009,6 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
arch_unmap(mm, newbrk, oldbrk);
if (likely(vma->vm_start >= newbrk)) { // remove entire mapping(s)
- mas_set(mas, newbrk);
- if (vma->vm_start != newbrk)
- mas_reset(mas); // cause a re-walk for the first overlap.
ret = do_mas_munmap(mas, mm, newbrk, oldbrk-newbrk, uf, true);
goto munmap_full_vma;
}
diff --git a/mm/mremap.c b/mm/mremap.c
index 138abbae4f758..a7526a8c1fe5a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -723,14 +723,15 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
/*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
- * __do_munmap does all the needed commit accounting, and
+ * do_mas_munmap does all the needed commit accounting, and
* downgrades mmap_lock to read if so directed.
*/
if (old_len >= new_len) {
int retval;
+ MA_STATE(mas, &mm->mm_mt, addr + new_len, addr + new_len);
- retval = __do_munmap(mm, addr+new_len, old_len - new_len,
- &uf_unmap, true);
+ retval = do_mas_munmap(&mas, mm, addr + new_len,
+ old_len - new_len, &uf_unmap, true);
if (retval < 0 && old_len != new_len) {
ret = retval;
goto out;
--
2.28.0