Message-Id: <20210112161240.2024684-59-Liam.Howlett@Oracle.com>
Date: Tue, 12 Jan 2021 11:12:28 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: maple-tree@...ts.infradead.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...gle.com>, Song Liu <songliubraving@...com>,
Davidlohr Bueso <dave@...olabs.net>,
"Paul E . McKenney" <paulmck@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Jerome Glisse <jglisse@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Rik van Riel <riel@...riel.com>,
Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH v2 58/70] mm/mempolicy: Use maple tree iterators instead of vma linked list
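
Use the maple tree iterators and lookups in place of the vma linked list
walks in mempolicy.c: mpol_rebind_mm() and new_page() iterate with
mas_for_each(), queue_pages_test_walk() and mbind_range() look up the
following vma with vma_next() instead of vma->vm_next, and
migrate_to_node() finds the first vma with find_vma(mm, 0) rather than
dereferencing mm->mmap.  In mbind_range(), mas_pause() is called after
split_vma() so the iterator is restarted safely once the tree has been
modified.

A minimal sketch of the iterator pattern used throughout (illustrative
only; the wrapper function below is hypothetical, the body mirrors the
mpol_rebind_mm() hunk in this patch):

	/* Illustrative only: set up a maple state and walk every vma. */
	static void example_vma_walk(struct mm_struct *mm, nodemask_t *new)
	{
		struct vm_area_struct *vma;
		MA_STATE(mas, &mm->mm_mt, 0, 0);	/* start the walk at address 0 */

		mmap_write_lock(mm);
		mas_for_each(&mas, vma, ULONG_MAX)	/* visit each vma in address order */
			mpol_rebind_policy(vma->vm_policy, new);
		mmap_write_unlock(mm);
	}
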
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
mm/mempolicy.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3ca4898f3f249..e0b8e658f18eb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -404,9 +404,10 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
mmap_write_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next)
+ mas_for_each(&mas, vma, ULONG_MAX)
mpol_rebind_policy(vma->vm_policy, new);
mmap_write_unlock(mm);
}
@@ -671,7 +672,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
static int queue_pages_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->vma;
+ struct vm_area_struct *next, *vma = walk->vma;
struct queue_pages *qp = walk->private;
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
@@ -686,9 +687,10 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
/* hole at head side of range */
return -EFAULT;
}
+ next = vma_next(vma->vm_mm, vma);
if (!(flags & MPOL_MF_DISCONTIG_OK) &&
((vma->vm_end < qp->end) &&
- (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
+ (!next || vma->vm_end < next->vm_start)))
/* hole at middle or tail of range */
return -EFAULT;
@@ -809,21 +811,22 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
pgoff_t pgoff;
unsigned long vmstart;
unsigned long vmend;
+ MA_STATE(mas, &mm->mm_mt, start, start);
- vma = find_vma(mm, start);
+ vma = mas_find(&mas, ULONG_MAX);
VM_BUG_ON(!vma);
- prev = vma->vm_prev;
+ prev = vma_mas_prev(&mas);
if (start > vma->vm_start)
prev = vma;
- for (; vma && vma->vm_start < end; prev = vma, vma = next) {
- next = vma->vm_next;
+ mas_for_each(&mas, vma, end - 1) {
+ next = vma_next(mm, vma);
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
if (mpol_equal(vma_policy(vma), new_pol))
- continue;
+ goto next;
pgoff = vma->vm_pgoff +
((vmstart - vma->vm_start) >> PAGE_SHIFT);
@@ -832,7 +835,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
new_pol, vma->vm_userfaultfd_ctx);
if (prev) {
vma = prev;
- next = vma->vm_next;
+ next = vma_next(mm, vma);
if (mpol_equal(vma_policy(vma), new_pol))
continue;
/* vma_merge() joined vma && vma->next, case 8 */
@@ -847,11 +850,14 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
err = split_vma(vma->vm_mm, vma, vmend, 0);
if (err)
goto out;
+ mas_pause(&mas);
}
replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
+next:
+ prev = vma;
}
out:
@@ -1072,6 +1078,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
int flags)
{
nodemask_t nmask;
+ struct vm_area_struct *vma;
LIST_HEAD(pagelist);
int err = 0;
struct migration_target_control mtc = {
@@ -1087,8 +1094,9 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
* need migration. Between passing in the full user address
* space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
*/
+ vma = find_vma(mm, 0);
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
- queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
@@ -1217,13 +1225,12 @@ static struct page *new_page(struct page *page, unsigned long start)
{
struct vm_area_struct *vma;
unsigned long address;
+ MA_STATE(mas, &current->mm->mm_mt, start, start);
- vma = find_vma(current->mm, start);
- while (vma) {
+ mas_for_each(&mas, vma, ULONG_MAX) {
address = page_address_in_vma(page, vma);
if (address != -EFAULT)
break;
- vma = vma->vm_next;
}
if (PageHuge(page)) {
--
2.28.0