Message-ID: <20211005012959.1110504-54-Liam.Howlett@oracle.com>
Date: Tue, 5 Oct 2021 01:31:06 +0000
From: Liam Howlett <liam.howlett@...cle.com>
To: "maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Douglas Gilbert <dgilbert@...erlog.com>
CC: Song Liu <songliubraving@...com>,
Davidlohr Bueso <dave@...olabs.net>,
"Paul E . McKenney" <paulmck@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Rik van Riel <riel@...riel.com>,
Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH v3 53/66] mm/mempolicy: Use maple tree iterators instead of
vma linked list
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
mm/mempolicy.c | 41 +++++++++++++++++++++++++----------------
1 file changed, 25 insertions(+), 16 deletions(-)
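
A minimal sketch of the two iteration patterns the conversion below
relies on, assuming the maple tree API introduced earlier in this
series (MA_STATE(), mas_for_each(), mt_for_each()).  count_vmas() and
first_vma_in_range() are hypothetical helpers for illustration only,
not part of this patch:

	/* Walk every VMA with an on-stack ma_state, as new_page() does. */
	static unsigned long count_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		unsigned long nr = 0;
		MA_STATE(mas, &mm->mm_mt, 0, 0);

		rcu_read_lock();
		mas_for_each(&mas, vma, ULONG_MAX)
			nr++;
		rcu_read_unlock();
		return nr;
	}

	/* Walk only VMAs overlapping [start, end), as mbind_range() does. */
	static struct vm_area_struct *first_vma_in_range(struct mm_struct *mm,
			unsigned long start, unsigned long end)
	{
		struct vm_area_struct *vma;
		unsigned long index = start;

		mt_for_each(&mm->mm_mt, vma, index, end - 1)
			return vma;
		return NULL;
	}
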
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1592b081c58e..81ea28c8c6c8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -377,10 +377,13 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
mmap_write_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next)
+ mas_lock(&mas);
+ mas_for_each(&mas, vma, ULONG_MAX)
mpol_rebind_policy(vma->vm_policy, new);
+ mas_unlock(&mas);
mmap_write_unlock(mm);
}
@@ -652,7 +655,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
static int queue_pages_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->vma;
+ struct vm_area_struct *next, *vma = walk->vma;
struct queue_pages *qp = walk->private;
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
@@ -667,9 +670,10 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
/* hole at head side of range */
return -EFAULT;
}
+ next = vma_next(vma->vm_mm, vma);
if (!(flags & MPOL_MF_DISCONTIG_OK) &&
((vma->vm_end < qp->end) &&
- (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
+ (!next || vma->vm_end < next->vm_start)))
/* hole at middle or tail of range */
return -EFAULT;
@@ -783,28 +787,28 @@ static int vma_replace_policy(struct vm_area_struct *vma,
static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long end, struct mempolicy *new_pol)
{
- struct vm_area_struct *next;
struct vm_area_struct *prev;
struct vm_area_struct *vma;
int err = 0;
pgoff_t pgoff;
unsigned long vmstart;
unsigned long vmend;
+ unsigned long index = start;
- vma = find_vma(mm, start);
+ rcu_read_lock();
+ vma = find_vma_intersection(mm, start, end);
VM_BUG_ON(!vma);
- prev = vma->vm_prev;
+ prev = vma_prev(mm, vma);
if (start > vma->vm_start)
prev = vma;
- for (; vma && vma->vm_start < end; prev = vma, vma = next) {
- next = vma->vm_next;
+ mt_for_each(&mm->mm_mt, vma, index, end - 1) {
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
if (mpol_equal(vma_policy(vma), new_pol))
- continue;
+ goto next;
pgoff = vma->vm_pgoff +
((vmstart - vma->vm_start) >> PAGE_SHIFT);
@@ -813,7 +817,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
new_pol, vma->vm_userfaultfd_ctx);
if (prev) {
vma = prev;
- next = vma->vm_next;
if (mpol_equal(vma_policy(vma), new_pol))
continue;
/* vma_merge() joined vma && vma->next, case 8 */
@@ -829,13 +832,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (err)
goto out;
}
- replace:
+replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
+next:
+ prev = vma;
}
- out:
+out:
+ rcu_read_unlock();
return err;
}
@@ -1063,6 +1069,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
int flags)
{
nodemask_t nmask;
+ struct vm_area_struct *vma;
LIST_HEAD(pagelist);
int err = 0;
struct migration_target_control mtc = {
@@ -1078,8 +1085,9 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
* need migration. Between passing in the full user address
* space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
*/
+ vma = find_vma(mm, 0);
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
- queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
@@ -1208,14 +1216,15 @@ static struct page *new_page(struct page *page, unsigned long start)
{
struct vm_area_struct *vma;
unsigned long address;
+ MA_STATE(mas, &current->mm->mm_mt, start, start);
- vma = find_vma(current->mm, start);
- while (vma) {
+ rcu_read_lock();
+ mas_for_each(&mas, vma, ULONG_MAX) {
address = page_address_in_vma(page, vma);
if (address != -EFAULT)
break;
- vma = vma->vm_next;
}
+ rcu_read_unlock();
if (PageHuge(page)) {
return alloc_huge_page_vma(page_hstate(compound_head(page)),
--
2.30.2