Message-ID: <20221129164352.3374638-21-Liam.Howlett@oracle.com>
Date: Tue, 29 Nov 2022 16:44:29 +0000
From: Liam Howlett <liam.howlett@...cle.com>
To: "maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>
CC: Liam Howlett <liam.howlett@...cle.com>
Subject: [PATCH 20/43] sched: Convert to vma iterator
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Use the vma iterator so that iterator invalidation and updates are handled
by the iterator itself rather than by each caller.
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
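For context, the conversion follows the vma iterator pattern used throughout
this series: the open-coded MA_STATE/mas_find() walk is replaced by the
iterator helpers the series relies on (vma_iter_init(), vma_iter_set(),
vma_next() and for_each_vma()).  A minimal sketch of a read-side walk with
these helpers follows; the function name walk_vmas_from() is made up for
illustration and is not part of this patch.

#include <linux/mm.h>
#include <linux/mm_types.h>

/* Illustrative only: visit every VMA at or above @addr, in address order. */
static void walk_vmas_from(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);	/* the patch below instead declares the
					 * iterator and calls vma_iter_init() */

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		/* vma->vm_start .. vma->vm_end */
	}
	mmap_read_unlock(mm);
}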
kernel/sched/fair.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4a0b8bd941c..aa780a00bf4e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2926,11 +2926,11 @@ static void task_numa_work(struct callback_head *work)
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
u64 runtime = p->se.sum_exec_runtime;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long nr_pte_updates = 0;
long pages, virtpages;
+ struct vma_iterator vmi;
SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
@@ -2983,16 +2983,16 @@ static void task_numa_work(struct callback_head *work)
if (!mmap_read_trylock(mm))
return;
- mas_set(&mas, start);
- vma = mas_find(&mas, ULONG_MAX);
+ vma_iter_init(&vmi, mm, start);
+ vma = vma_next(&vmi);
if (!vma) {
reset_ptenuma_scan(p);
start = 0;
- mas_set(&mas, start);
- vma = mas_find(&mas, ULONG_MAX);
+ vma_iter_set(&vmi, start);
+ vma = vma_next(&vmi);
}
- for (; vma; vma = mas_find(&mas, ULONG_MAX)) {
+ do {
if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
continue;
@@ -3039,7 +3039,7 @@ static void task_numa_work(struct callback_head *work)
cond_resched();
} while (end != vma->vm_end);
- }
+ } for_each_vma(vmi, vma);
out:
/*
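A note on the closing "} for_each_vma(vmi, vma);" above: in include/linux/mm.h
the macro is (roughly) defined as

	#define for_each_vma(__vmi, __vma)				\
		while (((__vma) = vma_next(&(__vmi))) != NULL)

so it can legally terminate the do { } block.  The body first runs with the
VMA already fetched by vma_next() before the loop, and each subsequent pass
pulls the next VMA until none remain.  Since vma_next() is a thin wrapper
around mas_find(&vmi->mas, ULONG_MAX), this keeps the behaviour of the old
"for (; vma; vma = mas_find(&mas, ULONG_MAX))" loop.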
--
2.35.1