Message-ID: <20220906165132.jv6y6if6t576wdjl@offworld>
Date: Tue, 6 Sep 2022 09:51:32 -0700
From: Davidlohr Bueso <dave@...olabs.net>
To: Liam Howlett <liam.howlett@...cle.com>
Cc: "maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>
Subject: Re: [PATCH v13 52/70] mm/khugepaged: stop using vma linked list
On Mon, 22 Aug 2022, Liam Howlett wrote:
>From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
>
>Use vma iterator & find_vma() instead of vma linked list.
>
>Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
>Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Reviewed-by: Davidlohr Bueso <dave@...olabs.net>
>---
> mm/huge_memory.c | 4 ++--
> mm/khugepaged.c | 11 ++++++++---
> 2 files changed, 10 insertions(+), 5 deletions(-)
>
>diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>index 83c47a989260..6c5c23ef658a 100644
>--- a/mm/huge_memory.c
>+++ b/mm/huge_memory.c
>@@ -2339,11 +2339,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
> split_huge_pmd_if_needed(vma, end);
>
> /*
>- * If we're also updating the vma->vm_next->vm_start,
>+ * If we're also updating the next vma vm_start,
> * check if we need to split it.
> */
> if (adjust_next > 0) {
>- struct vm_area_struct *next = vma->vm_next;
>+ struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
> unsigned long nstart = next->vm_start;
> nstart += adjust_next;
> split_huge_pmd_if_needed(next, nstart);
>diff --git a/mm/khugepaged.c b/mm/khugepaged.c
>index d3313b7a8fe5..d8e388106322 100644
>--- a/mm/khugepaged.c
>+++ b/mm/khugepaged.c
>@@ -2053,10 +2053,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> __releases(&khugepaged_mm_lock)
> __acquires(&khugepaged_mm_lock)
> {
>+ struct vma_iterator vmi;
> struct mm_slot *mm_slot;
> struct mm_struct *mm;
> struct vm_area_struct *vma;
> int progress = 0;
>+ unsigned long address;
Nit: instead of adding a local 'address', just use khugepaged_scan.address directly.
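Something like this, perhaps (untested, just to illustrate the nit):

	/* no local 'address'; feed khugepaged_scan.address straight to the iterator */
	vma_iter_init(&vmi, mm, khugepaged_scan.address);
	for_each_vma(vmi, vma) {
		...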
>
> VM_BUG_ON(!pages);
> lockdep_assert_held(&khugepaged_mm_lock);
>@@ -2081,11 +2083,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> vma = NULL;
> if (unlikely(!mmap_read_trylock(mm)))
> goto breakouterloop_mmap_lock;
>- if (likely(!hpage_collapse_test_exit(mm)))
>- vma = find_vma(mm, khugepaged_scan.address);
>
> progress++;
>- for (; vma; vma = vma->vm_next) {
>+ if (unlikely(hpage_collapse_test_exit(mm)))
>+ goto breakouterloop;
>+
>+ address = khugepaged_scan.address;
>+ vma_iter_init(&vmi, mm, address);
>+ for_each_vma(vmi, vma) {
> unsigned long hstart, hend;
>
> cond_resched();
>
>--
>2.35.1
>