Message-ID: <230eb1be-180e-997e-ca51-c67cfa2dcb72@csgroup.eu>
Date: Tue, 2 Aug 2022 10:36:26 +0000
From: Christophe Leroy <christophe.leroy@...roup.eu>
To: Liam Howlett <liam.howlett@...cle.com>,
"maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>
CC: Yu Zhao <yuzhao@...gle.com>,
"linuxppc-dev@...ts.ozlabs.org" <linuxppc-dev@...ts.ozlabs.org>
Subject: Re: [PATCH v12 33/69] powerpc: remove mmap linked list walks
On 20/07/2022 at 04:17, Liam Howlett wrote:
> From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
>
> Use the VMA iterator instead.
Can you please copy the powerpc maintainers/reviewers when sending such a
patch?
Thanks
Christophe
>
> Link: https://lkml.kernel.org/r/20220504011345.662299-18-Liam.Howlett@oracle.com
> Link: https://lkml.kernel.org/r/20220621204632.3370049-34-Liam.Howlett@oracle.com
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
> Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
> Cc: Catalin Marinas <catalin.marinas@....com>
> Cc: David Howells <dhowells@...hat.com>
> Cc: SeongJae Park <sj@...nel.org>
> Cc: Will Deacon <will@...nel.org>
> Cc: Davidlohr Bueso <dave@...olabs.net>
> Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
> ---
> arch/powerpc/kernel/vdso.c | 6 +++---
> arch/powerpc/mm/book3s32/tlb.c | 11 ++++++-----
> arch/powerpc/mm/book3s64/subpage_prot.c | 13 ++-----------
> 3 files changed, 11 insertions(+), 19 deletions(-)
>
> diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
> index 0da287544054..94a8fa5017c3 100644
> --- a/arch/powerpc/kernel/vdso.c
> +++ b/arch/powerpc/kernel/vdso.c
> @@ -113,18 +113,18 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
> int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
> {
> struct mm_struct *mm = task->mm;
> + VMA_ITERATOR(vmi, mm, 0);
> struct vm_area_struct *vma;
>
> mmap_read_lock(mm);
> -
> - for (vma = mm->mmap; vma; vma = vma->vm_next) {
> + for_each_vma(vmi, vma) {
> unsigned long size = vma->vm_end - vma->vm_start;
>
> if (vma_is_special_mapping(vma, &vvar_spec))
> zap_page_range(vma, vma->vm_start, size);
> }
> -
> mmap_read_unlock(mm);
> +
> return 0;
> }
>
> diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
> index 19f0ef950d77..9ad6b56bfec9 100644
> --- a/arch/powerpc/mm/book3s32/tlb.c
> +++ b/arch/powerpc/mm/book3s32/tlb.c
> @@ -81,14 +81,15 @@ EXPORT_SYMBOL(hash__flush_range);
> void hash__flush_tlb_mm(struct mm_struct *mm)
> {
> struct vm_area_struct *mp;
> + VMA_ITERATOR(vmi, mm, 0);
>
> /*
> - * It is safe to go down the mm's list of vmas when called
> - * from dup_mmap, holding mmap_lock. It would also be safe from
> - * unmap_region or exit_mmap, but not from vmtruncate on SMP -
> - * but it seems dup_mmap is the only SMP case which gets here.
> + * It is safe to iterate the vmas when called from dup_mmap,
> + * holding mmap_lock. It would also be safe from unmap_region
> + * or exit_mmap, but not from vmtruncate on SMP - but it seems
> + * dup_mmap is the only SMP case which gets here.
> */
> - for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
> + for_each_vma(vmi, mp)
> hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
> }
> EXPORT_SYMBOL(hash__flush_tlb_mm);
> diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
> index 60c6ea16a972..d73b3b4176e8 100644
> --- a/arch/powerpc/mm/book3s64/subpage_prot.c
> +++ b/arch/powerpc/mm/book3s64/subpage_prot.c
> @@ -149,24 +149,15 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
> unsigned long len)
> {
> struct vm_area_struct *vma;
> + VMA_ITERATOR(vmi, mm, addr);
>
> /*
> * We don't try too hard, we just mark all the vma in that range
> * VM_NOHUGEPAGE and split them.
> */
> - vma = find_vma(mm, addr);
> - /*
> - * If the range is in unmapped range, just return
> - */
> - if (vma && ((addr + len) <= vma->vm_start))
> - return;
> -
> - while (vma) {
> - if (vma->vm_start >= (addr + len))
> - break;
> + for_each_vma_range(vmi, vma, addr + len) {
> vma->vm_flags |= VM_NOHUGEPAGE;
> walk_page_vma(vma, &subpage_walk_ops, NULL);
> - vma = vma->vm_next;
> }
> }
> #else
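
For reference, here is a minimal standalone sketch of the conversion pattern the diff applies, written outside any of the patched files. It uses only the iterator macros that appear in the hunks above (VMA_ITERATOR, for_each_vma, for_each_vma_range) plus the mmap lock helpers already used in the vdso.c hunk; the function names walk_all() and walk_range() are hypothetical and exist only for illustration.

#include <linux/mm.h>

/* Full walk: replaces "for (vma = mm->mmap; vma; vma = vma->vm_next)". */
static void walk_all(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);	/* iterate from address 0 */

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		/* every vma is visited, in ascending address order */
	}
	mmap_read_unlock(mm);
}

/* Bounded walk: replaces find_vma() plus manual vm_next stepping,
 * as in the subpage_prot.c hunk.
 */
static void walk_range(struct mm_struct *mm, unsigned long addr,
		       unsigned long len)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);	/* iterate from addr */

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, addr + len) {
		/* only vmas overlapping [addr, addr + len) are visited;
		 * an unmapped range yields no iterations, so the old
		 * explicit "return if unmapped" check is unnecessary.
		 */
	}
	mmap_read_unlock(mm);
}

In both cases the iterator's start address is fixed at declaration time, so the walk needs no separate find_vma() lookup before the loop.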