Message-ID: <2ec286a2-4620-4e6d-ad3d-9b4c0d9e1394@linux.alibaba.com>
Date: Tue, 14 May 2024 14:26:44 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Lance Yang <ioworker0@...il.com>, akpm@...ux-foundation.org
Cc: willy@...radead.org, sj@...nel.org, maskray@...gle.com, ziy@...dia.com,
ryan.roberts@....com, david@...hat.com, 21cnbao@...il.com, mhocko@...e.com,
fengwei.yin@...el.com, zokeefe@...gle.com, shy828301@...il.com,
xiehuan09@...il.com, libang.li@...group.com, wangkefeng.wang@...wei.com,
songmuchun@...edance.com, peterx@...hat.com, minchan@...nel.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH RESEND v5 1/4] mm/rmap: remove duplicated exit code in
pagewalk loop
On 2024/5/13 15:47, Lance Yang wrote:
> Introduce the labels walk_done and walk_done_err as exit points to
> eliminate duplicated exit code in the pagewalk loop.
>
> Reviewed-by: Zi Yan <ziy@...dia.com>
> Signed-off-by: Lance Yang <ioworker0@...il.com>
LGTM.
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
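
For anyone reading along, here is a minimal standalone sketch of the
control flow this patch introduces (step_errors(), all_done() and
cleanup() are illustrative stand-ins, not symbols from mm/rmap.c):
error paths jump to walk_done_err, which sets ret = false and falls
through to walk_done, where the single copy of the shared exit code
runs before breaking out of the loop; normal iterations hit the
continue and never reach the labels.

#include <stdbool.h>
#include <stdio.h>

static void cleanup(void)
{
	/* stands in for page_vma_mapped_walk_done(&pvmw) */
	puts("cleanup");
}

static bool step_errors(int i)
{
	return i == 3;		/* pretend step 3 fails */
}

static bool all_done(int i)
{
	return i >= 100;	/* pretend the walk never finishes early here */
}

static bool walk(int nsteps)
{
	bool ret = true;
	int i;

	for (i = 0; i < nsteps; i++) {
		if (step_errors(i))
			goto walk_done_err;	/* failure: flag it, then shared exit */
		if (all_done(i))
			goto walk_done;		/* early success: shared exit, ret stays true */
		continue;			/* normal iteration skips the labels below */
walk_done_err:
		ret = false;
		/* fall through to the common exit */
walk_done:
		cleanup();			/* one copy of the formerly duplicated exit code */
		break;
	}
	return ret;
}

int main(void)
{
	printf("walk: %s\n", walk(5) ? "ok" : "failed");
	return 0;
}

Same semantics as the repeated page_vma_mapped_walk_done()/ret =
false/break sequences below, just written once.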
> ---
> mm/rmap.c | 40 +++++++++++++++-------------------------
> 1 file changed, 15 insertions(+), 25 deletions(-)
>
> diff --git a/mm/rmap.c b/mm/rmap.c
> index e8fc5ecb59b2..ddffa30c79fb 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1679,9 +1679,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> /* Restore the mlock which got missed */
> if (!folio_test_large(folio))
> mlock_vma_folio(folio, vma);
> - page_vma_mapped_walk_done(&pvmw);
> - ret = false;
> - break;
> + goto walk_done_err;
> }
>
> pfn = pte_pfn(ptep_get(pvmw.pte));
> @@ -1719,11 +1717,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> */
> if (!anon) {
> VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
> - if (!hugetlb_vma_trylock_write(vma)) {
> - page_vma_mapped_walk_done(&pvmw);
> - ret = false;
> - break;
> - }
> + if (!hugetlb_vma_trylock_write(vma))
> + goto walk_done_err;
> if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
> hugetlb_vma_unlock_write(vma);
> flush_tlb_range(vma,
> @@ -1738,8 +1733,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> * actual page and drop map count
> * to zero.
> */
> - page_vma_mapped_walk_done(&pvmw);
> - break;
> + goto walk_done;
> }
> hugetlb_vma_unlock_write(vma);
> }
> @@ -1811,9 +1805,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> if (unlikely(folio_test_swapbacked(folio) !=
> folio_test_swapcache(folio))) {
> WARN_ON_ONCE(1);
> - ret = false;
> - page_vma_mapped_walk_done(&pvmw);
> - break;
> + goto walk_done_err;
> }
>
> /* MADV_FREE page check */
> @@ -1852,23 +1844,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> */
> set_pte_at(mm, address, pvmw.pte, pteval);
> folio_set_swapbacked(folio);
> - ret = false;
> - page_vma_mapped_walk_done(&pvmw);
> - break;
> + goto walk_done_err;
> }
>
> if (swap_duplicate(entry) < 0) {
> set_pte_at(mm, address, pvmw.pte, pteval);
> - ret = false;
> - page_vma_mapped_walk_done(&pvmw);
> - break;
> + goto walk_done_err;
> }
> if (arch_unmap_one(mm, vma, address, pteval) < 0) {
> swap_free(entry);
> set_pte_at(mm, address, pvmw.pte, pteval);
> - ret = false;
> - page_vma_mapped_walk_done(&pvmw);
> - break;
> + goto walk_done_err;
> }
>
> /* See folio_try_share_anon_rmap(): clear PTE first. */
> @@ -1876,9 +1862,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> folio_try_share_anon_rmap_pte(folio, subpage)) {
> swap_free(entry);
> set_pte_at(mm, address, pvmw.pte, pteval);
> - ret = false;
> - page_vma_mapped_walk_done(&pvmw);
> - break;
> + goto walk_done_err;
> }
> if (list_empty(&mm->mmlist)) {
> spin_lock(&mmlist_lock);
> @@ -1918,6 +1902,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> if (vma->vm_flags & VM_LOCKED)
> mlock_drain_local();
> folio_put(folio);
> + continue;
> +walk_done_err:
> + ret = false;
> +walk_done:
> + page_vma_mapped_walk_done(&pvmw);
> + break;
> }
>
> mmu_notifier_invalidate_range_end(&range);