Message-ID: <3c03d009-6a46-4321-a38b-9707b4558618@arm.com>
Date: Tue, 5 Dec 2023 12:22:37 +0000
From: Ryan Roberts <ryan.roberts@....com>
To: David Hildenbrand <david@...hat.com>, linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Hugh Dickins <hughd@...gle.com>,
Yin Fengwei <fengwei.yin@...el.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <muchun.song@...ux.dev>,
Peter Xu <peterx@...hat.com>
Subject: Re: [PATCH RFC 15/39] mm/huge_memory: batch rmap operations in
__split_huge_pmd_locked()
On 04/12/2023 14:21, David Hildenbrand wrote:
> Let's use folio_add_anon_rmap_ptes(), batching the rmap operations.
>
> While at it, use more folio operations (but only in the code branch we're
> touching), use VM_WARN_ON_FOLIO(), and pass RMAP_COMPOUND instead of
> manually setting PageAnonExclusive.
You mean RMAP_EXCLUSIVE?
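The hunk below does:

	if (anon_exclusive)
		rmap_flags = RMAP_EXCLUSIVE;
	...
	folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
				 vma, haddr, rmap_flags);

so I assume RMAP_COMPOUND in the commit message is just a typo for
RMAP_EXCLUSIVE.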
>
> We should never see non-anon pages on that branch: otherwise, the
> existing page_add_anon_rmap() call would have been flawed already.
>
> Signed-off-by: David Hildenbrand <david@...hat.com>
> ---
> mm/huge_memory.c | 23 +++++++++++++++--------
> 1 file changed, 15 insertions(+), 8 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index cb33c6e0404cf..2c037ab3f4916 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2099,6 +2099,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> unsigned long haddr, bool freeze)
> {
> struct mm_struct *mm = vma->vm_mm;
> + struct folio *folio;
> struct page *page;
> pgtable_t pgtable;
> pmd_t old_pmd, _pmd;
> @@ -2194,16 +2195,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> uffd_wp = pmd_swp_uffd_wp(old_pmd);
> } else {
> page = pmd_page(old_pmd);
> + folio = page_folio(page);
> if (pmd_dirty(old_pmd)) {
> dirty = true;
> - SetPageDirty(page);
> + folio_set_dirty(folio);
> }
> write = pmd_write(old_pmd);
> young = pmd_young(old_pmd);
> soft_dirty = pmd_soft_dirty(old_pmd);
> uffd_wp = pmd_uffd_wp(old_pmd);
>
> - VM_BUG_ON_PAGE(!page_count(page), page);
> + VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
> + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
>
> /*
> * Without "freeze", we'll simply split the PMD, propagating the
> @@ -2220,11 +2223,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> *
> * See page_try_share_anon_rmap(): invalidate PMD first.
> */
> - anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
> + anon_exclusive = PageAnonExclusive(page);
> if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
> freeze = false;
> - if (!freeze)
> - page_ref_add(page, HPAGE_PMD_NR - 1);
> + if (!freeze) {
> + rmap_t rmap_flags = RMAP_NONE;
> +
> + folio_ref_add(folio, HPAGE_PMD_NR - 1);
> + if (anon_exclusive)
> + rmap_flags = RMAP_EXCLUSIVE;
nit: I'd be inclined to make this |= since you're accumulating optional flags.
Yes, it's the only one, so it still works as-is...
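i.e. (same behaviour today, just future-proof if more flags get added):

	if (anon_exclusive)
		rmap_flags |= RMAP_EXCLUSIVE;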
> + folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
> + vma, haddr, rmap_flags);
> + }
> }
>
> /*
> @@ -2267,8 +2277,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
> if (write)
> entry = pte_mkwrite(entry, vma);
> - if (anon_exclusive)
> - SetPageAnonExclusive(page + i);
> if (!young)
> entry = pte_mkold(entry);
> /* NOTE: this may set soft-dirty too on some archs */
> @@ -2278,7 +2286,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> entry = pte_mksoft_dirty(entry);
> if (uffd_wp)
> entry = pte_mkuffd_wp(entry);
> - page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
> }
> VM_BUG_ON(!pte_none(ptep_get(pte)));
> set_pte_at(mm, addr, pte, entry);