Message-ID: <xdkrref3md2rfc3sou6lta2vcevz6e4ckjd6q67znpipkvxbmw@gftpxkrtlqnx>
Date: Thu, 12 Jun 2025 12:17:58 +1000
From: Alistair Popple <apopple@...dia.com>
To: David Hildenbrand <david@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
nvdimm@...ts.linux.dev, linux-cxl@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>, Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>, Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>, Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>, Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>, Nico Pache <npache@...hat.com>,
Ryan Roberts <ryan.roberts@....com>, Dev Jain <dev.jain@....com>,
Dan Williams <dan.j.williams@...el.com>, Oscar Salvador <osalvador@...e.de>
Subject: Re: [PATCH v2 2/3] mm/huge_memory: don't mark refcounted folios
special in vmf_insert_folio_pmd()
On Wed, Jun 11, 2025 at 02:06:53PM +0200, David Hildenbrand wrote:
> Marking PMDs that map a "normal" refcounted folio as special is
> against our rules documented for vm_normal_page().
>
> Fortunately, there are not that many pmd_special() checks that can be
> misled, and most vm_normal_page_pmd()/vm_normal_folio_pmd() users that
> would get this wrong right now are rather harmless: e.g., none so far
> bases the decision whether to grab a folio reference on the special bit.
>
> Well, and GUP-fast will fall back to GUP-slow. All in all, there seem
> to be no big implications so far.
>
> Getting this right will become more important as we use
> vm_normal_folio_pmd() in more places.
>
> Fix it by teaching insert_pfn_pmd() to properly handle folios and
> pfns -- moving refcount/mapcount/etc handling in there, renaming it to
> insert_pmd(), and distinguishing between both cases using a new simple
> "struct folio_or_pfn" structure.
>
> Use folio_mk_pmd() to create a pmd for a folio cleanly.
>
> Fixes: 6c88f72691f8 ("mm/huge_memory: add vmf_insert_folio_pmd()")
> Signed-off-by: David Hildenbrand <david@...hat.com>
> ---
> mm/huge_memory.c | 58 ++++++++++++++++++++++++++++++++----------------
> 1 file changed, 39 insertions(+), 19 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 49b98082c5401..7e3e9028873e5 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1372,9 +1372,17 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> return __do_huge_pmd_anonymous_page(vmf);
> }
>
> -static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> - pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
> - pgtable_t pgtable)
> +struct folio_or_pfn {
> + union {
> + struct folio *folio;
> + pfn_t pfn;
> + };
> + bool is_folio;
> +};
I know it's simple, but I'm still not a fan, particularly as these types
of patterns tend to proliferate once introduced. See below for a
suggestion.
> +static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
> + pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
> + bool write, pgtable_t pgtable)
> {
> struct mm_struct *mm = vma->vm_mm;
> pmd_t entry;
> @@ -1382,8 +1390,11 @@ static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> lockdep_assert_held(pmd_lockptr(mm, pmd));
>
> if (!pmd_none(*pmd)) {
> + const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
> + pfn_t_to_pfn(fop.pfn);
> +
> if (write) {
> - if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
> + if (pmd_pfn(*pmd) != pfn) {
> WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
> return -EEXIST;
> }
> @@ -1396,11 +1407,19 @@ static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
> return -EEXIST;
> }
>
> - entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
> - if (pfn_t_devmap(pfn))
> - entry = pmd_mkdevmap(entry);
> - else
> - entry = pmd_mkspecial(entry);
> + if (fop.is_folio) {
> + entry = folio_mk_pmd(fop.folio, vma->vm_page_prot);
> +
> + folio_get(fop.folio);
> + folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
> + add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
> + } else {
> + entry = pmd_mkhuge(pfn_t_pmd(fop.pfn, prot));
> + if (pfn_t_devmap(fop.pfn))
> + entry = pmd_mkdevmap(entry);
> + else
> + entry = pmd_mkspecial(entry);
> + }
Could we change insert_pfn_pmd() to insert_pmd_entry() and have callers
call something like pfn_to_pmd_entry() or folio_to_pmd_entry() to create
the pmd_t entry as appropriate? The entry would then be passed to
insert_pmd_entry(), which does the bits common to both.
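Roughly like this (completely untested, just to sketch the idea;
insert_pmd_entry() would be today's insert_pfn_pmd() minus the entry
construction):

static pmd_t pfn_to_pmd_entry(pfn_t pfn, pgprot_t prot)
{
	pmd_t entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));

	if (pfn_t_devmap(pfn))
		return pmd_mkdevmap(entry);
	return pmd_mkspecial(entry);
}

static pmd_t folio_to_pmd_entry(struct folio *folio,
				struct vm_area_struct *vma)
{
	/* Take the reference and do the rmap/counter accounting here */
	folio_get(folio);
	folio_add_file_rmap_pmd(folio, &folio->page, vma);
	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);

	return folio_mk_pmd(folio, vma->vm_page_prot);
}

One wrinkle: the refcount/rmap side effects would then happen before the
pmd_none() checks in insert_pmd_entry(), so the -EEXIST paths would need
to undo them (or the callers keep doing that part themselves, as before
this patch).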
> if (write) {
> entry = pmd_mkyoung(pmd_mkdirty(entry));
> entry = maybe_pmd_mkwrite(entry, vma);
> @@ -1431,6 +1450,9 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
> unsigned long addr = vmf->address & PMD_MASK;
> struct vm_area_struct *vma = vmf->vma;
> pgprot_t pgprot = vma->vm_page_prot;
> + struct folio_or_pfn fop = {
> + .pfn = pfn,
> + };
> pgtable_t pgtable = NULL;
> spinlock_t *ptl;
> int error;
> @@ -1458,8 +1480,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
> pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
>
> ptl = pmd_lock(vma->vm_mm, vmf->pmd);
> - error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write,
> - pgtable);
> + error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
> + pgtable);
> spin_unlock(ptl);
> if (error && pgtable)
> pte_free(vma->vm_mm, pgtable);
> @@ -1474,6 +1496,10 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
> struct vm_area_struct *vma = vmf->vma;
> unsigned long addr = vmf->address & PMD_MASK;
> struct mm_struct *mm = vma->vm_mm;
> + struct folio_or_pfn fop = {
> + .folio = folio,
> + .is_folio = true,
> + };
> spinlock_t *ptl;
> pgtable_t pgtable = NULL;
> int error;
> @@ -1491,14 +1517,8 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
> }
>
> ptl = pmd_lock(mm, vmf->pmd);
> - if (pmd_none(*vmf->pmd)) {
> - folio_get(folio);
> - folio_add_file_rmap_pmd(folio, &folio->page, vma);
> - add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR);
> - }
> - error = insert_pfn_pmd(vma, addr, vmf->pmd,
> - pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot,
> - write, pgtable);
> + error = insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot,
> + write, pgtable);
> spin_unlock(ptl);
> if (error && pgtable)
> pte_free(mm, pgtable);
> --
> 2.49.0
>