[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240115184900.GV734935@nvidia.com>
Date: Mon, 15 Jan 2024 14:49:00 -0400
From: Jason Gunthorpe <jgg@...dia.com>
To: peterx@...hat.com
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
James Houghton <jthoughton@...gle.com>,
David Hildenbrand <david@...hat.com>,
"Kirill A . Shutemov" <kirill@...temov.name>,
Yang Shi <shy828301@...il.com>, linux-riscv@...ts.infradead.org,
Andrew Morton <akpm@...ux-foundation.org>,
"Aneesh Kumar K . V" <aneesh.kumar@...nel.org>,
Rik van Riel <riel@...riel.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Mike Rapoport <rppt@...nel.org>, John Hubbard <jhubbard@...dia.com>,
Vlastimil Babka <vbabka@...e.cz>,
Michael Ellerman <mpe@...erman.id.au>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Andrew Jones <andrew.jones@...ux.dev>,
linuxppc-dev@...ts.ozlabs.org,
Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <muchun.song@...ux.dev>,
linux-arm-kernel@...ts.infradead.org,
Christoph Hellwig <hch@...radead.org>,
Lorenzo Stoakes <lstoakes@...il.com>,
Matthew Wilcox <willy@...radead.org>
Subject: Re: [PATCH v2 10/13] mm/gup: Handle huge pud for follow_pud_mask()
On Wed, Jan 03, 2024 at 05:14:20PM +0800, peterx@...hat.com wrote:
> diff --git a/mm/gup.c b/mm/gup.c
> index 63845b3ec44f..760406180222 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -525,6 +525,70 @@ static struct page *no_page_table(struct vm_area_struct *vma,
> return NULL;
> }
>
> +#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
> +static struct page *follow_huge_pud(struct vm_area_struct *vma,
> + unsigned long addr, pud_t *pudp,
> + int flags, struct follow_page_context *ctx)
> +{
> + struct mm_struct *mm = vma->vm_mm;
> + struct page *page;
> + pud_t pud = *pudp;
> + unsigned long pfn = pud_pfn(pud);
> + int ret;
> +
> + assert_spin_locked(pud_lockptr(mm, pudp));
> +
> + if ((flags & FOLL_WRITE) && !pud_write(pud))
> + return NULL;
> +
> + if (!pud_present(pud))
> + return NULL;
> +
> + pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
> +
> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
> + if (pud_devmap(pud)) {
Can this use IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)?
> + /*
> + * device mapped pages can only be returned if the caller
> + * will manage the page reference count.
> + *
> + * At least one of FOLL_GET | FOLL_PIN must be set, so
> + * assert that here:
> + */
> + if (!(flags & (FOLL_GET | FOLL_PIN)))
> + return ERR_PTR(-EEXIST);
> +
> + if (flags & FOLL_TOUCH)
> + touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
> +
> + ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
> + if (!ctx->pgmap)
> + return ERR_PTR(-EFAULT);
> + }
> +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
> + page = pfn_to_page(pfn);
> +
> + if (!pud_devmap(pud) && !pud_write(pud) &&
> + gup_must_unshare(vma, flags, page))
> + return ERR_PTR(-EMLINK);
> +
> + ret = try_grab_page(page, flags);
> + if (ret)
> + page = ERR_PTR(ret);
> + else
> + ctx->page_mask = HPAGE_PUD_NR - 1;
> +
> + return page;
> +}
> +#else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
> +static struct page *follow_huge_pud(struct vm_area_struct *vma,
> + unsigned long addr, pud_t *pudp,
> + int flags, struct follow_page_context *ctx)
> +{
> + return NULL;
> +}
> +#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
> +
> static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
> pte_t *pte, unsigned int flags)
> {
> @@ -760,11 +824,11 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
>
> pudp = pud_offset(p4dp, address);
> pud = READ_ONCE(*pudp);
> - if (pud_none(pud))
> + if (pud_none(pud) || !pud_present(pud))
> return no_page_table(vma, flags, address);
Isn't 'pud_none() || !pud_present()' redundant? A none pud is
non-present, by definition?
> - if (pud_devmap(pud)) {
> + if (pud_huge(pud)) {
> ptl = pud_lock(mm, pudp);
> - page = follow_devmap_pud(vma, address, pudp, flags, &ctx->pgmap);
> + page = follow_huge_pud(vma, address, pudp, flags, ctx);
> spin_unlock(ptl);
> if (page)
> return page;
Otherwise it looks OK to me.
Reviewed-by: Jason Gunthorpe <jgg@...dia.com>
Jason
Powered by blists - more mailing lists