Message-ID: <0a28adde-acaf-4d55-96ba-c32d6113285f@nvidia.com>
Date: Sat, 6 Sep 2025 22:14:19 -0700
From: John Hubbard <jhubbard@...dia.com>
To: David Hildenbrand <david@...hat.com>, linux-kernel@...r.kernel.org
Cc: Alexander Potapenko <glider@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Brendan Jackman <jackmanb@...gle.com>, Christoph Lameter <cl@...two.org>,
Dennis Zhou <dennis@...nel.org>, Dmitry Vyukov <dvyukov@...gle.com>,
dri-devel@...ts.freedesktop.org, intel-gfx@...ts.freedesktop.org,
iommu@...ts.linux.dev, io-uring@...r.kernel.org,
Jason Gunthorpe <jgg@...dia.com>, Jens Axboe <axboe@...nel.dk>,
Johannes Weiner <hannes@...xchg.org>, kasan-dev@...glegroups.com,
kvm@...r.kernel.org, "Liam R. Howlett" <Liam.Howlett@...cle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>, linux-arm-kernel@...s.com,
linux-arm-kernel@...ts.infradead.org, linux-crypto@...r.kernel.org,
linux-ide@...r.kernel.org, linux-kselftest@...r.kernel.org,
linux-mips@...r.kernel.org, linux-mmc@...r.kernel.org, linux-mm@...ck.org,
linux-riscv@...ts.infradead.org, linux-s390@...r.kernel.org,
linux-scsi@...r.kernel.org, Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Marco Elver <elver@...gle.com>, Marek Szyprowski <m.szyprowski@...sung.com>,
Michal Hocko <mhocko@...e.com>, Mike Rapoport <rppt@...nel.org>,
Muchun Song <muchun.song@...ux.dev>, netdev@...r.kernel.org,
Oscar Salvador <osalvador@...e.de>, Peter Xu <peterx@...hat.com>,
Robin Murphy <robin.murphy@....com>, Suren Baghdasaryan <surenb@...gle.com>,
Tejun Heo <tj@...nel.org>, virtualization@...ts.linux.dev,
Vlastimil Babka <vbabka@...e.cz>, wireguard@...ts.zx2c4.com, x86@...nel.org,
Zi Yan <ziy@...dia.com>
Subject: Re: [PATCH v2 19/37] mm/gup: remove record_subpages()
On 9/5/25 11:56 PM, David Hildenbrand wrote:
> On 06.09.25 03:05, John Hubbard wrote:
>> On 9/1/25 8:03 AM, David Hildenbrand wrote:
...
> Well, there is a lot I dislike about record_subpages() to go back
> there.
> Starting with "as Willy keeps explaining, the concept of subpages does
> not exist" and ending with "why do we fill out the array even on failure".
>
> :)
I am also very glad to see the entire concept of subpages disappear.
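For anyone skimming the thread, the helper being removed is roughly the
following -- paraphrasing from memory, so the exact in-tree version may
differ slightly:

	/* rough reconstruction, not copied verbatim from the tree: */
	static int record_subpages(struct page *page, unsigned long sz,
				   unsigned long addr, unsigned long end,
				   struct page **pages)
	{
		struct page *start_page;
		int nr;

		/* the page that corresponds to @addr within the sz-sized leaf */
		start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
		/* unconditionally fill the array, one entry per PAGE_SIZE step */
		for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
			pages[nr] = nth_page(start_page, nr);

		return nr;
	}

If I remember the call sites right, callers ran this before
try_grab_folio_fast(), which is exactly the "fill out the array even on
failure" part.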
>>
>> Now it's been returned to its original, cryptic form.
>>
>
> The code in the caller was so uncryptic that both Lorenzo and I missed
> that magical addition. :P
>
>> Just my take on it, for whatever that's worth. :)
>
> As always, appreciated.
>
> I could of course keep the simple loop in some "record_folio_pages"
> function and clean up what I dislike about record_subpages().
>
> But I would much rather have the call chain cleaned up instead, if
> possible.
>
Right! The main thing record_subpages() helped with was showing what was
going on: a named function call sometimes does a lot of self-documenting.
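If the call chain cleanup doesn't pan out, even a tiny named helper would
keep that property. Purely as a hypothetical sketch, with the name made up
from your "record_folio_pages" idea above (not code from any tree):

	/*
	 * Hypothetical sketch only -- name and shape are invented here,
	 * not taken from the series.
	 */
	static unsigned long record_folio_pages(struct page *page,
			unsigned long nr_pages, struct page **pages)
	{
		unsigned long i;

		/* copy nr_pages consecutive pages into the output array */
		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		return nr_pages;
	}

The only point would be that the name documents the "copy this folio's
pages into the array" step at the call site.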
>
> Roughly, what I am thinking about (limiting it to the pte+pmd case) is
> the following:
The code below looks much cleaner, that's great!
thanks,
--
John Hubbard
>
>
> From d6d6d21dbf435d8030782a627175e36e6c7b2dfb Mon Sep 17 00:00:00 2001
> From: David Hildenbrand <david@...hat.com>
> Date: Sat, 6 Sep 2025 08:33:42 +0200
> Subject: [PATCH] tmp
>
> Signed-off-by: David Hildenbrand <david@...hat.com>
> ---
> mm/gup.c | 79 ++++++++++++++++++++++++++------------------------------
> 1 file changed, 36 insertions(+), 43 deletions(-)
>
> diff --git a/mm/gup.c b/mm/gup.c
> index 22420f2069ee1..98907ead749c0 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -2845,12 +2845,11 @@ static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start,
> * also check pmd here to make sure pmd doesn't change (corresponds to
> * pmdp_collapse_flush() in the THP collapse code path).
> */
> -static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
> - unsigned long end, unsigned int flags, struct page **pages,
> - int *nr)
> +static unsigned long gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
> + unsigned long end, unsigned int flags, struct page **pages)
> {
> struct dev_pagemap *pgmap = NULL;
> - int ret = 0;
> + unsigned long nr_pages = 0;
> pte_t *ptep, *ptem;
>
> ptem = ptep = pte_offset_map(&pmd, addr);
> @@ -2908,24 +2907,20 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
> * details.
> */
> if (flags & FOLL_PIN) {
> - ret = arch_make_folio_accessible(folio);
> - if (ret) {
> + if (arch_make_folio_accessible(folio)) {
> gup_put_folio(folio, 1, flags);
> goto pte_unmap;
> }
> }
> folio_set_referenced(folio);
> - pages[*nr] = page;
> - (*nr)++;
> + pages[nr_pages++] = page;
> } while (ptep++, addr += PAGE_SIZE, addr != end);
>
> - ret = 1;
> -
> pte_unmap:
> if (pgmap)
> put_dev_pagemap(pgmap);
> pte_unmap(ptem);
> - return ret;
> + return nr_pages;
> }
> #else
>
> @@ -2938,21 +2933,24 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
> * get_user_pages_fast_only implementation that can pin pages. Thus it's still
> * useful to have gup_fast_pmd_leaf even if we can't operate on ptes.
> */
> -static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
> - unsigned long end, unsigned int flags, struct page **pages,
> - int *nr)
> +static unsigned long gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
> + unsigned long end, unsigned int flags, struct page **pages)
> {
> return 0;
> }
> #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
>
> -static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
> - unsigned long end, unsigned int flags, struct page **pages,
> - int *nr)
> +static unsigned long gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
> + unsigned long end, unsigned int flags, struct page **pages)
> {
> + const unsigned long nr_pages = (end - addr) >> PAGE_SHIFT;
> struct page *page;
> struct folio *folio;
> - int refs;
> + unsigned long i;
> +
> + /* See gup_fast_pte_range() */
> + if (pmd_protnone(orig))
> + return 0;
>
> if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
> return 0;
> @@ -2960,33 +2958,30 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
> if (pmd_special(orig))
> return 0;
>
> - refs = (end - addr) >> PAGE_SHIFT;
> page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
>
> - folio = try_grab_folio_fast(page, refs, flags);
> + folio = try_grab_folio_fast(page, nr_pages, flags);
> if (!folio)
> return 0;
>
> if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
> - gup_put_folio(folio, refs, flags);
> + gup_put_folio(folio, nr_pages, flags);
> return 0;
> }
>
> if (!gup_fast_folio_allowed(folio, flags)) {
> - gup_put_folio(folio, refs, flags);
> + gup_put_folio(folio, nr_pages, flags);
> return 0;
> }
> if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
> - gup_put_folio(folio, refs, flags);
> + gup_put_folio(folio, nr_pages, flags);
> return 0;
> }
>
> - pages += *nr;
> - *nr += refs;
> - for (; refs; refs--)
> + for (i = 0; i < nr_pages; i++)
> *(pages++) = page++;
> folio_set_referenced(folio);
> - return 1;
> + return nr_pages;
> }
>
> static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
> @@ -3033,11 +3028,11 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
> return 1;
> }
>
> -static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
> - unsigned long end, unsigned int flags, struct page **pages,
> - int *nr)
> +static unsigned long gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
> + unsigned long end, unsigned int flags, struct page **pages)
> {
> - unsigned long next;
> + unsigned long cur_nr_pages, next;
> + unsigned long nr_pages = 0;
> pmd_t *pmdp;
>
> pmdp = pmd_offset_lockless(pudp, pud, addr);
> @@ -3046,23 +3041,21 @@ static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
>
> next = pmd_addr_end(addr, end);
> if (!pmd_present(pmd))
> - return 0;
> + break;
>
> - if (unlikely(pmd_leaf(pmd))) {
> - /* See gup_fast_pte_range() */
> - if (pmd_protnone(pmd))
> - return 0;
> + if (unlikely(pmd_leaf(pmd)))
> + cur_nr_pages = gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags, pages);
> + else
> + cur_nr_pages = gup_fast_pte_range(pmd, pmdp, addr, next, flags, pages);
>
> - if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
> - pages, nr))
> - return 0;
> + nr_pages += cur_nr_pages;
> + pages += cur_nr_pages;
>
> - } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
> - pages, nr))
> - return 0;
> + if (cur_nr_pages != (next - addr) >> PAGE_SHIFT)
> + break;
> } while (pmdp++, addr = next, addr != end);
>
> - return 1;
> + return nr_pages;
> }
>
> static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,