Message-ID: <58f9d004-918d-4f66-85e7-d090db0af5de@redhat.com>
Date: Wed, 18 Jun 2025 10:54:38 +0200
From: David Hildenbrand <david@...hat.com>
To: lizhe.67@...edance.com
Cc: akpm@...ux-foundation.org, alex.williamson@...hat.com,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
peterx@...hat.com
Subject: Re: [PATCH v4 3/3] vfio/type1: optimize vfio_unpin_pages_remote() for
large folio

On 18.06.25 08:11, lizhe.67@...edance.com wrote:
> On Tue, 17 Jun 2025 15:47:09 +0200, david@...hat.com wrote:
>
>>> What do you think of this implementation?
>>>
>>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>>> index 242b05671502..eb91f99ea973 100644
>>> --- a/include/linux/mm.h
>>> +++ b/include/linux/mm.h
>>> @@ -2165,6 +2165,23 @@ static inline long folio_nr_pages(const struct folio *folio)
>>> return folio_large_nr_pages(folio);
>>> }
>>>
>>> +/*
>>> + * folio_remaining_pages - Counts the number of pages from a given
>>> + * start page to the end of the folio.
>>> + *
>>> + * @folio: Pointer to folio
>>> + * @start_page: The starting page from which to begin counting.
>>> + *
>>> + * Returned number includes the provided start page.
>>> + *
>>> + * The caller must ensure that @start_page belongs to @folio.
>>> + */
>>> +static inline unsigned long folio_remaining_pages(struct folio *folio,
>>> + struct page *start_page)
>>> +{
>>> + return folio_nr_pages(folio) - folio_page_idx(folio, start_page);
>>> +}
>>> +
>>> /* Only hugetlbfs can allocate folios larger than MAX_ORDER */
>>> #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
>>> #define MAX_FOLIO_NR_PAGES (1UL << PUD_ORDER)
>>> diff --git a/mm/gup.c b/mm/gup.c
>>> index 15debead5f5b..14ae2e3088b4 100644
>>> --- a/mm/gup.c
>>> +++ b/mm/gup.c
>>> @@ -242,7 +242,7 @@ static inline struct folio *gup_folio_range_next(struct page *start,
>>>
>>> if (folio_test_large(folio))
>>> nr = min_t(unsigned int, npages - i,
>>> - folio_nr_pages(folio) - folio_page_idx(folio, next));
>>> + folio_remaining_pages(folio, next));
>>>
>>> *ntails = nr;
>>> return folio;
>>> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
>>> index b2fc5266e3d2..34e85258060c 100644
>>> --- a/mm/page_isolation.c
>>> +++ b/mm/page_isolation.c
>>> @@ -96,7 +96,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
>>> return page;
>>> }
>>>
>>> - skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
>>> + skip_pages = folio_remaining_pages(folio, page);
>>> pfn += skip_pages - 1;
>>> continue;
>>> }
>>> ---
>>
>> Guess I would have pulled the "min" in there, but passing something like
>> ULONG_MAX for the page_isolation case also looks rather ugly.
>
> Yes, the page_isolation case does not require the 'min' logic. Since
> some call sites would use folio_remaining_pages() without needing min,
> we could keep the helper itself min-less and add a small wrapper around
> folio_remaining_pages() only for the specific call sites where min is
> necessary.
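
A minimal sketch of that wrapper idea, for illustration only: the min-less
helper is the one quoted above, and the name folio_remaining_pages_capped()
is made up here, not something that exists in the tree.

static inline unsigned long folio_remaining_pages(struct folio *folio,
		struct page *start_page)
{
	/* Pages from @start_page to the end of @folio, inclusive. */
	return folio_nr_pages(folio) - folio_page_idx(folio, start_page);
}

static inline unsigned long folio_remaining_pages_capped(struct folio *folio,
		struct page *start_page, unsigned long max_npages)
{
	/* Same count, but capped, e.g. for the gup_folio_range_next() case. */
	return min(folio_remaining_pages(folio, start_page), max_npages);
}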

I would just do something like the following, removing gup_folio_range_next() completely:

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 98a606908307b..6c224b1c6c169 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1995,6 +1995,32 @@ static inline long folio_nr_pages(const struct folio *folio)
return folio_large_nr_pages(folio);
}
+/**
+ * folio_remaining_pages - The remaining number of pages in the folio.
+ * @folio: The folio.
+ * @page: The folio page at which to start counting.
+ * @max_npages: Limit how far to count.
+ *
+ * The returned number will include the provided page.
+ *
+ * The caller must ensure that @page belongs to @folio. When setting
+ * @max_npages to ULONG_MAX, the parameter is effectively ignored.
+ *
+ * Return: The remaining number of pages, limited by @max_npages.
+ */
+static inline unsigned long folio_remaining_pages(struct folio *folio,
+ struct page *page, unsigned long max_npages)
+{
+ unsigned long nr;
+
+ if (!folio_test_large(folio))
+ return 1;
+ nr = folio_large_nr_pages(folio) - folio_page_idx(folio, page);
+ if (__builtin_constant_p(max_npages) && max_npages == ULONG_MAX)
+ return nr;
+ return min_t(unsigned long, nr, max_npages);
+}
+
/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
#define MAX_FOLIO_NR_PAGES (1UL << PUD_ORDER)
diff --git a/mm/gup.c b/mm/gup.c
index 6888e871a74a9..3385428d028f6 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -234,21 +234,6 @@ void folio_add_pin(struct folio *folio)
}
}
-static inline struct folio *gup_folio_range_next(struct page *start,
- unsigned long npages, unsigned long i, unsigned int *ntails)
-{
- struct page *next = nth_page(start, i);
- struct folio *folio = page_folio(next);
- unsigned int nr = 1;
-
- if (folio_test_large(folio))
- nr = min_t(unsigned int, npages - i,
- folio_nr_pages(folio) - folio_page_idx(folio, next));
-
- *ntails = nr;
- return folio;
-}
-
static inline struct folio *gup_folio_next(struct page **list,
unsigned long npages, unsigned long i, unsigned int *ntails)
{
@@ -356,11 +341,13 @@ void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty)
{
unsigned long i;
- struct folio *folio;
unsigned int nr;
for (i = 0; i < npages; i += nr) {
- folio = gup_folio_range_next(page, npages, i, &nr);
+ struct page *next = nth_page(page, i);
+ struct folio *folio = page_folio(next);
+
+ nr = folio_remaining_pages(folio, next, npages - i);
if (make_dirty && !folio_test_dirty(folio)) {
folio_lock(folio);
folio_mark_dirty(folio);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index ece3bfc56bcd5..6f75defb38d73 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -96,7 +96,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
return page;
}
- skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
+ skip_pages = folio_remaining_pages(folio, page, ULONG_MAX);
pfn += skip_pages - 1;
continue;
}
--

But it's up to you; anything with such a helper function makes the code easier to digest.
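
For context on how this feeds back into the series itself, here is a rough,
hypothetical caller-side sketch, not the actual v4 vfio patch: the point of
the series is that vfio_unpin_pages_remote() can hand a whole physically
contiguous range to unpin_user_page_range_dirty_lock() in one call and let
the per-folio batching above do the rest. The function and variable names
below are placeholders.

/*
 * Hypothetical illustration only -- not the actual vfio/type1 patch.
 * A caller that knows it pinned a physically contiguous pfn range can
 * unpin it with a single call; unpin_user_page_range_dirty_lock()
 * then batches per folio internally (see the gup.c loop above).
 */
static void unpin_contiguous_range(unsigned long pfn, unsigned long npages,
				   bool dirty)
{
	/* One call for the whole range instead of one call per page. */
	unpin_user_page_range_dirty_lock(pfn_to_page(pfn), npages, dirty);
}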
--
Cheers,
David / dhildenb