Message-ID: <e6ppwz5t4p4kvir6eqzoto4y5fmdjdxdyvxvtw43ncly4l4ogr@7ruqsay6i2h2>
Date: Wed, 6 Nov 2024 12:44:45 +0200
From: "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>
To: Zi Yan <ziy@...dia.com>
Cc: linux-mm@...ck.org, "Matthew Wilcox (Oracle)" <willy@...radead.org>,
Ryan Roberts <ryan.roberts@....com>, Hugh Dickins <hughd@...gle.com>,
David Hildenbrand <david@...hat.com>, Yang Shi <yang@...amperecomputing.com>,
Miaohe Lin <linmiaohe@...wei.com>, Kefeng Wang <wangkefeng.wang@...wei.com>,
Yu Zhao <yuzhao@...gle.com>, John Hubbard <jhubbard@...dia.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 1/6] mm/huge_memory: add two new (yet used) functions
for folio_split()
On Fri, Nov 01, 2024 at 11:03:52AM -0400, Zi Yan wrote:
> This is a preparation patch, both added functions are not used yet.
>
In subject: s/yet/not yet/
> The added __folio_split_without_mapping() is able to split a folio with
> its mapping removed in two manners: 1) uniform split (the existing way),
> and 2) buddy allocator like split.
>
> The added __split_folio_to_order() can split a folio into any lower order.
> For uniform split, __folio_split_without_mapping() calls it once to split
> the given folio to the new order. For buddy allocator split,
> __folio_split_without_mapping() calls it (folio_order - new_order) times
> and each time splits the folio containing the given page to one lower
> order.
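Just to check that I read the buddy variant right: splitting an order-9
folio down to order 3 means 9 - 3 = 6 calls to __split_folio_to_order(),
leaving one folio each of order 8, 7, 6, 5 and 4 plus two of order 3,
with only the half that contains @page being split further at each step.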
>
> Signed-off-by: Zi Yan <ziy@...dia.com>
> ---
> mm/huge_memory.c | 328 ++++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 327 insertions(+), 1 deletion(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index f92068864469..f7649043ddb7 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -3135,7 +3135,6 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
> static void lru_add_page_tail(struct folio *folio, struct page *tail,
> struct lruvec *lruvec, struct list_head *list)
> {
> - VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
> VM_BUG_ON_FOLIO(PageLRU(tail), folio);
> lockdep_assert_held(&lruvec->lru_lock);
>
> @@ -3379,6 +3378,333 @@ bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
> caller_pins;
> }
>
> +static long page_in_folio_offset(struct page *page, struct folio *folio)
> +{
> + long nr_pages = folio_nr_pages(folio);
> + unsigned long pages_pfn = page_to_pfn(page);
> + unsigned long folios_pfn = folio_pfn(folio);
> +
> + if (pages_pfn >= folios_pfn && pages_pfn < (folios_pfn + nr_pages))
> + return pages_pfn - folios_pfn;
> +
> + return -EINVAL;
> +}
> +
> +/*
> + * It splits @folio into @new_order folios and copies the @folio metadata to
> + * all the resulting folios.
> + */
> +static int __split_folio_to_order(struct folio *folio, int new_order)
> +{
> + int curr_order = folio_order(folio);
> + long nr_pages = folio_nr_pages(folio);
> + long new_nr_pages = 1 << new_order;
> + long index;
> +
> + if (curr_order <= new_order)
> + return -EINVAL;
> +
> + for (index = new_nr_pages; index < nr_pages; index += new_nr_pages) {
Hm. It is not clear why you skip the first new_nr_pages range. It is
worth a comment.
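Something along these lines, if I read the intent right (my wording,
please correct):

	/*
	 * Skip the first new_nr_pages chunk: it contains the head page
	 * and stays as the (now smaller) head folio, so only the tail
	 * chunks need their metadata set up.
	 */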
> + struct page *head = &folio->page;
> + struct page *second_head = head + index;
I am not sure about the 'second_head' name. Why is it better than page_tail?
> +
> + /*
> + * Careful: new_folio is not a "real" folio before we cleared PageTail.
> + * Don't pass it around before clear_compound_head().
> + */
> + struct folio *new_folio = (struct folio *)second_head;
> +
> + VM_BUG_ON_PAGE(atomic_read(&second_head->_mapcount) != -1, second_head);
> +
> + /*
> + * Clone page flags before unfreezing refcount.
> + *
> + * After successful get_page_unless_zero() might follow flags change,
> + * for example lock_page() which set PG_waiters.
> + *
> + * Note that for mapped sub-pages of an anonymous THP,
> + * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
> + * the migration entry instead from where remap_page() will restore it.
> + * We can still have PG_anon_exclusive set on effectively unmapped and
> + * unreferenced sub-pages of an anonymous THP: we can simply drop
> + * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
> + */
> + second_head->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
> + second_head->flags |= (head->flags &
> + ((1L << PG_referenced) |
> + (1L << PG_swapbacked) |
> + (1L << PG_swapcache) |
> + (1L << PG_mlocked) |
> + (1L << PG_uptodate) |
> + (1L << PG_active) |
> + (1L << PG_workingset) |
> + (1L << PG_locked) |
> + (1L << PG_unevictable) |
> +#ifdef CONFIG_ARCH_USES_PG_ARCH_2
> + (1L << PG_arch_2) |
> +#endif
> +#ifdef CONFIG_ARCH_USES_PG_ARCH_3
> + (1L << PG_arch_3) |
> +#endif
> + (1L << PG_dirty) |
> + LRU_GEN_MASK | LRU_REFS_MASK));
> +
> + /* ->mapping in first and second tail page is replaced by other uses */
> + VM_BUG_ON_PAGE(new_nr_pages > 2 && second_head->mapping != TAIL_MAPPING,
> + second_head);
> + second_head->mapping = head->mapping;
> + second_head->index = head->index + index;
> +
> + /*
> + * page->private should not be set in tail pages. Fix up and warn once
> + * if private is unexpectedly set.
> + */
> + if (unlikely(second_head->private)) {
> + VM_WARN_ON_ONCE_PAGE(true, second_head);
> + second_head->private = 0;
> + }
An empty line is missing here.
> + if (folio_test_swapcache(folio))
> + new_folio->swap.val = folio->swap.val + index;
> +
> + /* Page flags must be visible before we make the page non-compound. */
> + smp_wmb();
> +
> + /*
> + * Clear PageTail before unfreezing page refcount.
> + *
> + * After successful get_page_unless_zero() might follow put_page()
> + * which needs correct compound_head().
> + */
> + clear_compound_head(second_head);
> + if (new_order) {
> + prep_compound_page(second_head, new_order);
> + folio_set_large_rmappable(new_folio);
> +
> + folio_set_order(folio, new_order);
> + } else {
> + if (PageHead(head))
> + ClearPageCompound(head);
Huh? You only need to test for PageHead() because this is inside the loop.
It has to be done once, after the loop is done.
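Something like this after the loop instead (untested):

	if (!new_order)
		ClearPageCompound(&folio->page);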
> + }
> +
> + if (folio_test_young(folio))
> + folio_set_young(new_folio);
> + if (folio_test_idle(folio))
> + folio_set_idle(new_folio);
> +
> + folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
> + }
> +
> + return 0;
> +}
> +
> +#define for_each_folio_until_end_safe(iter, iter2, start, end) \
> + for (iter = start, iter2 = folio_next(start); \
> + iter != end; \
> + iter = iter2, iter2 = folio_next(iter2))
I am not sure that hiding this inside a macro helps the readability of
the code.
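Open-coded it would be something like

	for (release = folio, next = folio_next(folio);
	     release != end_folio;
	     release = next, next = folio_next(next))

which is not much longer and keeps the iteration visible at the call site.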
> +
> +/*
> + * It splits a @folio (without mapping) to lower order smaller folios in two
> + * ways.
What do you mean by "without mapping"? I initially thought that ->mapping
is NULL, but that is obviously not true.
Do you mean unmapped?
> + * 1. uniform split: the given @folio into multiple @new_order small folios,
> + * where all small folios have the same order. This is done when
> + * uniform_split is true.
> + * 2. buddy allocator like split: the given @folio is split into half and one
> + * of the half (containing the given page) is split into half until the
> + * given @page's order becomes @new_order. This is done when uniform_split is
> + * false.
> + *
> + * The high level flow for these two methods are:
> + * 1. uniform split: a single __split_folio_to_order() is called to split the
> + * @folio into @new_order, then we traverse all the resulting folios one by
> + * one in PFN ascending order and perform stats, unfreeze, adding to list,
> + * and file mapping index operations.
> + * 2. buddy allocator like split: in general, folio_order - @new_order calls to
> + * __split_folio_to_order() are called in the for loop to split the @folio
> + * to one lower order at a time. The resulting small folios are processed
> + * like what is done during the traversal in 1, except the one containing
> + * @page, which is split in next for loop.
> + *
> + * After splitting, the caller's folio reference will be transferred to the
> + * folio containing @page. The other folios may be freed if they are not mapped.
> + *
> + * In terms of locking, after splitting,
> + * 1. uniform split leaves @page (or the folio contains it) locked;
> + * 2. buddy allocator like split leaves @folio locked.
> + *
> + * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
> + *
> + * For !uniform_split, when -ENOMEM is returned, the original folio might be
> + * split. The caller needs to check the input folio.
> + */
> +static int __folio_split_without_mapping(struct folio *folio, int new_order,
> + struct page *page, struct list_head *list, pgoff_t end,
> + struct xa_state *xas, struct address_space *mapping,
> + bool uniform_split)
It is not clear what state xas has to be in on entry.
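At minimum a comment at the prototype would help, something like this
(my guess from the code below, please correct):

	/*
	 * @xas must point at @folio->index in @mapping; for uniform_split
	 * the caller has to call xas_split_alloc() beforehand, since we
	 * cannot allocate here with irqs disabled.
	 */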
> +{
> + struct lruvec *lruvec;
> + struct address_space *swap_cache = NULL;
> + struct folio *origin_folio = folio;
> + struct folio *next_folio = folio_next(folio);
> + struct folio *new_folio;
> + struct folio *next;
> + int order = folio_order(folio);
> + int split_order = order - 1;
> + int nr_dropped = 0;
> + int ret = 0;
> +
> + if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
> + if (!uniform_split)
> + return -EINVAL;
Why this limitation?
> + swap_cache = swap_address_space(folio->swap);
> + xa_lock(&swap_cache->i_pages);
> + }
> +
> + if (folio_test_anon(folio))
> + mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
> +
> + /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
> + lruvec = folio_lruvec_lock(folio);
> +
> + /*
> + * split to new_order one order at a time. For uniform split,
> + * intermediate orders are skipped
> + */
> + for (split_order = order - 1; split_order >= new_order; split_order--) {
> + int old_order = folio_order(folio);
> + struct folio *release;
> + struct folio *end_folio = folio_next(folio);
> + int status;
> + bool stop_split = false;
> +
> + if (folio_test_anon(folio) && split_order == 1)
A comment is missing here.
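I assume it is the usual no-order-1-anonymous-folios rule (the second
tail page is needed for _deferred_list and friends), so a one-liner
would do:

	/* order-1 anonymous folios are not supported */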
> + continue;
> + if (uniform_split && split_order != new_order)
> + continue;
What is the point of the loop for uniform_split?
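If uniform_split only ever runs a single iteration, I would find it
easier to read with the body factored out, roughly (structure only,
split_one() is a made-up name for the current loop body):

	if (uniform_split) {
		split_one(folio, new_order);
	} else {
		for (split_order = order - 1; split_order >= new_order;
		     split_order--)
			split_one(folio, split_order);
	}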
> +
> + if (mapping) {
> + /*
> + * uniform split has xas_split_alloc() called before
> + * irq is disabled, since xas_nomem() might not be
> + * able to allocate enough memory.
> + */
> + if (uniform_split)
> + xas_split(xas, folio, old_order);
> + else {
> + xas_set_order(xas, folio->index, split_order);
> + xas_set_err(xas, -ENOMEM);
> + if (xas_nomem(xas, 0))
Why 0 as the gfp mask?
> + xas_split(xas, folio, old_order);
> + else {
> + stop_split = true;
> + ret = -ENOMEM;
> + goto after_split;
> + }
> + }
> + }
> +
> + split_page_memcg(&folio->page, old_order, split_order);
__split_huge_page() has a comment for split_page_memcg(). Do we want to
keep it? Is it safe to call it under lruvec lock?
> + split_page_owner(&folio->page, old_order, split_order);
> + pgalloc_tag_split(folio, old_order, split_order);
> +
> + status = __split_folio_to_order(folio, split_order);
> +
> + if (status < 0)
> + return status;
> +
> +after_split:
> + /*
> + * Iterate through after-split folios and perform related
> + * operations. But in buddy allocator like split, the folio
> + * containing the specified page is skipped until its order
> + * is new_order, since the folio will be worked on in next
> + * iteration.
> + */
> + for_each_folio_until_end_safe(release, next, folio, end_folio) {
> + if (page_in_folio_offset(page, release) >= 0) {
> + folio = release;
> + if (split_order != new_order && !stop_split)
> + continue;
I don't understand this condition.
> + }
> + if (folio_test_anon(release))
> + mod_mthp_stat(folio_order(release),
> + MTHP_STAT_NR_ANON, 1);
Add { } around the block.
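I.e.:

	if (folio_test_anon(release)) {
		mod_mthp_stat(folio_order(release),
			      MTHP_STAT_NR_ANON, 1);
	}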
> +
> + /*
> + * Unfreeze refcount first. Additional reference from
> + * page cache.
> + */
> + folio_ref_unfreeze(release,
> + 1 + ((!folio_test_anon(origin_folio) ||
> + folio_test_swapcache(origin_folio)) ?
> + folio_nr_pages(release) : 0));
> +
> + if (release != origin_folio)
> + lru_add_page_tail(origin_folio, &release->page,
> + lruvec, list);
> +
> + /* Some pages can be beyond EOF: drop them from page cache */
> + if (release->index >= end) {
> + if (shmem_mapping(origin_folio->mapping))
> + nr_dropped++;
> + else if (folio_test_clear_dirty(release))
> + folio_account_cleaned(release,
> + inode_to_wb(origin_folio->mapping->host));
> + __filemap_remove_folio(release, NULL);
> + folio_put(release);
> + } else if (!folio_test_anon(release)) {
> + __xa_store(&origin_folio->mapping->i_pages,
> + release->index, &release->page, 0);
> + } else if (swap_cache) {
> + __xa_store(&swap_cache->i_pages,
> + swap_cache_index(release->swap),
> + &release->page, 0);
> + }
> + }
> + xas_destroy(xas);
> + }
> +
> + unlock_page_lruvec(lruvec);
> +
> + if (folio_test_anon(origin_folio)) {
> + if (folio_test_swapcache(origin_folio))
> + xa_unlock(&swap_cache->i_pages);
> + } else
> + xa_unlock(&mapping->i_pages);
> +
> + /* Caller disabled irqs, so they are still disabled here */
> + local_irq_enable();
> +
> + if (nr_dropped)
> + shmem_uncharge(mapping->host, nr_dropped);
> +
> + remap_page(origin_folio, 1 << order,
> + folio_test_anon(origin_folio) ?
> + RMP_USE_SHARED_ZEROPAGE : 0);
> +
> + /*
> + * At this point, folio should contain the specified page, so that it
> + * will be left to the caller to unlock it.
> + */
> + for_each_folio_until_end_safe(new_folio, next, origin_folio, next_folio) {
> + if (uniform_split && new_folio == folio)
> + continue;
> + if (!uniform_split && new_folio == origin_folio)
> + continue;
> +
> + folio_unlock(new_folio);
> + /*
> + * Subpages may be freed if there wasn't any mapping
> + * like if add_to_swap() is running on a lru page that
> + * had its mapping zapped. And freeing these pages
> + * requires taking the lru_lock so we do the put_page
> + * of the tail pages after the split is complete.
> + */
> + free_page_and_swap_cache(&new_folio->page);
> + }
> + return ret;
> +}
> +
> /*
> * This function splits a large folio into smaller folios of order @new_order.
> * @page can point to any page of the large folio to split. The split operation
> --
> 2.45.2
>
--
Kiryl Shutsemau / Kirill A. Shutemov