Message-ID: <9513cfc9-226a-4696-8dc9-4eda6033f47d@lucifer.local>
Date: Fri, 18 Jul 2025 20:10:14 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Zi Yan <ziy@...dia.com>
Cc: David Hildenbrand <david@...hat.com>, linux-mm@...ck.org,
Andrew Morton <akpm@...ux-foundation.org>,
Dan Carpenter <dan.carpenter@...aro.org>,
Antonio Quartulli <antonio@...delbit.com>,
Hugh Dickins <hughd@...gle.com>,
Kirill Shutemov <k.shutemov@...il.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Nico Pache <npache@...hat.com>, Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>, Barry Song <baohua@...nel.org>,
Balbir Singh <balbirs@...dia.com>,
Matthew Brost <matthew.brost@...el.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 3/6] mm/huge_memory: deduplicate code in
__folio_split().
On Fri, Jul 18, 2025 at 02:37:17PM -0400, Zi Yan wrote:
> xas_unlock(), remap_page(), and local_irq_enable() are moved out of the
> if branches to deduplicate the code. While at it, add a remap_flags
> variable to clean up the remap_page() call site. nr_dropped is renamed
> to nr_shmem_dropped, as it becomes a variable at __folio_split() scope.
>
> Signed-off-by: Zi Yan <ziy@...dia.com>
> Acked-by: David Hildenbrand <david@...hat.com>
Sorry, I thought I'd reviewed this.

I've looked through this carefully and the logic looks correct to me, so:
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
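
For anyone following along: the win here is that the success and failure
paths now funnel through one shared epilogue (xas unlock, IRQ enable,
remap) instead of duplicating it in each branch, with the failure sites
only recording ret before jumping there. A minimal user-space sketch of
that single-exit pattern, with hypothetical stub names standing in for
the real helpers (this is not the kernel code itself):

#include <errno.h>
#include <stdio.h>

#define RMP_USE_SHARED_ZEROPAGE 1	/* stand-in for the real flag */

/* hypothetical stubs standing in for xas_unlock()/remap_page() etc. */
static void xas_unlock_stub(void) { printf("xas unlock\n"); }
static void irq_enable_stub(void) { printf("irq enable\n"); }
static void remap_stub(int flags) { printf("remap flags=%d\n", flags); }

static int split_sketch(int freeze_ok)
{
	int remap_flags = 0;
	int ret = 0;

	if (!freeze_ok) {
		/* the failure site only records the error... */
		ret = -EAGAIN;
		goto fail;
	}

	/* success-only work (unmapping, the actual split) happens here */

fail:
	/* ...and everything below runs exactly once on both paths */
	xas_unlock_stub();
	irq_enable_stub();
	if (!ret)
		remap_flags = RMP_USE_SHARED_ZEROPAGE;
	remap_stub(remap_flags);
	return ret;
}

int main(void)
{
	split_sketch(1);	/* success path */
	split_sketch(0);	/* failure path, same epilogue */
	return 0;
}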
> ---
> mm/huge_memory.c | 73 +++++++++++++++++++++++-------------------------
> 1 file changed, 35 insertions(+), 38 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index e01359008b13..d36f7bdaeb38 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -3595,6 +3595,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
> struct anon_vma *anon_vma = NULL;
> int order = folio_order(folio);
> struct folio *new_folio, *next;
> + int nr_shmem_dropped = 0;
> + int remap_flags = 0;
> int extra_pins, ret;
> pgoff_t end;
> bool is_hzp;
> @@ -3718,15 +3720,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
> */
> xas_lock(&xas);
> xas_reset(&xas);
> - if (xas_load(&xas) != folio)
> + if (xas_load(&xas) != folio) {
> + ret = -EAGAIN;
> goto fail;
> + }
> }
>
> /* Prevent deferred_split_scan() touching ->_refcount */
> spin_lock(&ds_queue->split_queue_lock);
> if (folio_ref_freeze(folio, 1 + extra_pins)) {
> struct address_space *swap_cache = NULL;
> - int nr_dropped = 0;
> struct lruvec *lruvec;
>
> if (folio_order(folio) > 1 &&
> @@ -3798,7 +3801,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
> /* Some pages can be beyond EOF: drop them from cache */
> if (new_folio->index >= end) {
> if (shmem_mapping(mapping))
> - nr_dropped += folio_nr_pages(new_folio);
> + nr_shmem_dropped += folio_nr_pages(new_folio);
> else if (folio_test_clear_dirty(new_folio))
> folio_account_cleaned(
> new_folio,
> @@ -3828,47 +3831,41 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>
> if (swap_cache)
> xa_unlock(&swap_cache->i_pages);
> - if (mapping)
> - xas_unlock(&xas);
> + } else {
> + spin_unlock(&ds_queue->split_queue_lock);
> + ret = -EAGAIN;
> + }
> +fail:
> + if (mapping)
> + xas_unlock(&xas);
> +
> + local_irq_enable();
>
> - local_irq_enable();
> + if (nr_shmem_dropped)
> + shmem_uncharge(mapping->host, nr_shmem_dropped);
>
> - if (nr_dropped)
> - shmem_uncharge(mapping->host, nr_dropped);
> + if (!ret && is_anon)
> + remap_flags = RMP_USE_SHARED_ZEROPAGE;
> + remap_page(folio, 1 << order, remap_flags);
>
> - remap_page(folio, 1 << order,
> - !ret && folio_test_anon(folio) ?
> - RMP_USE_SHARED_ZEROPAGE :
> - 0);
> + /*
> + * Unlock all after-split folios except the one containing
> + * @lock_at page. If @folio is not split, it will be kept locked.
> + */
> + for (new_folio = folio; new_folio != end_folio; new_folio = next) {
> + next = folio_next(new_folio);
> + if (new_folio == page_folio(lock_at))
> + continue;
>
> + folio_unlock(new_folio);
> /*
> - * Unlock all after-split folios except the one containing
> - * @lock_at page. If @folio is not split, it will be kept locked.
> + * Subpages may be freed if there wasn't any mapping
> + * like if add_to_swap() is running on a lru page that
> + * had its mapping zapped. And freeing these pages
> + * requires taking the lru_lock so we do the put_page
> + * of the tail pages after the split is complete.
> */
> - for (new_folio = folio; new_folio != end_folio;
> - new_folio = next) {
> - next = folio_next(new_folio);
> - if (new_folio == page_folio(lock_at))
> - continue;
> -
> - folio_unlock(new_folio);
> - /*
> - * Subpages may be freed if there wasn't any mapping
> - * like if add_to_swap() is running on a lru page that
> - * had its mapping zapped. And freeing these pages
> - * requires taking the lru_lock so we do the put_page
> - * of the tail pages after the split is complete.
> - */
> - free_folio_and_swap_cache(new_folio);
> - }
> - } else {
> - spin_unlock(&ds_queue->split_queue_lock);
> -fail:
> - if (mapping)
> - xas_unlock(&xas);
> - local_irq_enable();
> - remap_page(folio, folio_nr_pages(folio), 0);
> - ret = -EAGAIN;
> + free_folio_and_swap_cache(new_folio);
> }
>
> out_unlock:
> --
> 2.47.2
>
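One more illustrative note on the unlock loop at the end: it walks all
after-split folios and unlocks every one except the folio containing
@lock_at, so the caller is left holding exactly the lock it expects. A
tiny stand-alone sketch of that skip-one pattern, using a hypothetical
struct rather than real folios:

#include <stdio.h>

struct folio_sketch {
	int id;
	int locked;
	struct folio_sketch *next;
};

/* unlock every folio in [folio, end) except the one matching lock_at */
static void unlock_all_but(struct folio_sketch *folio,
			   struct folio_sketch *end,
			   struct folio_sketch *lock_at)
{
	struct folio_sketch *next;

	for (; folio != end; folio = next) {
		next = folio->next;
		if (folio == lock_at)
			continue;	/* caller keeps this one locked */
		folio->locked = 0;
		/* in the kernel, freeing/putting the tail pages also
		 * happens here, after the split is fully complete */
	}
}

int main(void)
{
	struct folio_sketch f[3] = {
		{ 0, 1, &f[1] }, { 1, 1, &f[2] }, { 2, 1, NULL },
	};

	unlock_all_but(&f[0], NULL, &f[1]);
	for (int i = 0; i < 3; i++)
		printf("folio %d locked=%d\n", f[i].id, f[i].locked);
	return 0;
}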