Message-ID: <D129A3F2-D79C-482E-BC70-A26C781B149E@nvidia.com>
Date: Fri, 04 Jul 2025 07:24:17 -0400
From: Zi Yan <ziy@...dia.com>
To: Balbir Singh <balbirs@...dia.com>
Cc: linux-mm@...ck.org, akpm@...ux-foundation.org,
 linux-kernel@...r.kernel.org, Karol Herbst <kherbst@...hat.com>,
 Lyude Paul <lyude@...hat.com>, Danilo Krummrich <dakr@...nel.org>,
 David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>,
 Jérôme Glisse <jglisse@...hat.com>,
 Shuah Khan <shuah@...nel.org>, David Hildenbrand <david@...hat.com>,
 Barry Song <baohua@...nel.org>, Baolin Wang <baolin.wang@...ux.alibaba.com>,
 Ryan Roberts <ryan.roberts@....com>, Matthew Wilcox <willy@...radead.org>,
 Peter Xu <peterx@...hat.com>, Kefeng Wang <wangkefeng.wang@...wei.com>,
 Jane Chu <jane.chu@...cle.com>, Alistair Popple <apopple@...dia.com>,
 Donet Tom <donettom@...ux.ibm.com>
Subject: Re: [v1 resend 08/12] mm/thp: add split during migration support

On 3 Jul 2025, at 19:35, Balbir Singh wrote:

> Support splitting pages during THP zone device migration as needed.
> The common case that arises is that, after setup, during migration
> the destination might not be able to allocate MIGRATE_PFN_COMPOUND
> pages.
>
> Add a new routine migrate_vma_split_pages() to support the splitting
> of already isolated pages. The pages being migrated are already unmapped
> and marked for migration during setup (via unmap). folio_split() and
> __split_unmapped_folio() take an additional "isolated" argument to avoid
> unmapping and remapping these pages and unlocking/putting the folio.
>
> Cc: Karol Herbst <kherbst@...hat.com>
> Cc: Lyude Paul <lyude@...hat.com>
> Cc: Danilo Krummrich <dakr@...nel.org>
> Cc: David Airlie <airlied@...il.com>
> Cc: Simona Vetter <simona@...ll.ch>
> Cc: "Jérôme Glisse" <jglisse@...hat.com>
> Cc: Shuah Khan <shuah@...nel.org>
> Cc: David Hildenbrand <david@...hat.com>
> Cc: Barry Song <baohua@...nel.org>
> Cc: Baolin Wang <baolin.wang@...ux.alibaba.com>
> Cc: Ryan Roberts <ryan.roberts@....com>
> Cc: Matthew Wilcox <willy@...radead.org>
> Cc: Peter Xu <peterx@...hat.com>
> Cc: Zi Yan <ziy@...dia.com>
> Cc: Kefeng Wang <wangkefeng.wang@...wei.com>
> Cc: Jane Chu <jane.chu@...cle.com>
> Cc: Alistair Popple <apopple@...dia.com>
> Cc: Donet Tom <donettom@...ux.ibm.com>
>
> Signed-off-by: Balbir Singh <balbirs@...dia.com>
> ---
>  include/linux/huge_mm.h | 11 ++++++--
>  mm/huge_memory.c        | 54 ++++++++++++++++++++-----------------
>  mm/migrate_device.c     | 59 ++++++++++++++++++++++++++++++++---------
>  3 files changed, 85 insertions(+), 39 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 65a1bdf29bb9..5f55a754e57c 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -343,8 +343,8 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
>  		vm_flags_t vm_flags);
>
>  bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
> -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> -		unsigned int new_order);
> +int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> +		unsigned int new_order, bool isolated);
>  int min_order_for_split(struct folio *folio);
>  int split_folio_to_list(struct folio *folio, struct list_head *list);
>  bool uniform_split_supported(struct folio *folio, unsigned int new_order,
> @@ -353,6 +353,13 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
>  		bool warns);
>  int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
>  		struct list_head *list);
> +
> +static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> +		unsigned int new_order)
> +{
> +	return __split_huge_page_to_list_to_order(page, list, new_order, false);
> +}
> +
>  /*
>   * try_folio_split - try to split a @folio at @page using non uniform split.
>   * @folio: folio to be split
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index d55e36ae0c39..e00ddfed22fa 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -3424,15 +3424,6 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
>  		new_folio->mapping = folio->mapping;
>  		new_folio->index = folio->index + i;
>
> -		/*
> -		 * page->private should not be set in tail pages. Fix up and warn once
> -		 * if private is unexpectedly set.
> -		 */
> -		if (unlikely(new_folio->private)) {
> -			VM_WARN_ON_ONCE_PAGE(true, new_head);
> -			new_folio->private = NULL;
> -		}
> -
>  		if (folio_test_swapcache(folio))
>  			new_folio->swap.val = folio->swap.val + i;
>
> @@ -3519,7 +3510,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
>  		struct page *split_at, struct page *lock_at,
>  		struct list_head *list, pgoff_t end,
>  		struct xa_state *xas, struct address_space *mapping,
> -		bool uniform_split)
> +		bool uniform_split, bool isolated)
>  {
>  	struct lruvec *lruvec;
>  	struct address_space *swap_cache = NULL;
> @@ -3643,8 +3634,9 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
>  				percpu_ref_get_many(&release->pgmap->ref,
>  							(1 << new_order) - 1);
>
> -			lru_add_split_folio(origin_folio, release, lruvec,
> -					list);
> +			if (!isolated)
> +				lru_add_split_folio(origin_folio, release,
> +							lruvec, list);
>
>  			/* Some pages can be beyond EOF: drop them from cache */
>  			if (release->index >= end) {
> @@ -3697,6 +3689,12 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
>  	if (nr_dropped)
>  		shmem_uncharge(mapping->host, nr_dropped);
>
> +	/*
> +	 * Don't remap and unlock isolated folios
> +	 */
> +	if (isolated)
> +		return ret;
> +
>  	remap_page(origin_folio, 1 << order,
>  			folio_test_anon(origin_folio) ?
>  				RMP_USE_SHARED_ZEROPAGE : 0);
> @@ -3790,6 +3788,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
>   * @lock_at: a page within @folio to be left locked to caller
>   * @list: after-split folios will be put on it if non NULL
>   * @uniform_split: perform uniform split or not (non-uniform split)
> + * @isolated: The pages are already unmapped

s/pages/folio

Why name it "isolated" if the folio is unmapped? An isolated folio
usually means one that has been removed from the LRU lists, so calling
it "isolated" here causes confusion.
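
If the flag stays, something like the below (an untested sketch, only to
illustrate the naming point) would read more clearly:

	/* "unmapped" states the caller's guarantee, while "isolated"
	 * suggests an LRU state that is not meant here */
	int __split_huge_page_to_list_to_order(struct page *page,
			struct list_head *list, unsigned int new_order,
			bool unmapped);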

>   *
>   * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
>   * It is in charge of checking whether the split is supported or not and
> @@ -3800,7 +3799,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
>   */
>  static int __folio_split(struct folio *folio, unsigned int new_order,
>  		struct page *split_at, struct page *lock_at,
> -		struct list_head *list, bool uniform_split)
> +		struct list_head *list, bool uniform_split, bool isolated)
>  {
>  	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
>  	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
> @@ -3846,14 +3845,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>  		 * is taken to serialise against parallel split or collapse
>  		 * operations.
>  		 */
> -		anon_vma = folio_get_anon_vma(folio);
> -		if (!anon_vma) {
> -			ret = -EBUSY;
> -			goto out;
> +		if (!isolated) {
> +			anon_vma = folio_get_anon_vma(folio);
> +			if (!anon_vma) {
> +				ret = -EBUSY;
> +				goto out;
> +			}
> +			anon_vma_lock_write(anon_vma);
>  		}
>  		end = -1;
>  		mapping = NULL;
> -		anon_vma_lock_write(anon_vma);
>  	} else {
>  		unsigned int min_order;
>  		gfp_t gfp;
> @@ -3920,7 +3921,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>  		goto out_unlock;
>  	}
>
> -	unmap_folio(folio);
> +	if (!isolated)
> +		unmap_folio(folio);
>
>  	/* block interrupt reentry in xa_lock and spinlock */
>  	local_irq_disable();
> @@ -3973,14 +3975,15 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>
>  		ret = __split_unmapped_folio(folio, new_order,
>  				split_at, lock_at, list, end, &xas, mapping,
> -				uniform_split);
> +				uniform_split, isolated);
>  	} else {
>  		spin_unlock(&ds_queue->split_queue_lock);
>  fail:
>  		if (mapping)
>  			xas_unlock(&xas);
>  		local_irq_enable();
> -		remap_page(folio, folio_nr_pages(folio), 0);
> +		if (!isolated)
> +			remap_page(folio, folio_nr_pages(folio), 0);
>  		ret = -EAGAIN;
>  	}

These "isolated" special handlings does not look good, I wonder if there
is a way of letting split code handle device private folios more gracefully.
It also causes confusions, since why does "isolated/unmapped" folios
not need to unmap_page(), remap_page(), or unlock?
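
For example, could __folio_split() derive this from the folio itself
instead of threading a boolean through the call chain? A rough, untested
idea (it assumes device private folios only ever reach the split code
already unmapped, which may not hold):

	/* untested sketch: infer "already unmapped" from the folio
	 * type rather than from a new parameter; only valid if device
	 * private folios are guaranteed to arrive here unmapped */
	if (!folio_is_device_private(folio))
		unmap_folio(folio);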


>
> @@ -4046,12 +4049,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>   * Returns -EINVAL when trying to split to an order that is incompatible
>   * with the folio. Splitting to order 0 is compatible with all folios.
>   */
> -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> -				     unsigned int new_order)
> +int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> +				     unsigned int new_order, bool isolated)
>  {
>  	struct folio *folio = page_folio(page);
>
> -	return __folio_split(folio, new_order, &folio->page, page, list, true);
> +	return __folio_split(folio, new_order, &folio->page, page, list, true,
> +				isolated);
>  }
>
>  /*
> @@ -4080,7 +4084,7 @@ int folio_split(struct folio *folio, unsigned int new_order,
>  		struct page *split_at, struct list_head *list)
>  {
>  	return __folio_split(folio, new_order, split_at, &folio->page, list,
> -			false);
> +			false, false);
>  }
>
>  int min_order_for_split(struct folio *folio)
> diff --git a/mm/migrate_device.c b/mm/migrate_device.c
> index 41d0bd787969..acd2f03b178d 100644
> --- a/mm/migrate_device.c
> +++ b/mm/migrate_device.c
> @@ -813,6 +813,24 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
>  		src[i] &= ~MIGRATE_PFN_MIGRATE;
>  	return 0;
>  }
> +
> +static void migrate_vma_split_pages(struct migrate_vma *migrate,
> +					unsigned long idx, unsigned long addr,
> +					struct folio *folio)
> +{
> +	unsigned long i;
> +	unsigned long pfn;
> +	unsigned long flags;
> +
> +	folio_get(folio);
> +	split_huge_pmd_address(migrate->vma, addr, true);
> +	__split_huge_page_to_list_to_order(folio_page(folio, 0), NULL, 0, true);

If you need to split PMD entries here, why not let unmap_folio() and
remap_page() in the split code do that?
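
For reference, unmap_folio() already splits any PMD mapping while it
unmaps, via the TTU_SPLIT_HUGE_PMD flag. Roughly (paraphrased from my
reading of mm/huge_memory.c, so treat it as a sketch rather than the
exact code):

	static void unmap_folio(struct folio *folio)
	{
		/* TTU_SPLIT_HUGE_PMD makes the rmap walk split PMD
		 * mappings as it unmaps, so the explicit
		 * split_huge_pmd_address() above should not be needed
		 * if this path were used */
		enum ttu_flags ttu_flags = TTU_RMAP_LOCKED |
			TTU_SPLIT_HUGE_PMD | TTU_SYNC | TTU_BATCH_FLUSH;

		if (folio_test_anon(folio))
			try_to_migrate(folio, ttu_flags);
		else
			try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
		try_to_unmap_flush();
	}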

--
Best Regards,
Yan, Zi
