Message-ID: <715fc271-1af3-4061-b217-e3d6e32849c6@redhat.com>
Date: Fri, 4 Jul 2025 09:43:54 +0300
From: Mika Penttilä <mpenttil@...hat.com>
To: Balbir Singh <balbirs@...dia.com>, linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
Karol Herbst <kherbst@...hat.com>, Lyude Paul <lyude@...hat.com>,
Danilo Krummrich <dakr@...nel.org>, David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>, Jérôme Glisse
<jglisse@...hat.com>, Shuah Khan <shuah@...nel.org>,
David Hildenbrand <david@...hat.com>, Barry Song <baohua@...nel.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Ryan Roberts <ryan.roberts@....com>, Matthew Wilcox <willy@...radead.org>,
Peter Xu <peterx@...hat.com>, Zi Yan <ziy@...dia.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>, Jane Chu <jane.chu@...cle.com>,
Alistair Popple <apopple@...dia.com>, Donet Tom <donettom@...ux.ibm.com>
Subject: Re: [v1 resend 08/12] mm/thp: add split during migration support
On 7/4/25 08:17, Mika Penttilä wrote:
> On 7/4/25 02:35, Balbir Singh wrote:
>> Support splitting pages during THP zone device migration as needed.
>> The common case that arises is that, after setup, the destination
>> might not be able to allocate MIGRATE_PFN_COMPOUND pages during
>> migration.
>>
>> Add a new routine migrate_vma_split_pages() to support the splitting
>> of already isolated pages. The pages being migrated are already unmapped
>> and marked for migration during setup (via unmap). folio_split() and
>> __split_unmapped_folio() take an additional "isolated" argument to avoid
>> unmapping and remapping these pages and unlocking/putting the folio.
>>
>> Cc: Karol Herbst <kherbst@...hat.com>
>> Cc: Lyude Paul <lyude@...hat.com>
>> Cc: Danilo Krummrich <dakr@...nel.org>
>> Cc: David Airlie <airlied@...il.com>
>> Cc: Simona Vetter <simona@...ll.ch>
>> Cc: "Jérôme Glisse" <jglisse@...hat.com>
>> Cc: Shuah Khan <shuah@...nel.org>
>> Cc: David Hildenbrand <david@...hat.com>
>> Cc: Barry Song <baohua@...nel.org>
>> Cc: Baolin Wang <baolin.wang@...ux.alibaba.com>
>> Cc: Ryan Roberts <ryan.roberts@....com>
>> Cc: Matthew Wilcox <willy@...radead.org>
>> Cc: Peter Xu <peterx@...hat.com>
>> Cc: Zi Yan <ziy@...dia.com>
>> Cc: Kefeng Wang <wangkefeng.wang@...wei.com>
>> Cc: Jane Chu <jane.chu@...cle.com>
>> Cc: Alistair Popple <apopple@...dia.com>
>> Cc: Donet Tom <donettom@...ux.ibm.com>
>>
>> Signed-off-by: Balbir Singh <balbirs@...dia.com>
>> ---
>>  include/linux/huge_mm.h | 11 ++++++--
>>  mm/huge_memory.c        | 54 ++++++++++++++++++++-----------------
>>  mm/migrate_device.c     | 59 ++++++++++++++++++++++++++++++++---------
>>  3 files changed, 85 insertions(+), 39 deletions(-)
>>
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index 65a1bdf29bb9..5f55a754e57c 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -343,8 +343,8 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
>> vm_flags_t vm_flags);
>>
>> bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
>> -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>> - unsigned int new_order);
>> +int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>> + unsigned int new_order, bool isolated);
>> int min_order_for_split(struct folio *folio);
>> int split_folio_to_list(struct folio *folio, struct list_head *list);
>> bool uniform_split_supported(struct folio *folio, unsigned int new_order,
>> @@ -353,6 +353,13 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
>> bool warns);
>> int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
>> struct list_head *list);
>> +
>> +static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>> + unsigned int new_order)
>> +{
>> + return __split_huge_page_to_list_to_order(page, list, new_order, false);
>> +}
>> +
>> /*
>> * try_folio_split - try to split a @folio at @page using non uniform split.
>> * @folio: folio to be split
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index d55e36ae0c39..e00ddfed22fa 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -3424,15 +3424,6 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
>> new_folio->mapping = folio->mapping;
>> new_folio->index = folio->index + i;
>>
>> - /*
>> - * page->private should not be set in tail pages. Fix up and warn once
>> - * if private is unexpectedly set.
>> - */
>> - if (unlikely(new_folio->private)) {
>> - VM_WARN_ON_ONCE_PAGE(true, new_head);
>> - new_folio->private = NULL;
>> - }
>> -
>> if (folio_test_swapcache(folio))
>> new_folio->swap.val = folio->swap.val + i;
>>
>> @@ -3519,7 +3510,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
>> struct page *split_at, struct page *lock_at,
>> struct list_head *list, pgoff_t end,
>> struct xa_state *xas, struct address_space *mapping,
>> - bool uniform_split)
>> + bool uniform_split, bool isolated)
>> {
>> struct lruvec *lruvec;
>> struct address_space *swap_cache = NULL;
>> @@ -3643,8 +3634,9 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
>> percpu_ref_get_many(&release->pgmap->ref,
>> (1 << new_order) - 1);
>>
>> - lru_add_split_folio(origin_folio, release, lruvec,
>> - list);
>> + if (!isolated)
>> + lru_add_split_folio(origin_folio, release,
>> + lruvec, list);
>>
>> /* Some pages can be beyond EOF: drop them from cache */
>> if (release->index >= end) {
>> @@ -3697,6 +3689,12 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
>> if (nr_dropped)
>> shmem_uncharge(mapping->host, nr_dropped);
>>
>> + /*
>> + * Don't remap and unlock isolated folios
>> + */
>> + if (isolated)
>> + return ret;
>> +
>> remap_page(origin_folio, 1 << order,
>> folio_test_anon(origin_folio) ?
>> RMP_USE_SHARED_ZEROPAGE : 0);
>> @@ -3790,6 +3788,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
>> * @lock_at: a page within @folio to be left locked to caller
>> * @list: after-split folios will be put on it if non NULL
>> * @uniform_split: perform uniform split or not (non-uniform split)
>> + * @isolated: The pages are already unmapped
>> *
>> * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
>> * It is in charge of checking whether the split is supported or not and
>> @@ -3800,7 +3799,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
>> */
>> static int __folio_split(struct folio *folio, unsigned int new_order,
>> struct page *split_at, struct page *lock_at,
>> - struct list_head *list, bool uniform_split)
>> + struct list_head *list, bool uniform_split, bool isolated)
>> {
>> struct deferred_split *ds_queue = get_deferred_split_queue(folio);
>> XA_STATE(xas, &folio->mapping->i_pages, folio->index);
>> @@ -3846,14 +3845,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>> * is taken to serialise against parallel split or collapse
>> * operations.
>> */
>> - anon_vma = folio_get_anon_vma(folio);
>> - if (!anon_vma) {
>> - ret = -EBUSY;
>> - goto out;
>> + if (!isolated) {
>> + anon_vma = folio_get_anon_vma(folio);
>> + if (!anon_vma) {
>> + ret = -EBUSY;
>> + goto out;
>> + }
>> + anon_vma_lock_write(anon_vma);
>> }
>> end = -1;
>> mapping = NULL;
>> - anon_vma_lock_write(anon_vma);
>> } else {
>> unsigned int min_order;
>> gfp_t gfp;
>> @@ -3920,7 +3921,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>> goto out_unlock;
>> }
>>
>> - unmap_folio(folio);
>> + if (!isolated)
>> + unmap_folio(folio);
>>
>> /* block interrupt reentry in xa_lock and spinlock */
>> local_irq_disable();
>> @@ -3973,14 +3975,15 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>>
>> ret = __split_unmapped_folio(folio, new_order,
>> split_at, lock_at, list, end, &xas, mapping,
>> - uniform_split);
>> + uniform_split, isolated);
>> } else {
>> spin_unlock(&ds_queue->split_queue_lock);
>> fail:
>> if (mapping)
>> xas_unlock(&xas);
>> local_irq_enable();
>> - remap_page(folio, folio_nr_pages(folio), 0);
>> + if (!isolated)
>> + remap_page(folio, folio_nr_pages(folio), 0);
>> ret = -EAGAIN;
>> }
>>
>> @@ -4046,12 +4049,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>> * Returns -EINVAL when trying to split to an order that is incompatible
>> * with the folio. Splitting to order 0 is compatible with all folios.
>> */
>> -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>> - unsigned int new_order)
>> +int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>> + unsigned int new_order, bool isolated)
>> {
>> struct folio *folio = page_folio(page);
>>
>> - return __folio_split(folio, new_order, &folio->page, page, list, true);
>> + return __folio_split(folio, new_order, &folio->page, page, list, true,
>> + isolated);
>> }
>>
>> /*
>> @@ -4080,7 +4084,7 @@ int folio_split(struct folio *folio, unsigned int new_order,
>> struct page *split_at, struct list_head *list)
>> {
>> return __folio_split(folio, new_order, split_at, &folio->page, list,
>> - false);
>> + false, false);
>> }
>>
>> int min_order_for_split(struct folio *folio)
>> diff --git a/mm/migrate_device.c b/mm/migrate_device.c
>> index 41d0bd787969..acd2f03b178d 100644
>> --- a/mm/migrate_device.c
>> +++ b/mm/migrate_device.c
>> @@ -813,6 +813,24 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
>> src[i] &= ~MIGRATE_PFN_MIGRATE;
>> return 0;
>> }
>> +
>> +static void migrate_vma_split_pages(struct migrate_vma *migrate,
>> + unsigned long idx, unsigned long addr,
>> + struct folio *folio)
>> +{
>> + unsigned long i;
>> + unsigned long pfn;
>> + unsigned long flags;
>> +
>> + folio_get(folio);
>> + split_huge_pmd_address(migrate->vma, addr, true);
>> + __split_huge_page_to_list_to_order(folio_page(folio, 0), NULL, 0, true);
> We already have a reference to the folio, so why is folio_get() needed?
>
> Splitting the page already splits the PMD for anon folios, so why is there an explicit split_huge_pmd_address() call?
Oh, I see:

+	if (!isolated)
+		unmap_folio(folio);

which explains the explicit split_huge_pmd_address(migrate->vma, addr, true):
with isolated == true, unmap_folio() is skipped, so the PMD is never split
via TTU_SPLIT_HUGE_PMD and has to be split explicitly here.

Still, why the folio_get(folio)?
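
To make the question concrete, the isolated path boils down to the
following sequence (paraphrased from the patch with my annotations; not
actual tree code):

	/* Caller already holds a reference and the folio lock. */
	folio_get(folio);	/* second reference -- what put is this paired with? */

	/* isolated == true skips unmap_folio(), so split the PMD explicitly. */
	split_huge_pmd_address(migrate->vma, addr, true);

	/* Split the unmapped folio to order 0; no remap/unlock afterwards. */
	__split_huge_page_to_list_to_order(folio_page(folio, 0), NULL, 0, true);

If the extra reference is meant to balance a put somewhere inside the
split path, a comment saying so would help.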
>
>> + migrate->src[idx] &= ~MIGRATE_PFN_COMPOUND;
>> + flags = migrate->src[idx] & ((1UL << MIGRATE_PFN_SHIFT) - 1);
>> + pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT;
>> + for (i = 1; i < HPAGE_PMD_NR; i++)
>> + migrate->src[i+idx] = migrate_pfn(pfn + i) | flags;
>> +}
>> #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
>> static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
>> unsigned long addr,
>> @@ -822,6 +840,11 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
>> {
>> return 0;
>> }
>> +
>> +static void migrate_vma_split_pages(struct migrate_vma *migrate,
>> + unsigned long idx, unsigned long addr,
>> + struct folio *folio)
>> +{}
>> #endif
>>
>> /*
>> @@ -971,8 +994,9 @@ static void __migrate_device_pages(unsigned long *src_pfns,
>> struct migrate_vma *migrate)
>> {
>> struct mmu_notifier_range range;
>> - unsigned long i;
>> + unsigned long i, j;
>> bool notified = false;
>> + unsigned long addr;
>>
>> for (i = 0; i < npages; ) {
>> struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
>> @@ -1014,12 +1038,16 @@ static void __migrate_device_pages(unsigned long *src_pfns,
>> (!(dst_pfns[i] & MIGRATE_PFN_COMPOUND))) {
>> nr = HPAGE_PMD_NR;
>> src_pfns[i] &= ~MIGRATE_PFN_COMPOUND;
>> - src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
>> - goto next;
>> + } else {
>> + nr = 1;
>> }
>>
>> - migrate_vma_insert_page(migrate, addr, &dst_pfns[i],
>> - &src_pfns[i]);
>> + for (j = 0; j < nr && i + j < npages; j++) {
>> + src_pfns[i+j] |= MIGRATE_PFN_MIGRATE;
>> + migrate_vma_insert_page(migrate,
>> + addr + j * PAGE_SIZE,
>> + &dst_pfns[i+j], &src_pfns[i+j]);
>> + }
>> goto next;
>> }
>>
>> @@ -1041,7 +1069,9 @@ static void __migrate_device_pages(unsigned long *src_pfns,
>> MIGRATE_PFN_COMPOUND);
>> goto next;
>> }
>> - src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
>> + nr = 1 << folio_order(folio);
>> + addr = migrate->start + i * PAGE_SIZE;
>> + migrate_vma_split_pages(migrate, i, addr, folio);
>> } else if ((src_pfns[i] & MIGRATE_PFN_MIGRATE) &&
>> (dst_pfns[i] & MIGRATE_PFN_COMPOUND) &&
>> !(src_pfns[i] & MIGRATE_PFN_COMPOUND)) {
>> @@ -1076,12 +1106,17 @@ static void __migrate_device_pages(unsigned long *src_pfns,
>> BUG_ON(folio_test_writeback(folio));
>>
>> if (migrate && migrate->fault_page == page)
>> - extra_cnt = 1;
>> - r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
>> - if (r != MIGRATEPAGE_SUCCESS)
>> - src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
>> - else
>> - folio_migrate_flags(newfolio, folio);
>> + extra_cnt++;
>> + for (j = 0; j < nr && i + j < npages; j++) {
>> + folio = page_folio(migrate_pfn_to_page(src_pfns[i+j]));
>> + newfolio = page_folio(migrate_pfn_to_page(dst_pfns[i+j]));
>> +
>> + r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
>> + if (r != MIGRATEPAGE_SUCCESS)
>> + src_pfns[i+j] &= ~MIGRATE_PFN_MIGRATE;
>> + else
>> + folio_migrate_flags(newfolio, folio);
>> + }
>> next:
>> i += nr;
>> }