Message-ID: <b84846bd-801f-42b6-b1d4-3d784ddbcd1f@nvidia.com>
Date: Tue, 8 Jul 2025 14:20:04 +1000
From: Balbir Singh <balbirs@...dia.com>
To: Mika Penttilä <mpenttil@...hat.com>, linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
Karol Herbst <kherbst@...hat.com>, Lyude Paul <lyude@...hat.com>,
Danilo Krummrich <dakr@...nel.org>, David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>, Jérôme Glisse
<jglisse@...hat.com>, Shuah Khan <shuah@...nel.org>,
David Hildenbrand <david@...hat.com>, Barry Song <baohua@...nel.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Ryan Roberts <ryan.roberts@....com>, Matthew Wilcox <willy@...radead.org>,
Peter Xu <peterx@...hat.com>, Zi Yan <ziy@...dia.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>, Jane Chu <jane.chu@...cle.com>,
Alistair Popple <apopple@...dia.com>, Donet Tom <donettom@...ux.ibm.com>
Subject: Re: [v1 resend 03/12] mm/thp: zone_device awareness in THP handling
code
On 7/7/25 13:49, Mika Penttilä wrote:
>
> On 7/4/25 02:35, Balbir Singh wrote:
>> Make the THP handling code in the mm subsystem aware of zone
>> device pages. Although the code is written to be generic when it
>> comes to splitting pages, it currently only handles THP sizes
>> corresponding to HPAGE_PMD_NR.
>>
>> Modify page_vma_mapped_walk() to return true when a zone device
>> huge entry is present, enabling try_to_migrate() and other
>> migration code paths to process the entry appropriately.
>>
>> pmd_pfn() does not work well with zone device entries; use
>> pfn_pmd_entry_to_swap() instead for checking and comparing zone
>> device entries.
>>
>> try_to_map_to_unused_zeropage() does not apply to zone device
>> entries; such entries are ignored in the call.
>>
>> Cc: Karol Herbst <kherbst@...hat.com>
>> Cc: Lyude Paul <lyude@...hat.com>
>> Cc: Danilo Krummrich <dakr@...nel.org>
>> Cc: David Airlie <airlied@...il.com>
>> Cc: Simona Vetter <simona@...ll.ch>
>> Cc: "Jérôme Glisse" <jglisse@...hat.com>
>> Cc: Shuah Khan <shuah@...nel.org>
>> Cc: David Hildenbrand <david@...hat.com>
>> Cc: Barry Song <baohua@...nel.org>
>> Cc: Baolin Wang <baolin.wang@...ux.alibaba.com>
>> Cc: Ryan Roberts <ryan.roberts@....com>
>> Cc: Matthew Wilcox <willy@...radead.org>
>> Cc: Peter Xu <peterx@...hat.com>
>> Cc: Zi Yan <ziy@...dia.com>
>> Cc: Kefeng Wang <wangkefeng.wang@...wei.com>
>> Cc: Jane Chu <jane.chu@...cle.com>
>> Cc: Alistair Popple <apopple@...dia.com>
>> Cc: Donet Tom <donettom@...ux.ibm.com>
>>
>> Signed-off-by: Balbir Singh <balbirs@...dia.com>
>> ---
>> mm/huge_memory.c | 153 +++++++++++++++++++++++++++++++------------
>> mm/migrate.c | 2 +
>> mm/page_vma_mapped.c | 10 +++
>> mm/pgtable-generic.c | 6 ++
>> mm/rmap.c | 19 +++++-
>> 5 files changed, 146 insertions(+), 44 deletions(-)
>>
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index ce130225a8e5..e6e390d0308f 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -1711,7 +1711,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
>> if (unlikely(is_swap_pmd(pmd))) {
>> swp_entry_t entry = pmd_to_swp_entry(pmd);
>>
>> - VM_BUG_ON(!is_pmd_migration_entry(pmd));
>> + VM_BUG_ON(!is_pmd_migration_entry(pmd) &&
>> + !is_device_private_entry(entry));
>> if (!is_readable_migration_entry(entry)) {
>> entry = make_readable_migration_entry(
>> swp_offset(entry));
>> @@ -2222,10 +2223,17 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
>> } else if (thp_migration_supported()) {
>> swp_entry_t entry;
>>
>> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
>> entry = pmd_to_swp_entry(orig_pmd);
>> folio = pfn_swap_entry_folio(entry);
>> flush_needed = 0;
>> +
>> + VM_BUG_ON(!is_pmd_migration_entry(*pmd) &&
>> + !folio_is_device_private(folio));
>> +
>> + if (folio_is_device_private(folio)) {
>> + folio_remove_rmap_pmd(folio, folio_page(folio, 0), vma);
>> + WARN_ON_ONCE(folio_mapcount(folio) < 0);
>> + }
>> } else
>> WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
>>
>> @@ -2247,6 +2255,15 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
>> folio_mark_accessed(folio);
>> }
>>
>> + /*
>> + * Do a folio put on zone device private pages after
>> + * changes to mm_counter, because the folio_put() will
>> + * clean folio->mapping and the folio_test_anon() check
>> + * will not be usable.
>> + */
>> + if (folio_is_device_private(folio))
>> + folio_put(folio);
>> +
>> spin_unlock(ptl);
>> if (flush_needed)
>> tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
>> @@ -2375,7 +2392,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
>> struct folio *folio = pfn_swap_entry_folio(entry);
>> pmd_t newpmd;
>>
>> - VM_BUG_ON(!is_pmd_migration_entry(*pmd));
>> + VM_BUG_ON(!is_pmd_migration_entry(*pmd) &&
>> + !folio_is_device_private(folio));
>> if (is_writable_migration_entry(entry)) {
>> /*
>> * A protection check is difficult so
>> @@ -2388,9 +2406,11 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
>> newpmd = swp_entry_to_pmd(entry);
>> if (pmd_swp_soft_dirty(*pmd))
>> newpmd = pmd_swp_mksoft_dirty(newpmd);
>> - } else {
>> + } else if (is_writable_device_private_entry(entry)) {
>> + newpmd = swp_entry_to_pmd(entry);
>> + entry = make_device_exclusive_entry(swp_offset(entry));
>> + } else
>> newpmd = *pmd;
>> - }
>>
>> if (uffd_wp)
>> newpmd = pmd_swp_mkuffd_wp(newpmd);
>> @@ -2842,16 +2862,20 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>> struct page *page;
>> pgtable_t pgtable;
>> pmd_t old_pmd, _pmd;
>> - bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
>> - bool anon_exclusive = false, dirty = false;
>> + bool young, write, soft_dirty, uffd_wp = false;
>> + bool anon_exclusive = false, dirty = false, present = false;
>> unsigned long addr;
>> pte_t *pte;
>> int i;
>> + swp_entry_t swp_entry;
>>
>> VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
>> VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
>> VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
>> - VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd));
>> +
>> + VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
>> + && !(is_swap_pmd(*pmd) &&
>> + is_device_private_entry(pmd_to_swp_entry(*pmd))));
>>
>> count_vm_event(THP_SPLIT_PMD);
>>
>> @@ -2899,20 +2923,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>> return __split_huge_zero_page_pmd(vma, haddr, pmd);
>> }
>>
>> - pmd_migration = is_pmd_migration_entry(*pmd);
>> - if (unlikely(pmd_migration)) {
>> - swp_entry_t entry;
>>
>> + present = pmd_present(*pmd);
>> + if (unlikely(!present)) {
>> + swp_entry = pmd_to_swp_entry(*pmd);
>> old_pmd = *pmd;
>> - entry = pmd_to_swp_entry(old_pmd);
>> - page = pfn_swap_entry_to_page(entry);
>> - write = is_writable_migration_entry(entry);
>> +
>> + folio = pfn_swap_entry_folio(swp_entry);
>> + VM_BUG_ON(!is_migration_entry(swp_entry) &&
>> + !is_device_private_entry(swp_entry));
>> + page = pfn_swap_entry_to_page(swp_entry);
>> + write = is_writable_migration_entry(swp_entry);
>> +
>> if (PageAnon(page))
>> - anon_exclusive = is_readable_exclusive_migration_entry(entry);
>> - young = is_migration_entry_young(entry);
>> - dirty = is_migration_entry_dirty(entry);
>> + anon_exclusive =
>> + is_readable_exclusive_migration_entry(swp_entry);
>> soft_dirty = pmd_swp_soft_dirty(old_pmd);
>> uffd_wp = pmd_swp_uffd_wp(old_pmd);
>> + young = is_migration_entry_young(swp_entry);
>> + dirty = is_migration_entry_dirty(swp_entry);
>> } else {
>
> This is where folio_try_share_anon_rmap_pmd() is skipped for device private pages, to which I referred in
> https://lore.kernel.org/linux-mm/f1e26e18-83db-4c0e-b8d8-0af8ffa8a206@redhat.com/
>
Does it matter for device private pages/folios? It does not affect the freeze value.
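
Roughly how I read the two branches in __split_huge_pmd_locked() (a
simplified sketch based on the hunk above and the existing
present-PMD path, not the exact code):

	/*
	 * Present anon PMD: freeze is only dropped when the page is
	 * AnonExclusive and folio_try_share_anon_rmap_pmd() cannot
	 * clear the bit.
	 */
	anon_exclusive = PageAnonExclusive(page);
	if (freeze && anon_exclusive &&
	    folio_try_share_anon_rmap_pmd(folio, page))
		freeze = false;

	/*
	 * Non-present PMD (migration or device private swap entry):
	 * exclusivity is read back from the swap entry and freeze is
	 * left as the caller passed it.
	 */
	if (PageAnon(page))
		anon_exclusive =
			is_readable_exclusive_migration_entry(swp_entry);

In the non-present branch the exclusivity is already encoded in the
swap entry, so skipping folio_try_share_anon_rmap_pmd() there should
not change what freeze ends up being for device private folios.
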
Balbir Singh