Message-ID: <668bfb74-014c-4fd5-a636-ff5ec17861c3@linux.dev>
Date: Tue, 30 Sep 2025 13:22:54 +0800
From: Lance Yang <lance.yang@...ux.dev>
To: Dev Jain <dev.jain@....com>
Cc: peterx@...hat.com, ziy@...dia.com, baolin.wang@...ux.alibaba.com,
baohua@...nel.org, ryan.roberts@....com, npache@...hat.com,
riel@...riel.com, Liam.Howlett@...cle.com, vbabka@...e.cz,
harry.yoo@...cle.com, jannh@...gle.com, matthew.brost@...el.com,
joshua.hahnjy@...il.com, rakie.kim@...com, byungchul@...com,
gourry@...rry.net, ying.huang@...ux.alibaba.com, apopple@...dia.com,
usamaarif642@...il.com, yuzhao@...gle.com, lorenzo.stoakes@...cle.com,
linux-kernel@...r.kernel.org, linux-mm@...ck.org, ioworker0@...il.com,
stable@...r.kernel.org, akpm@...ux-foundation.org, david@...hat.com
Subject: Re: [PATCH v2 1/1] mm/rmap: fix soft-dirty and uffd-wp bit loss when
remapping zero-filled mTHP subpage to shared zeropage
On 2025/9/30 12:50, Dev Jain wrote:
>
> On 30/09/25 10:03 am, Lance Yang wrote:
>> From: Lance Yang <lance.yang@...ux.dev>
>>
>> When splitting an mTHP and replacing a zero-filled subpage with the
>> shared zeropage, try_to_map_unused_to_zeropage() currently drops
>> several important PTE bits.
>>
>> For userspace tools like CRIU, which rely on the soft-dirty mechanism for
>> incremental snapshots, losing the soft-dirty bit means modified pages are
>> missed, leading to inconsistent memory state after restore.
>>
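
[Aside, for anyone tracking the CRIU angle: userspace consumes soft-dirty
through /proc/pid/pagemap, where bit 55 of each 64-bit entry is the
soft-dirty flag, so a bit dropped here is directly visible to snapshot
tooling. A minimal, purely illustrative checker is below; error handling
is omitted, it needs CONFIG_MEM_SOFT_DIRTY, and the names are only for
the sketch:]

```
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Return the soft-dirty flag (pagemap bit 55) for one virtual address. */
static int soft_dirty(int pagemap_fd, unsigned long vaddr)
{
	uint64_t entry;
	off_t off = (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), off) != sizeof(entry))
		return -1;
	return (entry >> 55) & 1;
}

int main(void)
{
	static char page[1 << 16];	/* something to dirty */
	int clear_fd = open("/proc/self/clear_refs", O_WRONLY);
	int pagemap_fd = open("/proc/self/pagemap", O_RDONLY);

	write(clear_fd, "4", 1);	/* "4" resets soft-dirty for this task */
	page[0] = 1;			/* write -> soft-dirty should be set */
	printf("soft-dirty after write: %d\n",
	       soft_dirty(pagemap_fd, (unsigned long)page));
	return 0;
}
```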
>> As pointed out by David, the more critical uffd-wp bit is also dropped.
>> This breaks the userfaultfd write-protection mechanism, causing writes
>> to be silently missed by monitoring applications, which can lead to data
>> corruption.
>>
>> Preserve both the soft-dirty and uffd-wp bits from the old PTE when
>> creating the new zeropage mapping to ensure they are correctly tracked.
>>
>> Cc: <stable@...r.kernel.org>
>> Fixes: b1f202060afe ("mm: remap unused subpages to shared zeropage when splitting isolated thp")
>> Suggested-by: David Hildenbrand <david@...hat.com>
>> Suggested-by: Dev Jain <dev.jain@....com>
>> Acked-by: David Hildenbrand <david@...hat.com>
>> Signed-off-by: Lance Yang <lance.yang@...ux.dev>
>> ---
>> v1 -> v2:
>> - Avoid calling ptep_get() multiple times (per Dev)
>> - Double-check the uffd-wp bit (per David)
>> - Collect Acked-by from David - thanks!
>> - https://lore.kernel.org/linux-mm/20250928044855.76359-1-lance.yang@...ux.dev/
>>
>> mm/migrate.c | 9 ++++++++-
>> 1 file changed, 8 insertions(+), 1 deletion(-)
>>
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index ce83c2c3c287..50aa91d9ab4e 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -300,13 +300,14 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
>>  					  unsigned long idx)
>>  {
>>  	struct page *page = folio_page(folio, idx);
>> +	pte_t oldpte = ptep_get(pvmw->pte);
>
> What I meant to say was, you can pass oldpte from remove_migration_pte
> to this function. Basically, define old_pte = ptep_get(pvmw.pte) in the
> declarations at the start of the while block in remove_migration_pte
> and remove the existing one. That will ensure ptep_get() gets called
> only once per iteration.
Ah, got it. Thanks for the clarification!
IIUC, you mean something like this:
```
diff --git a/mm/migrate.c b/mm/migrate.c
index ce83c2c3c287..bafd8cb3bebe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -297,6 +297,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 
 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 					  struct folio *folio,
+					  pte_t old_pte,
 					  unsigned long idx)
 {
 	struct page *page = folio_page(folio, idx);
@@ -306,7 +307,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 		return false;
 	VM_BUG_ON_PAGE(!PageAnon(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
+	VM_BUG_ON_PAGE(pte_present(old_pte), page);
 
 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
@@ -322,6 +323,12 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 
 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
 					pvmw->vma->vm_page_prot));
+
+	if (pte_swp_soft_dirty(old_pte))
+		newpte = pte_mksoft_dirty(newpte);
+	if (pte_swp_uffd_wp(old_pte))
+		newpte = pte_mkuffd_wp(newpte);
+
 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
 
 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
@@ -344,7 +351,7 @@ static bool remove_migration_pte(struct folio *folio,
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		rmap_t rmap_flags = RMAP_NONE;
-		pte_t old_pte;
+		pte_t old_pte = ptep_get(pvmw.pte);
 		pte_t pte;
 		swp_entry_t entry;
 		struct page *new;
@@ -365,12 +372,11 @@ static bool remove_migration_pte(struct folio *folio,
 		}
 #endif
 		if (rmap_walk_arg->map_unused_to_zeropage &&
-		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+		    try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
 			continue;
 
 		folio_get(folio);
 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
-		old_pte = ptep_get(pvmw.pte);
 		entry = pte_to_swp_entry(old_pte);
 
 		if (!is_migration_entry_young(entry))
```
ptep_get() gets called only once per iteration, right?
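
And for completeness, a note on why the two sides of the fix use
different helpers (this is the same hunk as above with explanatory
comments added, not new logic):

```
	/*
	 * At this point the old PTE is a non-present migration entry, so
	 * soft-dirty and uffd-wp live in the swap-PTE encoding and are
	 * queried with the pte_swp_*() helpers. The replacement zeropage
	 * PTE is a present special mapping, so the bits are re-applied
	 * with the plain pte_mk*() helpers.
	 */
	if (pte_swp_soft_dirty(old_pte))
		newpte = pte_mksoft_dirty(newpte);
	if (pte_swp_uffd_wp(old_pte))
		newpte = pte_mkuffd_wp(newpte);
```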
>
>>  	pte_t newpte;
>>  
>>  	if (PageCompound(page))
>>  		return false;
>>  	VM_BUG_ON_PAGE(!PageAnon(page), page);
>>  	VM_BUG_ON_PAGE(!PageLocked(page), page);
>> -	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
>> +	VM_BUG_ON_PAGE(pte_present(oldpte), page);
>>  
>>  	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
>>  	    mm_forbids_zeropage(pvmw->vma->vm_mm))
>> @@ -322,6 +323,12 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
>>  
>>  	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
>>  					pvmw->vma->vm_page_prot));
>> +
>> +	if (pte_swp_soft_dirty(oldpte))
>> +		newpte = pte_mksoft_dirty(newpte);
>> +	if (pte_swp_uffd_wp(oldpte))
>> +		newpte = pte_mkuffd_wp(newpte);
>> +
>>  	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
>>  
>>  	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));