Message-Id: <20240425085051.74889-1-ioworker0@gmail.com>
Date: Thu, 25 Apr 2024 16:50:51 +0800
From: Lance Yang <ioworker0@...il.com>
To: david@...hat.com,
ziy@...dia.com
Cc: ioworker0@...il.com,
21cnbao@...il.com,
akpm@...ux-foundation.org,
fengwei.yin@...el.com,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
maskray@...gle.com,
mhocko@...e.com,
minchan@...nel.org,
peterx@...hat.com,
ryan.roberts@....com,
shy828301@...il.com,
songmuchun@...edance.com,
wangkefeng.wang@...wei.com,
willy@...radead.org,
xiehuan09@...il.com,
zokeefe@...gle.com
Subject: Re: [PATCH v2 1/1] mm/vmscan: avoid split PMD-mapped THP during shrink_folio_list()
Hey Zi, David,

How about the following change (diff against mm-unstable)?

I'd like to add __try_to_unmap_huge_pmd() as a new internal function
specifically for unmapping PMD-mapped folios. If, for any reason, we
cannot unmap the folio, we still fall back to splitting it as before.

Currently, __try_to_unmap_huge_pmd() only handles lazyfree THPs, but it
can be extended to other PMD-mapped large folios in the future if needed.
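
To make the resulting control flow easier to see, here is the core of
the change in try_to_unmap_one() condensed into a few lines (just a
sketch of the diff below; discard_trans_pmd() is the helper introduced
earlier in this series):

        if (flags & TTU_SPLIT_HUGE_PMD) {
                /*
                 * First try to unmap the PMD-mapped folio as a whole.
                 * For now this only succeeds for lazyfree THPs
                 * (anon && !swapbacked), where __try_to_unmap_huge_pmd()
                 * calls discard_trans_pmd(); anything else falls through.
                 */
                if (__try_to_unmap_huge_pmd(vma, address, folio))
                        return true;

                /* Otherwise, split the PMD as previously done. */
                split_huge_pmd_address(vma, address, false, folio);
        }
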
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 670218f762c8..0f906dc6d280 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -100,8 +100,6 @@ enum ttu_flags {
                                          * do a final flush if necessary */
         TTU_RMAP_LOCKED         = 0x80,  /* do not grab rmap lock:
                                          * caller holds it */
-        TTU_LAZYFREE_THP        = 0x100, /* avoid splitting PMD-mapped THPs
-                                          * that are marked as lazyfree. */
 };
 
 #ifdef CONFIG_MMU
diff --git a/mm/rmap.c b/mm/rmap.c
index a7913a454028..879c8923abfc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1606,6 +1606,19 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
 #endif
 }
 
+static bool __try_to_unmap_huge_pmd(struct vm_area_struct *vma,
+                                    unsigned long addr, struct folio *folio)
+{
+        VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
+                return discard_trans_pmd(vma, addr, folio);
+#endif
+
+        return false;
+}
+
 /*
  * @arg: enum ttu_flags will be passed to this argument
  */
@@ -1631,14 +1644,11 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
         if (flags & TTU_SYNC)
                 pvmw.flags = PVMW_SYNC;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        if (flags & TTU_LAZYFREE_THP)
-                if (discard_trans_pmd(vma, address, folio))
+        if (flags & TTU_SPLIT_HUGE_PMD) {
+                if (__try_to_unmap_huge_pmd(vma, address, folio))
                         return true;
-#endif
-
-        if (flags & TTU_SPLIT_HUGE_PMD)
                 split_huge_pmd_address(vma, address, false, folio);
+        }
 
         /*
          * For THP, we have to assume the worse case ie pmd for invalidation.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e2686cc0c037..49bd94423961 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1277,13 +1277,6 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 
                         if (folio_test_pmd_mappable(folio))
                                 flags |= TTU_SPLIT_HUGE_PMD;
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                        if (folio_test_anon(folio) && !was_swapbacked &&
-                            (flags & TTU_SPLIT_HUGE_PMD))
-                                flags |= TTU_LAZYFREE_THP;
-#endif
-
                         /*
                          * Without TTU_SYNC, try_to_unmap will only begin to
                          * hold PTL from the first present PTE within a large
--
Thanks,
Lance