[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <427kxhljdcrn7thput727j6vpqvxtalistn6yoq6ykdpbe5435@sn7a4rh7zcdo>
Date: Thu, 25 Sep 2025 10:31:54 +0100
From: Kiryl Shutsemau <kirill@...temov.name>
To: Dev Jain <dev.jain@....com>
Cc: akpm@...ux-foundation.org, david@...hat.com,
lorenzo.stoakes@...cle.com, Liam.Howlett@...cle.com, vbabka@...e.cz, rppt@...nel.org,
surenb@...gle.com, mhocko@...e.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mm: move rmap of mTHP upon CoW reuse
On Thu, Sep 25, 2025 at 02:24:29PM +0530, Dev Jain wrote:
> At wp-fault time, when we find that a folio is exclusively mapped, we move
> folio->mapping to the faulting VMA's anon_vma, so that rmap overhead
> reduces. This is currently done for small folios (base pages) and
> PMD-mapped THPs. Do this for mTHP too.
>
> Signed-off-by: Dev Jain <dev.jain@....com>
> ---
> mm-selftests pass.
>
> mm/memory.c | 5 +++++
> 1 file changed, 5 insertions(+)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 7e32eb79ba99..ec04d2cec6b1 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4014,6 +4014,11 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
> * an additional folio reference and never ended up here.
> */
> exclusive = true;
> +
> + if (folio_trylock(folio)) {
> + folio_move_anon_rmap(folio, vma);
> + folio_unlock(folio);
> + }
Maybe take the folio lock earlier in wp_can_reuse_anon_folio() to cover
large folio handling too and avoid trylock here.
Something like this (untested):
diff --git a/mm/memory.c b/mm/memory.c
index 812a7d9f6531..d95cf670b6a8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3843,6 +3843,7 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
* an additional folio reference and never ended up here.
*/
exclusive = true;
+ folio_move_anon_rmap(folio, vma);
unlock:
folio_unlock_large_mapcount(folio);
return exclusive;
@@ -3858,8 +3859,15 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
static bool wp_can_reuse_anon_folio(struct folio *folio,
struct vm_area_struct *vma)
{
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio))
- return __wp_can_reuse_large_anon_folio(folio, vma);
+ bool exclusive = false;
+
+ if (!folio_trylock(folio))
+ return false;
+
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio)) {
+ exclusive = __wp_can_reuse_large_anon_folio(folio, vma);
+ goto unlock;
+ }
/*
* We have to verify under folio lock: these early checks are
@@ -3869,7 +3877,8 @@ static bool wp_can_reuse_anon_folio(struct folio *folio,
* KSM doesn't necessarily raise the folio refcount.
*/
if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
- return false;
+ goto unlock;
+
if (!folio_test_lru(folio))
/*
* We cannot easily detect+handle references from
@@ -3877,23 +3886,23 @@ static bool wp_can_reuse_anon_folio(struct folio *folio,
*/
lru_add_drain();
if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
- return false;
- if (!folio_trylock(folio))
- return false;
+ goto unlock;
+
if (folio_test_swapcache(folio))
folio_free_swap(folio);
- if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
- folio_unlock(folio);
- return false;
- }
+ if (folio_test_ksm(folio) || folio_ref_count(folio) != 1)
+ goto unlock;
+
/*
* Ok, we've got the only folio reference from our mapping
* and the folio is locked, it's dark out, and we're wearing
* sunglasses. Hit it.
*/
folio_move_anon_rmap(folio, vma);
+ exclusive = true;
+unlock:
folio_unlock(folio);
- return true;
+ return exclusive;
}
/*
--
Kiryl Shutsemau / Kirill A. Shutemov
Powered by blists - more mailing lists