Message-ID: <20250424155606.57488-1-lance.yang@linux.dev>
Date: Thu, 24 Apr 2025 23:56:06 +0800
From: Lance Yang <ioworker0@...il.com>
To: akpm@...ux-foundation.org
Cc: mingzhe.yang@...com,
david@...hat.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Lance Yang <lance.yang@...ux.dev>
Subject: [PATCH v3 1/1] mm/rmap: inline folio_test_large_maybe_mapped_shared() into callers
From: Lance Yang <lance.yang@...ux.dev>
To prevent folio_test_large_maybe_mapped_shared() from being used when
CONFIG_MM_ID is disabled, inline it into its few callers; this also helps
maintain the expected code placement.
Suggested-by: David Hildenbrand <david@...hat.com>
Signed-off-by: Lance Yang <lance.yang@...ux.dev>
---
v2 -> v3:
* Inline the function, as suggested by David
* https://lore.kernel.org/all/20250418152228.20545-1-lance.yang@linux.dev
v1 -> v2:
* Update the changelog, as suggested by Andrew and David
* https://lore.kernel.org/linux-mm/20250417124908.58543-1-ioworker0@gmail.com
include/linux/mm.h | 2 +-
include/linux/page-flags.h | 4 ----
include/linux/rmap.h | 2 +-
mm/memory.c | 4 ++--
4 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf55206935c4..67e3b4f9cdc8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2303,7 +2303,7 @@ static inline bool folio_maybe_mapped_shared(struct folio *folio)
*/
if (mapcount <= 1)
return false;
- return folio_test_large_maybe_mapped_shared(folio);
+ return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
}
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e6a21b62dcce..8107c2ea43c4 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1230,10 +1230,6 @@ static inline int folio_has_private(const struct folio *folio)
return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}
-static inline bool folio_test_large_maybe_mapped_shared(const struct folio *folio)
-{
- return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
-}
#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6b82b618846e..c4f4903b1088 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -223,7 +223,7 @@ static inline void __folio_large_mapcount_sanity_checks(const struct folio *foli
VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY &&
folio->_mm_id_mapcount[1] < 0);
VM_WARN_ON_ONCE(!folio_mapped(folio) &&
- folio_test_large_maybe_mapped_shared(folio));
+ test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids));
}
static __always_inline void folio_set_large_mapcount(struct folio *folio,
diff --git a/mm/memory.c b/mm/memory.c
index ba3ea0a82f7f..5e033adf67b1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3730,7 +3730,7 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
* If all folio references are from mappings, and all mappings are in
* the page tables of this MM, then this folio is exclusive to this MM.
*/
- if (folio_test_large_maybe_mapped_shared(folio))
+ if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
return false;
VM_WARN_ON_ONCE(folio_test_ksm(folio));
@@ -3753,7 +3753,7 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
folio_lock_large_mapcount(folio);
VM_WARN_ON_ONCE(folio_large_mapcount(folio) < folio_ref_count(folio));
- if (folio_test_large_maybe_mapped_shared(folio))
+ if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids))
goto unlock;
if (folio_large_mapcount(folio) != folio_ref_count(folio))
goto unlock;
--
2.49.0