Message-ID: <20241024162318.1640781-1-surenb@google.com>
Date: Thu, 24 Oct 2024 09:23:18 -0700
From: Suren Baghdasaryan <surenb@...gle.com>
To: akpm@...ux-foundation.org
Cc: kent.overstreet@...ux.dev, yuzhao@...gle.com, souravpanda@...gle.com,
pasha.tatashin@...een.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
surenb@...gle.com
Subject: [PATCH 1/1] mm/codetag: uninline and move pgalloc_tag_copy and pgalloc_tag_split

pgalloc_tag_copy() and pgalloc_tag_split() are sizable and outside of
any performance-critical paths, so it should be fine to uninline them.
Also move their declarations into pgalloc_tag.h, which seems like a
more appropriate place for them than mm.h.

No functional changes other than uninlining.

Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
Suggested-by: Andrew Morton <akpm@...ux-foundation.org>
---
Applies over mm-unstable
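
A note for reviewers, not part of the patch: the sketch below
illustrates when the now out-of-line helper runs. The caller shown,
demo_split_folio(), is hypothetical and only exists for illustration.
When a large folio is split into smaller chunks, pgalloc_tag_split()
points the codetag reference of each new chunk's head page back at the
tag of the original allocation, so the profiling counters keep
attributing all of the pages to the original allocation site.

#include <linux/mm.h>
#include <linux/pgalloc_tag.h>

/* Hypothetical caller, for illustration only. */
static void demo_split_folio(struct folio *folio)
{
	int old_order = 3;	/* folio currently covers 8 pages */
	int new_order = 1;	/* split it into 2-page chunks */

	/*
	 * After this call, the head pages of the chunks at offsets 2, 4
	 * and 6 carry references to the same alloc_tag as the page at
	 * offset 0. pgalloc_tag_copy() plays the analogous role when a
	 * folio is replaced (e.g. on migration), transferring the tag
	 * from the old folio to the new one.
	 */
	pgalloc_tag_split(folio, old_order, new_order);
}
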
include/linux/mm.h | 58 -------------------------------------
include/linux/pgalloc_tag.h | 5 ++++
lib/alloc_tag.c | 48 ++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 58 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4ef8cf1043f1..5184624c0f21 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4165,62 +4165,4 @@ static inline int do_mseal(unsigned long start, size_t len_in, unsigned long fla
}
#endif
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
-{
- int i;
- struct alloc_tag *tag;
- unsigned int nr_pages = 1 << new_order;
-
- if (!mem_alloc_profiling_enabled())
- return;
-
- tag = pgalloc_tag_get(&folio->page);
- if (!tag)
- return;
-
- for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
- union pgtag_ref_handle handle;
- union codetag_ref ref;
-
- if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
- /* Set new reference to point to the original tag */
- alloc_tag_ref_set(&ref, tag);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
- }
- }
-}
-
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
-{
- union pgtag_ref_handle handle;
- union codetag_ref ref;
- struct alloc_tag *tag;
-
- tag = pgalloc_tag_get(&old->page);
- if (!tag)
- return;
-
- if (!get_page_tag_ref(&new->page, &ref, &handle))
- return;
-
- /* Clear the old ref to the original allocation tag. */
- clear_page_tag_ref(&old->page);
- /* Decrement the counters of the tag on get_new_folio. */
- alloc_tag_sub(&ref, folio_size(new));
- __alloc_tag_ref_set(&ref, tag);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
-}
-#else /* !CONFIG_MEM_ALLOC_PROFILING */
-static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
-{
-}
-
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
-{
-}
-#endif /* CONFIG_MEM_ALLOC_PROFILING */
-
#endif /* _LINUX_MM_H */
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 1fe63b52e5e5..0e43ab653ab6 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -230,6 +230,9 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
}
+void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
+void pgalloc_tag_copy(struct folio *new, struct folio *old);
+
void __init alloc_tag_sec_init(void);
#else /* CONFIG_MEM_ALLOC_PROFILING */
@@ -241,6 +244,8 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
static inline void alloc_tag_sec_init(void) {}
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING */
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index a6f6f014461e..c1ddac2d29f0 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -163,6 +163,54 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sl
return nr;
}
+void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+ int i;
+ struct alloc_tag *tag;
+ unsigned int nr_pages = 1 << new_order;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = pgalloc_tag_get(&folio->page);
+ if (!tag)
+ return;
+
+ for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
+ /* Set new reference to point to the original tag */
+ alloc_tag_ref_set(&ref, tag);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+ }
+ }
+}
+
+void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+ struct alloc_tag *tag;
+
+ tag = pgalloc_tag_get(&old->page);
+ if (!tag)
+ return;
+
+ if (!get_page_tag_ref(&new->page, &ref, &handle))
+ return;
+
+ /* Clear the old ref to the original allocation tag. */
+ clear_page_tag_ref(&old->page);
+ /* Decrement the counters of the tag on get_new_folio. */
+ alloc_tag_sub(&ref, folio_size(new));
+ __alloc_tag_ref_set(&ref, tag);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+}
+
static void shutdown_mem_profiling(bool remove_file)
{
if (mem_alloc_profiling_enabled())
base-commit: 9c111059234a949a4d3442a413ade19cc65ab927
--
2.47.0.105.g07ac214952-goog