Message-ID: <20240906042108.1150526-3-yuzhao@google.com>
Date: Thu, 5 Sep 2024 22:21:08 -0600
From: Yu Zhao <yuzhao@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>, Kent Overstreet <kent.overstreet@...ux.dev>,
Suren Baghdasaryan <surenb@...gle.com>
Cc: Muchun Song <muchun.song@...ux.dev>, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Yu Zhao <yuzhao@...gle.com>, stable@...r.kernel.org
Subject: [PATCH mm-unstable v2 3/3] mm/codetag: add pgalloc_tag_copy()

Add pgalloc_tag_copy() to transfer the codetag from the old folio to
the new one during migration. This makes the original allocation sites
persist across migration rather than being lumped into the
get_new_folio callbacks passed into migrate_pages(), e.g.,
compaction_alloc():

  # echo 1 >/proc/sys/vm/compact_memory
  # grep compaction_alloc /proc/allocinfo

Before this patch:
  132968448    32463 mm/compaction.c:1880 func:compaction_alloc

After this patch:
          0        0 mm/compaction.c:1880 func:compaction_alloc

Fixes: dcfe378c81f7 ("lib: introduce support for page allocation tagging")
Signed-off-by: Yu Zhao <yuzhao@...gle.com>
Acked-by: Suren Baghdasaryan <surenb@...gle.com>
Cc: <stable@...r.kernel.org>
---
include/linux/alloc_tag.h | 24 ++++++++++--------------
include/linux/mm.h | 27 +++++++++++++++++++++++++++
mm/migrate.c | 1 +
3 files changed, 38 insertions(+), 14 deletions(-)
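
[Editor's note: the accounting transfer this patch performs can be
modeled in a few lines of user-space C. The sketch below only
illustrates the semantics and is not kernel code: model_alloc(),
model_sub(), and model_copy() are hypothetical stand-ins for
alloc_tag_add(), alloc_tag_sub(), and pgalloc_tag_copy(), the counters
are plain integers rather than per-CPU counters, and the RCU/handle
dance around page tag refs is ignored.

  #include <assert.h>
  #include <stddef.h>

  struct tag { long bytes, calls; };   /* models struct alloc_tag  */
  struct ref { struct tag *ct; };      /* models union codetag_ref */

  /* models alloc_tag_add(): charge a fresh allocation to a site */
  static void model_alloc(struct ref *ref, struct tag *site, size_t bytes)
  {
          ref->ct = site;
          site->calls += 1;
          site->bytes += bytes;
  }

  /* models alloc_tag_sub(): drop the charge currently held by a ref */
  static void model_sub(struct ref *ref, size_t bytes)
  {
          ref->ct->bytes -= bytes;
          ref->ct->calls -= 1;
          ref->ct = NULL;
  }

  /* models pgalloc_tag_copy(): move the charge from old folio to new */
  static void model_copy(struct ref *new, struct ref *old, size_t bytes)
  {
          struct tag *tag = old->ct;  /* pgalloc_tag_get(&old->page)    */

          old->ct = NULL;             /* clear_page_tag_ref(&old->page) */
          model_sub(new, bytes);      /* undo the get_new_folio charge  */
          new->ct = tag;              /* __alloc_tag_ref_set(): repoint */
                                      /* only; no counters bumped - the */
                                      /* folio was already counted at   */
                                      /* its original site              */
  }

  int main(void)
  {
          struct tag orig = {0, 0}, compaction = {0, 0};
          struct ref old_folio = {NULL}, new_folio = {NULL};

          model_alloc(&old_folio, &orig, 4096);       /* original site    */
          model_alloc(&new_folio, &compaction, 4096); /* compaction_alloc */
          model_copy(&new_folio, &old_folio, 4096);   /* migration        */

          /* the charge stays with the original site across migration */
          assert(orig.bytes == 4096 && orig.calls == 1);
          assert(compaction.bytes == 0 && compaction.calls == 0);
          return 0;
  }

This is also why the patch splits __alloc_tag_ref_set() out of
alloc_tag_ref_set() below: the migration path needs a pure repoint
that bumps neither the calls nor the bytes counters.]
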
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 896491d9ebe8..1f0a9ff23a2c 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -137,7 +137,16 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
 /* Caller should verify both ref and tag to be valid */
 static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
 {
+        alloc_tag_add_check(ref, tag);
+        if (!ref || !tag)
+                return;
+
         ref->ct = &tag->ct;
+}
+
+static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+        __alloc_tag_ref_set(ref, tag);
         /*
          * We need to increment the call counter every time we have a new
          * allocation or when we split a large allocation into smaller ones.
@@ -147,22 +156,9 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
         this_cpu_inc(tag->counters->calls);
 }
 
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
-{
-        alloc_tag_add_check(ref, tag);
-        if (!ref || !tag)
-                return;
-
-        __alloc_tag_ref_set(ref, tag);
-}
-
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
 {
-        alloc_tag_add_check(ref, tag);
-        if (!ref || !tag)
-                return;
-
-        __alloc_tag_ref_set(ref, tag);
+        alloc_tag_ref_set(ref, tag);
         this_cpu_add(tag->counters->bytes, bytes);
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a07e93adb8ad..d750be768121 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4161,10 +4161,37 @@ static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new
                 }
         }
 }
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+        struct alloc_tag *tag;
+        union codetag_ref *ref;
+
+        tag = pgalloc_tag_get(&old->page);
+        if (!tag)
+                return;
+
+        ref = get_page_tag_ref(&new->page);
+        if (!ref)
+                return;
+
+        /* Clear the old ref to the original allocation tag. */
+        clear_page_tag_ref(&old->page);
+        /* Decrement the counters of the tag on get_new_folio. */
+        alloc_tag_sub(ref, folio_nr_pages(new));
+
+        __alloc_tag_ref_set(ref, tag);
+
+        put_page_tag_ref(ref);
+}
 #else /* !CONFIG_MEM_ALLOC_PROFILING */
 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
 {
 }
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+}
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
 #endif /* _LINUX_MM_H */
diff --git a/mm/migrate.c b/mm/migrate.c
index 0f6b78fd73aa..dfdb3a136bf8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -743,6 +743,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
                 folio_set_readahead(newfolio);
 
         folio_copy_owner(newfolio, folio);
+        pgalloc_tag_copy(newfolio, folio);
 
         mem_cgroup_migrate(folio, newfolio);
 }
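
[Editor's note: for readers skimming the alloc_tag.h refactor above,
the resulting helper layering can be summarized as follows; this is an
illustration inferred from the hunks, not code taken from the patch:

  /* point ref at tag; no counters touched -- what pgalloc_tag_copy()
   * needs, since the allocation was already counted at its site */
  __alloc_tag_ref_set(ref, tag);

  /* __alloc_tag_ref_set() plus calls++ -- for a new reference created
   * when a large allocation is split into smaller ones */
  alloc_tag_ref_set(ref, tag);

  /* alloc_tag_ref_set() plus bytes accounting -- for fresh allocations */
  alloc_tag_add(ref, tag, bytes);]
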
--
2.46.0.469.g59c65b2a67-goog