Message-Id: <20220322032225.1402992-1-zhangyinan2019@email.szu.edu.cn>
Date: Tue, 22 Mar 2022 11:22:24 +0800
From: Yinan Zhang <zhangyinan2019@...il.szu.edu.cn>
To: akpm@...ux-foundation.org
Cc: willy@...radead.org, vbabka@...e.cz, william.kucharski@...cle.com,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
yejiajian2018@...il.szu.edu.cn, hanshenghong2019@...il.szu.edu.cn,
caoyixuan2019@...il.szu.edu.cn, zhaochongxi2019@...il.szu.edu.cn,
yuhongf@....edu.cn, Yinan Zhang <zhangyinan2019@...il.szu.edu.cn>
Subject: [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner

When an application's memory consumption is high and keeps
increasing, it is suspected of having a memory leak. There are
several commonly used memory allocators: slab, cma, vmalloc, etc.
Memory leak identification can be sped up if the page information
recorded for each allocator can be analyzed separately. This patch
introduces a vmalloc allocator tag for page_owner.
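
With this change, each record in /sys/kernel/debug/page_owner gains
an "allocator" field at the end of the line printed by
print_page_owner(). The values in this sample are illustrative only:

  Page allocated via order 0, mask 0xcc0(GFP_KERNEL), pid 1234,
  tgid 1234 (modprobe), ts 132455662 ns, free_ts 0 ns, allocator vmalloc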

The following adjustments are made:
1) Add a member variable "allocator" to the page_owner struct. The
   valid values of "allocator" are predefined in a newly added
   string array "allocator_name".
2) Add a function __set_page_owner_allocator() to record the
   allocator in "allocator".
3) Add the allocator name to the output of print_page_owner().
A usage sketch of the new interface follows this list.
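
For illustration, a caller could tag its pages as shown below. The
function is only a hypothetical sketch, not part of this patch (the
actual vmalloc hook is expected to be added separately):

	/* Hypothetical example: tag freshly allocated pages. */
	static struct page *example_alloc_tagged(gfp_t gfp_mask,
						 unsigned short order)
	{
		struct page *page = alloc_pages(gfp_mask, order);

		/* Mark every page of the block as owned by vmalloc. */
		if (page)
			set_page_owner_allocator(page, order,
					PAGE_OWNER_ALLOCATOR_VMALLOC);
		return page;
	}
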
This work is coauthored by
Shenghong Han
Yixuan Cao
Chongxi Zhao
Jiajian Ye
Yuhong Feng
Yongqiang Liu

Signed-off-by: Yinan Zhang <zhangyinan2019@...il.szu.edu.cn>
---
include/linux/page_owner.h | 18 ++++++++++++++++++
 mm/page_owner.c            | 29 +++++++++++++++++++++++++++--
 2 files changed, 45 insertions(+), 2 deletions(-)

diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 119a0c9d2a8b..d559781dde67 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -11,6 +11,8 @@ extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
unsigned short order, gfp_t gfp_mask);
+extern void __set_page_owner_allocator(struct page *page, unsigned short order,
+ unsigned short allocator);
extern void __split_page_owner(struct page *page, unsigned int nr);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
@@ -18,6 +20,11 @@ extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			pg_data_t *pgdat, struct zone *zone);

+enum page_owner_allocator {
+ PAGE_OWNER_ALLOCATOR_UNKNOWN = 0,
+ PAGE_OWNER_ALLOCATOR_VMALLOC
+};
+
static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
@@ -31,6 +38,13 @@ static inline void set_page_owner(struct page *page,
__set_page_owner(page, order, gfp_mask);
 }

+static inline void set_page_owner_allocator(struct page *page, unsigned short order,
+ unsigned short allocator)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner_allocator(page, order, allocator);
+}
+
static inline void split_page_owner(struct page *page, unsigned int nr)
{
if (static_branch_unlikely(&page_owner_inited))
@@ -59,6 +73,10 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
+static inline void set_page_owner_allocator(struct page *page, unsigned short order,
+ unsigned short allocator)
+{
+}
static inline void split_page_owner(struct page *page,
unsigned short order)
{
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 0a9588506571..11bb805c61fd 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -32,6 +32,12 @@ struct page_owner {
char comm[TASK_COMM_LEN];
pid_t pid;
pid_t tgid;
+ unsigned short allocator;
+};
+
+const char * const allocator_name[] = {
+ "unknown",
+ "vmalloc",
 };

static bool page_owner_enabled = false;
@@ -148,6 +154,7 @@ void __reset_page_owner(struct page *page, unsigned short order)
page_owner = get_page_owner(page_ext);
page_owner->free_handle = handle;
page_owner->free_ts_nsec = free_ts_nsec;
+ page_owner->allocator = PAGE_OWNER_ALLOCATOR_UNKNOWN;
page_ext = page_ext_next(page_ext);
}
}
@@ -190,6 +197,22 @@ noinline void __set_page_owner(struct page *page, unsigned short order,
__set_page_owner_handle(page_ext, handle, order, gfp_mask);
 }

+void __set_page_owner_allocator(struct page *page, unsigned short order, unsigned short allocator)
+{
+ int i;
+ struct page_ext *page_ext;
+ struct page_owner *page_owner;
+
+ page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+ for (i = 0; i < (1 << order); i++) {
+ page_owner = get_page_owner(page_ext);
+ page_owner->allocator = allocator;
+ page_ext = page_ext_next(page_ext);
+ }
+}
+
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
struct page_ext *page_ext = lookup_page_ext(page);
@@ -238,6 +261,7 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
new_page_owner->tgid = old_page_owner->tgid;
new_page_owner->ts_nsec = old_page_owner->ts_nsec;
new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
+ new_page_owner->allocator = old_page_owner->allocator;
 	strcpy(new_page_owner->comm, old_page_owner->comm);

/*
@@ -386,11 +410,12 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 		return -ENOMEM;

ret = scnprintf(kbuf, count,
- "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
+ "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns, allocator %s\n",
page_owner->order, page_owner->gfp_mask,
&page_owner->gfp_mask, page_owner->pid,
page_owner->tgid, page_owner->comm,
- page_owner->ts_nsec, page_owner->free_ts_nsec);
+ page_owner->ts_nsec, page_owner->free_ts_nsec,
+			allocator_name[page_owner->allocator]);

/* Print information relevant to grouping pages by mobility */
pageblock_mt = get_pageblock_migratetype(page);
--
2.25.1