[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220512044634.63586-5-ligang.bdlg@bytedance.com>
Date: Thu, 12 May 2022 12:46:33 +0800
From: Gang Li <ligang.bdlg@...edance.com>
To: akpm@...ux-foundation.org
Cc: songmuchun@...edance.com, hca@...ux.ibm.com, gor@...ux.ibm.com,
agordeev@...ux.ibm.com, borntraeger@...ux.ibm.com,
svens@...ux.ibm.com, ebiederm@...ssion.com, keescook@...omium.org,
viro@...iv.linux.org.uk, rostedt@...dmis.org, mingo@...hat.com,
peterz@...radead.org, acme@...nel.org, mark.rutland@....com,
alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
namhyung@...nel.org, david@...hat.com, imbrenda@...ux.ibm.com,
apopple@...dia.com, adobriyan@...il.com,
stephen.s.brennan@...cle.com, ohoono.kwon@...sung.com,
haolee.swjtu@...il.com, kaleshsingh@...gle.com,
zhengqi.arch@...edance.com, peterx@...hat.com, shy828301@...il.com,
surenb@...gle.com, ccross@...gle.com, vincent.whitchurch@...s.com,
tglx@...utronix.de, bigeasy@...utronix.de, fenghua.yu@...el.com,
linux-s390@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Gang Li <ligang.bdlg@...edance.com>
Subject: [PATCH 4/5 v1] mm: enable per numa node rss_stat count
Now that all the infrastructure is in place, modify `get/add/inc/dec_mm_counter`,
`sync_mm_rss`, `add_mm_counter_fast` and `add_mm_rss_vec` to enable the
per-NUMA-node rss_stat count.
Signed-off-by: Gang Li <ligang.bdlg@...edance.com>
---
include/linux/mm.h | 42 +++++++++++++++++++++++++++++++++++-------
mm/memory.c | 20 ++++++++++++++++++--
2 files changed, 53 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cde5529285d6..f0f21065b81b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1994,8 +1994,18 @@ static inline bool get_user_page_fast_only(unsigned long addr,
*/
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member, int node)
{
- long val = atomic_long_read(&mm->rss_stat.count[member]);
+ long val;
+ WARN_ON(node == NUMA_NO_NODE && member == MM_NO_TYPE);
+
+ if (node == NUMA_NO_NODE)
+ val = atomic_long_read(&mm->rss_stat.count[member]);
+ else
+#ifdef CONFIG_NUMA
+ val = atomic_long_read(&mm->rss_stat.numa_count[node]);
+#else
+ val = 0;
+#endif
#ifdef SPLIT_RSS_COUNTING
/*
* counter is updated in asynchronous manner and may go to minus.
@@ -2012,23 +2022,41 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long member_count, int
static inline void add_mm_counter(struct mm_struct *mm, int member, long value, int node)
{
- long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;
- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, value);
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_add_return(value, &mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, value);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member, int node)
{
- long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;
- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, 1);
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_inc_return(&mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, 1);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member, int node)
{
- long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;
- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, -1);
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_dec_return(&mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, -1);
}
/* Optimized variant when page is already known not to be PageAnon */
diff --git a/mm/memory.c b/mm/memory.c
index 2d3040a190f6..f7b67da772b2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -188,6 +188,14 @@ void sync_mm_rss(struct mm_struct *mm)
current->rss_stat.count[i] = 0;
}
}
+#ifdef CONFIG_NUMA
+ for_each_node(i) {
+ if (current->rss_stat.numa_count[i]) {
+ add_mm_counter(mm, MM_NO_TYPE, current->rss_stat.numa_count[i], i);
+ current->rss_stat.numa_count[i] = 0;
+ }
+ }
+#endif
current->rss_stat.events = 0;
}
@@ -195,9 +203,12 @@ static void add_mm_counter_fast(struct mm_struct *mm, int member, int val, int n
{
struct task_struct *task = current;
- if (likely(task->mm == mm))
+ if (likely(task->mm == mm)) {
task->rss_stat.count[member] += val;
- else
+#ifdef CONFIG_NUMA
+ task->rss_stat.numa_count[node] += val;
+#endif
+ } else
add_mm_counter(mm, member, val, node);
}
#define inc_mm_counter_fast(mm, member, node) add_mm_counter_fast(mm, member, 1, node)
@@ -508,6 +519,11 @@ static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss, int *numa_rss)
for (i = 0; i < NR_MM_COUNTERS; i++)
if (rss[i])
add_mm_counter(mm, i, rss[i], NUMA_NO_NODE);
+#ifdef CONFIG_NUMA
+ for_each_node(i)
+ if (numa_rss[i] != 0)
+ add_mm_counter(mm, MM_NO_TYPE, numa_rss[i], i);
+#endif
}
/*
--
2.20.1
Powered by blists - more mailing lists