Message-Id: <20220708082129.80115-5-ligang.bdlg@bytedance.com>
Date: Fri, 8 Jul 2022 16:21:28 +0800
From: Gang Li <ligang.bdlg@...edance.com>
To: mhocko@...e.com, akpm@...ux-foundation.org, surenb@...gle.com
Cc: hca@...ux.ibm.com, gor@...ux.ibm.com, agordeev@...ux.ibm.com,
borntraeger@...ux.ibm.com, svens@...ux.ibm.com,
viro@...iv.linux.org.uk, ebiederm@...ssion.com,
keescook@...omium.org, rostedt@...dmis.org, mingo@...hat.com,
peterz@...radead.org, acme@...nel.org, mark.rutland@....com,
alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
namhyung@...nel.org, david@...hat.com, imbrenda@...ux.ibm.com,
adobriyan@...il.com, yang.yang29@....com.cn, brauner@...nel.org,
stephen.s.brennan@...cle.com, zhengqi.arch@...edance.com,
haolee.swjtu@...il.com, xu.xin16@....com.cn,
Liam.Howlett@...cle.com, ohoono.kwon@...sung.com,
peterx@...hat.com, arnd@...db.de, shy828301@...il.com,
alex.sierra@....com, xianting.tian@...ux.alibaba.com,
willy@...radead.org, ccross@...gle.com, vbabka@...e.cz,
sujiaxun@...ontech.com, sfr@...b.auug.org.au,
vasily.averin@...ux.dev, mgorman@...e.de, vvghjk1234@...il.com,
tglx@...utronix.de, luto@...nel.org, bigeasy@...utronix.de,
fenghua.yu@...el.com, linux-s390@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-perf-users@...r.kernel.org,
Gang Li <ligang.bdlg@...edance.com>
Subject: [PATCH v2 4/5] mm: enable per numa node rss_stat count

Now that all the infrastructure is in place, modify `*_mm_counter`,
`sync_mm_rss`, `add_mm_counter_fast` and `add_mm_rss_vec` to maintain
the per-NUMA-node rss_stat counts alongside the existing per-type
counts.
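
For example, a converted call site can charge a freshly mapped page
both to its type counter and to the counter of the node the page
resides on. The caller below is hypothetical and shown only to
illustrate the new `node` argument:

        /* account the page by type and by the NUMA node it lives on */
        inc_mm_counter_fast(vma->vm_mm, mm_counter(page), page_to_nid(page));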
Signed-off-by: Gang Li <ligang.bdlg@...edance.com>
---
include/linux/mm.h | 42 +++++++++++++++++++++++++++++++++++-------
mm/memory.c | 20 ++++++++++++++++++--
2 files changed, 53 insertions(+), 9 deletions(-)
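
Note: this patch relies on infrastructure introduced earlier in the
series -- the numa_count[] arrays, the MM_NO_TYPE sentinel and the
extended mm_trace_rss_stat() tracepoint. For reviewers reading this
patch in isolation, a minimal sketch of the assumed counter layout
follows; the MAX_NUMNODES sizing here is illustrative only, not the
series' actual declaration:

struct mm_rss_stat {
        atomic_long_t count[NR_MM_COUNTERS];
#ifdef CONFIG_NUMA
        /* per-node RSS, indexed by node id */
        atomic_long_t numa_count[MAX_NUMNODES];
#endif
};

struct task_rss_stat {
        int events;     /* batching threshold, flushed by sync_mm_rss() */
        int count[NR_MM_COUNTERS];
#ifdef CONFIG_NUMA
        int numa_count[MAX_NUMNODES];
#endif
};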
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a7150ee7439c..4a8e10ebc729 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2028,8 +2028,18 @@ static inline bool get_user_page_fast_only(unsigned long addr,
  */
 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member, int node)
 {
-        long val = atomic_long_read(&mm->rss_stat.count[member]);
+        long val;
+
+        WARN_ON(node == NUMA_NO_NODE && member == MM_NO_TYPE);
+        if (node == NUMA_NO_NODE)
+                val = atomic_long_read(&mm->rss_stat.count[member]);
+        else
+#ifdef CONFIG_NUMA
+                val = atomic_long_read(&mm->rss_stat.numa_count[node]);
+#else
+                val = 0;
+#endif
 
 #ifdef SPLIT_RSS_COUNTING
         /*
          * counter is updated in asynchronous manner and may go to minus.
@@ -2046,23 +2056,41 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long member_count, int
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value, int node)
 {
-        long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+        long member_count = 0, numa_count = 0;
 
-        mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, value);
+        if (member != MM_NO_TYPE)
+                member_count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+        if (node != NUMA_NO_NODE)
+                numa_count = atomic_long_add_return(value, &mm->rss_stat.numa_count[node]);
+#endif
+        mm_trace_rss_stat(mm, member, member_count, node, numa_count, value);
 }
 
 static inline void inc_mm_counter(struct mm_struct *mm, int member, int node)
 {
-        long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+        long member_count = 0, numa_count = 0;
 
-        mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, 1);
+        if (member != MM_NO_TYPE)
+                member_count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+        if (node != NUMA_NO_NODE)
+                numa_count = atomic_long_inc_return(&mm->rss_stat.numa_count[node]);
+#endif
+        mm_trace_rss_stat(mm, member, member_count, node, numa_count, 1);
 }
 
 static inline void dec_mm_counter(struct mm_struct *mm, int member, int node)
 {
-        long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+        long member_count = 0, numa_count = 0;
 
-        mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, -1);
+        if (member != MM_NO_TYPE)
+                member_count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+        if (node != NUMA_NO_NODE)
+                numa_count = atomic_long_dec_return(&mm->rss_stat.numa_count[node]);
+#endif
+        mm_trace_rss_stat(mm, member, member_count, node, numa_count, -1);
 }
 
 /* Optimized variant when page is already known not to be PageAnon */

diff --git a/mm/memory.c b/mm/memory.c
index b085f368ae11..66c8d10d36cc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -191,6 +191,14 @@ void sync_mm_rss(struct mm_struct *mm)
                         current->rss_stat.count[i] = 0;
                 }
         }
+#ifdef CONFIG_NUMA
+        for_each_node(i) {
+                if (current->rss_stat.numa_count[i]) {
+                        add_mm_counter(mm, MM_NO_TYPE, current->rss_stat.numa_count[i], i);
+                        current->rss_stat.numa_count[i] = 0;
+                }
+        }
+#endif
         current->rss_stat.events = 0;
 }
 
@@ -198,9 +206,12 @@ static void add_mm_counter_fast(struct mm_struct *mm, int member, int val, int n
 {
         struct task_struct *task = current;
 
-        if (likely(task->mm == mm))
+        if (likely(task->mm == mm)) {
                 task->rss_stat.count[member] += val;
-        else
+#ifdef CONFIG_NUMA
+                task->rss_stat.numa_count[node] += val;
+#endif
+        } else
                 add_mm_counter(mm, member, val, node);
 }
 #define inc_mm_counter_fast(mm, member, node) add_mm_counter_fast(mm, member, 1, node)
@@ -520,6 +531,11 @@ static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss, int *numa_rss)
         for (i = 0; i < NR_MM_COUNTERS; i++)
                 if (rss[i])
                         add_mm_counter(mm, i, rss[i], NUMA_NO_NODE);
+#ifdef CONFIG_NUMA
+        for_each_node(i)
+                if (numa_rss[i] != 0)
+                        add_mm_counter(mm, MM_NO_TYPE, numa_rss[i], i);
+#endif
 }
 
 /*
--
2.20.1