Message-Id: <20201206082948.11812-3-songmuchun@bytedance.com>
Date: Sun, 6 Dec 2020 16:29:45 +0800
From: Muchun Song <songmuchun@...edance.com>
To: gregkh@...uxfoundation.org, rafael@...nel.org, adobriyan@...il.com,
akpm@...ux-foundation.org, hannes@...xchg.org, mhocko@...nel.org,
vdavydov.dev@...il.com, hughd@...gle.com, will@...nel.org,
guro@...com, rppt@...nel.org, tglx@...utronix.de, esyr@...hat.com,
peterx@...hat.com, krisman@...labora.com, surenb@...gle.com,
avagin@...nvz.org, elver@...gle.com, rdunlap@...radead.org,
iamjoonsoo.kim@....com
Cc: linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, cgroups@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v2 09/12] mm: memcontrol: convert vmstat slab counters to bytes
The global and per-node counters are stored in pages, whereas the memcg
and lruvec counters are stored in bytes. This mixed scheme looks weird,
so convert all vmstat slab counters to bytes.
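To illustrate the idea (a minimal userspace sketch only, assuming a
PAGE_SHIFT of 12, i.e. 4 KiB pages; this is not the kernel code): with
this change the slab counters are updated in byte granularity and are
only shifted down to pages when a page-granular value is read back:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages, for illustration only */

/* Counter is kept in bytes; convert to pages only at read time. */
static unsigned long state_pages(long bytes)
{
	if (bytes < 0)	/* per-cpu batching can leave the snapshot negative */
		bytes = 0;
	return (unsigned long)bytes >> PAGE_SHIFT;
}

int main(void)
{
	long slab_bytes = 3 * 4096 + 512;	/* byte-granular counter value */

	printf("%lu pages\n", state_pages(slab_bytes));	/* prints: 3 pages */
	return 0;
}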
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
 include/linux/vmstat.h | 17 ++++++++++-------
 mm/vmstat.c            | 21 ++++++++++-----------
 2 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 322dcbfcc933..fd1a3d5d4926 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -197,18 +197,26 @@ static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
long x = atomic_long_read(&vm_node_stat[item]);
+
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
#endif
+ if (vmstat_item_in_bytes(item))
+ x >>= PAGE_SHIFT;
return x;
}
static inline unsigned long global_node_page_state(enum node_stat_item item)
{
- VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+ long x = atomic_long_read(&vm_node_stat[item]);
- return global_node_page_state_pages(item);
+ VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
static inline unsigned long zone_page_state(struct zone *zone,
@@ -312,11 +320,6 @@ static inline void __mod_zone_page_state(struct zone *zone,
static inline void __mod_node_page_state(struct pglist_data *pgdat,
enum node_stat_item item, int delta)
{
- if (vmstat_item_in_bytes(item)) {
- VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
- delta >>= PAGE_SHIFT;
- }
-
node_page_state_add(delta, pgdat, item);
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8d77ee426e22..7fb0c7cb9516 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -345,11 +345,6 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
long x;
long t;
- if (vmstat_item_in_bytes(item)) {
- VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
- delta >>= PAGE_SHIFT;
- }
-
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
@@ -554,11 +549,6 @@ static inline void mod_node_state(struct pglist_data *pgdat,
s8 __percpu *p = pcp->vm_node_stat_diff + item;
long o, n, t, z;
- if (vmstat_item_in_bytes(item)) {
- VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
- delta >>= PAGE_SHIFT;
- }
-
do {
z = 0; /* overflow to node counters */
@@ -1012,19 +1002,28 @@ unsigned long node_page_state_pages(struct pglist_data *pgdat,
enum node_stat_item item)
{
long x = atomic_long_read(&pgdat->vm_stat[item]);
+
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
#endif
+ if (vmstat_item_in_bytes(item))
+ x >>= PAGE_SHIFT;
return x;
}
unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item)
{
+ long x = atomic_long_read(&pgdat->vm_stat[item]);
+
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
- return node_page_state_pages(pgdat, item);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
#endif
--
2.11.0