Message-ID: <20250919195223.1560636-2-joshua.hahnjy@gmail.com>
Date: Fri, 19 Sep 2025 12:52:19 -0700
From: Joshua Hahn <joshua.hahnjy@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Johannes Weiner <hannes@...xchg.org>
Cc: Chris Mason <clm@...com>,
Kiryl Shutsemau <kirill@...temov.name>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Brendan Jackman <jackmanb@...gle.com>,
David Hildenbrand <david@...hat.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Michal Hocko <mhocko@...e.com>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Zi Yan <ziy@...dia.com>,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
kernel-team@...a.com
Subject: [PATCH 1/4] mm/page_alloc/vmstat: Simplify refresh_cpu_vm_stats change detection

Currently, refresh_cpu_vm_stats returns an int, indicating how many
changes were made during its updates. Using this information, callers
like vmstat_update can heuristically determine whether more work will be
done in the future.

However, all of refresh_cpu_vm_stats's callers either (a) ignore the
result, only caring about performing the updates, or (b) only care about
whether changes were made, but not *how many* changes were made.
Simplify the code by returning a bool instead, indicating whether any
updates were made.

In addition, simplify fold_diff and decay_pcp_high to return a bool for
the same reason.

Signed-off-by: Joshua Hahn <joshua.hahnjy@...il.com>
---
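A note for reviewers: the two caller patterns referenced above, as I
read them in the current mm/vmstat.c (a sketch trimmed to the relevant
lines; not part of this patch):

	/* (b) vmstat_update() only tests the result for zero vs. non-zero */
	if (refresh_cpu_vm_stats(true))
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));

	/* (a) quiet_vmstat() discards the return value entirely */
	refresh_cpu_vm_stats(false);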
 include/linux/gfp.h |  2 +-
 mm/page_alloc.c     |  8 ++++----
 mm/vmstat.c         | 28 ++++++++++++++--------------
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5ebf26fcdcfa..63c72cb1d117 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -386,7 +386,7 @@ extern void free_pages(unsigned long addr, unsigned int order);
 #define free_page(addr) free_pages((addr), 0)

 void page_alloc_init_cpuhp(void);
-int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
+bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(struct zone *zone);
 void drain_local_pages(struct zone *zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1d037f97c5f..77e7d9a5f149 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2561,10 +2561,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
  * Called from the vmstat counter updater to decay the PCP high.
  * Return whether there are addition works to do.
  */
-int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
+bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
 {
 	int high_min, to_drain, batch;
-	int todo = 0;
+	bool todo = false;

 	high_min = READ_ONCE(pcp->high_min);
 	batch = READ_ONCE(pcp->batch);
@@ -2577,7 +2577,7 @@ int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
 		pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
 				 pcp->high - (pcp->high >> 3), high_min);
 		if (pcp->high > high_min)
-			todo++;
+			todo = true;
 	}

 	to_drain = pcp->count - pcp->high;
@@ -2585,7 +2585,7 @@ int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
 		spin_lock(&pcp->lock);
 		free_pcppages_bulk(zone, to_drain, pcp, 0);
 		spin_unlock(&pcp->lock);
-		todo++;
+		todo = true;
 	}

 	return todo;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 71cd1ceba191..1f74a3517ab2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -771,25 +771,25 @@ EXPORT_SYMBOL(dec_node_page_state);
 /*
  * Fold a differential into the global counters.
- * Returns the number of counters updated.
+ * Returns whether counters were updated.
  */
-static int fold_diff(int *zone_diff, int *node_diff)
+static bool fold_diff(int *zone_diff, int *node_diff)
 {
 	int i;
-	int changes = 0;
+	bool changed = false;

 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 		if (zone_diff[i]) {
 			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
-			changes++;
+			changed = true;
 	}

 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 		if (node_diff[i]) {
 			atomic_long_add(node_diff[i], &vm_node_stat[i]);
-			changes++;
+			changed = true;
 	}

-	return changes;
+	return changed;
 }

/*
@@ -806,16 +806,16 @@ static int fold_diff(int *zone_diff, int *node_diff)
  * with the global counters. These could cause remote node cache line
  * bouncing and will have to be only done when necessary.
  *
- * The function returns the number of global counters updated.
+ * The function returns whether global counters were updated.
  */
-static int refresh_cpu_vm_stats(bool do_pagesets)
+static bool refresh_cpu_vm_stats(bool do_pagesets)
 {
 	struct pglist_data *pgdat;
 	struct zone *zone;
 	int i;
 	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
-	int changes = 0;
+	bool changed = false;

 	for_each_populated_zone(zone) {
 		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
@@ -839,7 +839,7 @@ static int refresh_cpu_vm_stats(bool do_pagesets)

 		if (do_pagesets) {
 			cond_resched();
-			changes += decay_pcp_high(zone, this_cpu_ptr(pcp));
+			changed |= decay_pcp_high(zone, this_cpu_ptr(pcp));
 #ifdef CONFIG_NUMA
 			/*
 			 * Deal with draining the remote pageset of this
@@ -861,13 +861,13 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
 			}

 			if (__this_cpu_dec_return(pcp->expire)) {
-				changes++;
+				changed = true;
 				continue;
 			}

 			if (__this_cpu_read(pcp->count)) {
 				drain_zone_pages(zone, this_cpu_ptr(pcp));
-				changes++;
+				changed = true;
 			}
 #endif
 		}
@@ -887,8 +887,8 @@ static int refresh_cpu_vm_stats(bool do_pagesets)
 		}
 	}

-	changes += fold_diff(global_zone_diff, global_node_diff);
-	return changes;
+	changed |= fold_diff(global_zone_diff, global_node_diff);
+	return changed;
 }

 /*
--
2.47.3