Message-Id: <20220801234258.134609-2-atomlin@redhat.com>
Date: Tue, 2 Aug 2022 00:42:57 +0100
From: Aaron Tomlin <atomlin@...hat.com>
To: frederic@...nel.org, mtosatti@...hat.com
Cc: cl@...ux.com, tglx@...utronix.de, mingo@...nel.org,
peterz@...radead.org, pauld@...hat.com, neelx@...hat.com,
oleksandr@...alenko.name, atomlin@...mlin.com,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: [PATCH v5 1/2] mm/vmstat: Use per cpu variable to track a vmstat discrepancy
This patch incorporates an idea from Marcelo's patch [1] where a
CPU-specific variable, namely vmstat_dirty, is used to indicate
whether a vmstat imbalance is present for a given CPU. Therefore, at
the appropriate time, we can fold all the remaining differentials.
[1]: https://lore.kernel.org/lkml/20220204173554.763888172@fedora.localdomain/
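For illustration only (not part of this patch), below is a minimal,
self-contained userspace C sketch of the same dirty-flag idea: each
updater accumulates into a per-CPU delta and sets a per-CPU boolean,
and the fold path consults that boolean instead of scanning every
differential. The names NR_CPUS_SIM, cpu_delta, cpu_dirty, mod_stat
and fold_if_dirty are hypothetical stand-ins for the kernel's per-CPU
machinery, not actual kernel symbols.

  /*
   * Hypothetical userspace model of the per-CPU vmstat_dirty flag.
   * It mirrors the idea of the patch, not the real kernel code.
   */
  #include <stdbool.h>
  #include <stdio.h>

  #define NR_CPUS_SIM 4
  #define NR_ITEMS    2

  static long global_stats[NR_ITEMS];           /* "zone/node" totals       */
  static long cpu_delta[NR_CPUS_SIM][NR_ITEMS]; /* per-CPU differentials    */
  static bool cpu_dirty[NR_CPUS_SIM];           /* analogue of vmstat_dirty */

  /* Analogue of __mod_zone_page_state() followed by mark_vmstat_dirty(). */
  static void mod_stat(int cpu, int item, long delta)
  {
          cpu_delta[cpu][item] += delta;
          cpu_dirty[cpu] = true;
  }

  /* Analogue of quiet_vmstat()/vmstat_update(): fold only when dirty. */
  static void fold_if_dirty(int cpu)
  {
          if (!cpu_dirty[cpu])
                  return;         /* cheap check replaces scanning every diff */

          for (int item = 0; item < NR_ITEMS; item++) {
                  global_stats[item] += cpu_delta[cpu][item];
                  cpu_delta[cpu][item] = 0;
          }
          cpu_dirty[cpu] = false;
  }

  int main(void)
  {
          mod_stat(0, 0, 5);
          mod_stat(1, 1, -2);

          for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
                  fold_if_dirty(cpu);

          printf("item0=%ld item1=%ld\n", global_stats[0], global_stats[1]);
          return 0;
  }

The point of the sketch is the design choice the patch makes: setting
a single per-CPU boolean on every counter update is cheap, and it lets
quiet_vmstat() and vmstat_shepherd() skip the full scan of
vm_stat_diff/vm_node_stat_diff that need_update() previously performed.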
Signed-off-by: Aaron Tomlin <atomlin@...hat.com>
---
mm/vmstat.c | 46 +++++++++++++++-------------------------------
1 file changed, 15 insertions(+), 31 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 373d2730fcf2..51564b7c85fe 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -195,6 +195,12 @@ void fold_vm_numa_events(void)
#endif
#ifdef CONFIG_SMP
+static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);
+
+static inline void mark_vmstat_dirty(void)
+{
+ this_cpu_write(vmstat_dirty, true);
+}
int calculate_pressure_threshold(struct zone *zone)
{
@@ -367,6 +373,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
+ mark_vmstat_dirty();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
@@ -405,6 +412,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
+ mark_vmstat_dirty();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
@@ -603,6 +611,7 @@ static inline void mod_zone_state(struct zone *zone,
if (z)
zone_page_state_add(z, zone, item);
+ mark_vmstat_dirty();
}
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
@@ -671,6 +680,7 @@ static inline void mod_node_state(struct pglist_data *pgdat,
if (z)
node_page_state_add(z, pgdat, item);
+ mark_vmstat_dirty();
}
void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
@@ -1873,6 +1883,7 @@ int sysctl_stat_interval __read_mostly = HZ;
static void refresh_vm_stats(struct work_struct *work)
{
refresh_cpu_vm_stats(true);
+ this_cpu_write(vmstat_dirty, false);
}
int vmstat_refresh(struct ctl_table *table, int write,
@@ -1937,6 +1948,7 @@ int vmstat_refresh(struct ctl_table *table, int write,
static void vmstat_update(struct work_struct *w)
{
if (refresh_cpu_vm_stats(true)) {
+ this_cpu_write(vmstat_dirty, false);
/*
* Counters were updated so we expect more updates
* to occur in the future. Keep on running the
@@ -1948,35 +1960,6 @@ static void vmstat_update(struct work_struct *w)
}
}
-/*
- * Check if the diffs for a certain cpu indicate that
- * an update is needed.
- */
-static bool need_update(int cpu)
-{
- pg_data_t *last_pgdat = NULL;
- struct zone *zone;
-
- for_each_populated_zone(zone) {
- struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
- struct per_cpu_nodestat *n;
-
- /*
- * The fast way of checking if there are any vmstat diffs.
- */
- if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
- return true;
-
- if (last_pgdat == zone->zone_pgdat)
- continue;
- last_pgdat = zone->zone_pgdat;
- n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
- if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
- return true;
- }
- return false;
-}
-
/*
* Switch off vmstat processing and then fold all the remaining differentials
* until the diffs stay at zero. The function is used by NOHZ and can only be
@@ -1990,7 +1973,7 @@ void quiet_vmstat(void)
if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
return;
- if (!need_update(smp_processor_id()))
+ if (!__this_cpu_read(vmstat_dirty))
return;
/*
@@ -2000,6 +1983,7 @@ void quiet_vmstat(void)
* vmstat_shepherd will take care about that for us.
*/
refresh_cpu_vm_stats(false);
+ __this_cpu_write(vmstat_dirty, false);
}
/*
@@ -2021,7 +2005,7 @@ static void vmstat_shepherd(struct work_struct *w)
for_each_online_cpu(cpu) {
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
- if (!delayed_work_pending(dw) && need_update(cpu))
+ if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
cond_resched();
--
2.37.1