Message-ID: <4847c300-c7bb-4259-867c-4bbf4d760576@arm.com>
Date: Thu, 5 Feb 2026 10:50:06 +0530
From: Dev Jain <dev.jain@....com>
To: Shakeel Butt <shakeel.butt@...ux.dev>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Johannes Weiner <hannes@...xchg.org>, Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>, Harry Yoo <harry.yoo@...cle.com>,
Qi Zheng <qi.zheng@...ux.dev>, Vlastimil Babka <vbabka@...e.cz>,
linux-mm@...ck.org, cgroups@...r.kernel.org, linux-kernel@...r.kernel.org,
Meta kernel team <kernel-team@...a.com>
Subject: Re: [PATCH 1/4] memcg: use mod_node_page_state to update stats
On 05/02/26 2:08 am, Shakeel Butt wrote:
> On Mon, Feb 02, 2026 at 02:23:54PM +0530, Dev Jain wrote:
>> On 02/02/26 10:24 am, Shakeel Butt wrote:
>>>>>> Hello Shakeel,
>>>>>>
>>>>>> We are seeing a regression in the micromm/munmap benchmark with this patch,
>>>>>> on arm64 - the benchmark mmaps a lot of memory, memsets it, and measures the
>>>>>> time taken to munmap it. Please see below and check whether my understanding
>>>>>> of this patch is correct.
>>>>>>
>>>>> Thanks for the report. Are you seeing the regression in just the benchmark
>>>>> or in some real workload as well? Also, how much regression are you seeing?
>>>>> I have a kernel test robot regression report [1] for this patch as well,
>>>>> which says 2.6% regression, so it was put on the back-burner for now. I
>>>>> will take a look at this again soon.
>>>>>
>>>> The munmap regression is ~24%. Haven't observed a regression in any other
>>>> benchmark yet.
>>> Please share the code/benchmark which shows this regression; also, if you
>>> can share the perf profile, that would be awesome.
>> https://gitlab.arm.com/tooling/fastpath/-/blob/main/containers/microbench/micromm.c
>> You can run this with
>> ./micromm 0 munmap 10
>>
>> I don't have a perf profile; I measured the time taken by the above command,
>> with and without the patch.
>>
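(In case you don't want to fetch the source: the measured path is essentially
the following - a rough standalone paraphrase of what I described above, not
the actual micromm.c, with a made-up size:)

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/mman.h>

#define SZ	(1UL << 30)	/* illustrative; the real benchmark takes arguments */

int main(void)
{
	struct timespec t0, t1;
	char *buf;

	buf = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 1, SZ);			/* fault everything in */

	clock_gettime(CLOCK_MONOTONIC, &t0);
	munmap(buf, SZ);			/* the timed part */
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("munmap took %ld ns\n",
	       (t1.tv_sec - t0.tv_sec) * 1000000000L +
	       (t1.tv_nsec - t0.tv_nsec));
	return 0;
}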
> Hi Dev, can you please try the following patch?
>
>
> From 40155feca7e7bc846800ab8449735bdb03164d6d Mon Sep 17 00:00:00 2001
> From: Shakeel Butt <shakeel.butt@...ux.dev>
> Date: Wed, 4 Feb 2026 08:46:08 -0800
> Subject: [PATCH] vmstat: use preempt disable instead of try_cmpxchg
>
> Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>
> ---
> include/linux/mmzone.h | 2 +-
> mm/vmstat.c | 58 ++++++++++++++++++------------------------
> 2 files changed, 26 insertions(+), 34 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 3e51190a55e4..499cd53efdd6 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -776,7 +776,7 @@ struct per_cpu_zonestat {
>
> struct per_cpu_nodestat {
> s8 stat_threshold;
> - s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
> + long vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
> };
>
> #endif /* !__GENERATING_BOUNDS.H */
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index 86b14b0f77b5..0930695597bb 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -377,7 +377,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
> long delta)
> {
> struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> - s8 __percpu *p = pcp->vm_node_stat_diff + item;
> + long __percpu *p = pcp->vm_node_stat_diff + item;
> long x;
> long t;
>
> @@ -456,8 +456,8 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
> void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
> {
> struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> - s8 __percpu *p = pcp->vm_node_stat_diff + item;
> - s8 v, t;
> + long __percpu *p = pcp->vm_node_stat_diff + item;
> + long v, t;
>
> VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
>
> @@ -467,7 +467,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
> v = __this_cpu_inc_return(*p);
> t = __this_cpu_read(pcp->stat_threshold);
> if (unlikely(v > t)) {
> - s8 overstep = t >> 1;
> + long overstep = t >> 1;
>
> node_page_state_add(v + overstep, pgdat, item);
> __this_cpu_write(*p, -overstep);
> @@ -512,8 +512,8 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
> void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
> {
> struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> - s8 __percpu *p = pcp->vm_node_stat_diff + item;
> - s8 v, t;
> + long __percpu *p = pcp->vm_node_stat_diff + item;
> + long v, t;
>
> VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
>
> @@ -523,7 +523,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
> v = __this_cpu_dec_return(*p);
> t = __this_cpu_read(pcp->stat_threshold);
> if (unlikely(v < - t)) {
> - s8 overstep = t >> 1;
> + long overstep = t >> 1;
>
> node_page_state_add(v - overstep, pgdat, item);
> __this_cpu_write(*p, overstep);
> @@ -619,9 +619,8 @@ static inline void mod_node_state(struct pglist_data *pgdat,
> enum node_stat_item item, int delta, int overstep_mode)
> {
> struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
> - s8 __percpu *p = pcp->vm_node_stat_diff + item;
> - long n, t, z;
> - s8 o;
> + long __percpu *p = pcp->vm_node_stat_diff + item;
> + long o, n, t, z;
>
> if (vmstat_item_in_bytes(item)) {
> /*
> @@ -634,32 +633,25 @@ static inline void mod_node_state(struct pglist_data *pgdat,
> delta >>= PAGE_SHIFT;
> }
>
> + preempt_disable();
> +
> o = this_cpu_read(*p);
> - do {
> - z = 0; /* overflow to node counters */
> + n = o + delta;
>
> - /*
> - * The fetching of the stat_threshold is racy. We may apply
> - * a counter threshold to the wrong the cpu if we get
> - * rescheduled while executing here. However, the next
> - * counter update will apply the threshold again and
> - * therefore bring the counter under the threshold again.
> - *
> - * Most of the time the thresholds are the same anyways
> - * for all cpus in a node.
> - */
> - t = this_cpu_read(pcp->stat_threshold);
> + t = this_cpu_read(pcp->stat_threshold);
> + z = 0;
>
> - n = delta + (long)o;
> + if (abs(n) > t) {
> + int os = overstep_mode * (t >> 1);
>
> - if (abs(n) > t) {
> - int os = overstep_mode * (t >> 1) ;
> + /* Overflow must be added to node counters */
> + z = n + os;
> + n = -os;
> + }
>
> - /* Overflow must be added to node counters */
> - z = n + os;
> - n = -os;
> - }
> - } while (!this_cpu_try_cmpxchg(*p, &o, n));
> + this_cpu_add(*p, n - o);
> +
> + preempt_enable();
>
> if (z)
> node_page_state_add(z, pgdat, item);
> @@ -866,7 +858,7 @@ static bool refresh_cpu_vm_stats(bool do_pagesets)
> struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
>
> for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
> - int v;
> + long v;
>
> v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
> if (v) {
> @@ -929,7 +921,7 @@ void cpu_vm_stats_fold(int cpu)
>
> for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
> if (p->vm_node_stat_diff[i]) {
> - int v;
> + long v;
>
> v = p->vm_node_stat_diff[i];
> p->vm_node_stat_diff[i] = 0;
Thanks for looking into this.

But this doesn't solve it :( preempt_disable() contains a compiler barrier;
that's probably why.
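
For reference, the construct I mean, simplified from include/linux/preempt.h
(modulo the exact preempt_count plumbing):

#define preempt_disable()					\
do {								\
	preempt_count_inc();					\
	barrier();	/* compiler barrier */			\
} while (0)

#define preempt_enable()					\
do {								\
	barrier();						\
	if (unlikely(preempt_count_dec_and_test()))		\
		__preempt_schedule();				\
} while (0)

The barrier() forces the compiler to re-read memory instead of keeping values
cached in registers across the section, so even with the cmpxchg gone, the two
barriers still fence the whole read-modify-write sequence, which may explain
why the timings don't move.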
Also, can you confirm whether my analysis of the regression was correct?
Because if it was, then this diff looks wrong: AFAIU preempt_disable()
won't stop an irq handler from interrupting the execution, so this will
introduce a bug for code paths running in irq context.
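
To spell out the interleaving I have in mind (preemption off, irqs on):

	o = this_cpu_read(*p);			/* reads, say, 5 */
	n = o + delta;
	t = this_cpu_read(pcp->stat_threshold);
	/* threshold/overstep decision is made against o == 5 */

		--> irq arrives here; the handler calls
		    mod_node_page_state() on the same item and runs
		    the whole sequence to completion, changing *p

	this_cpu_add(*p, n - o);		/* n was computed from a stale o */

The old this_cpu_try_cmpxchg() loop would notice exactly this kind of
concurrent update and retry with a fresh value; the plain read + add version
makes its overstep decision against a stale counter.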