Message-ID: <20210912132914.GA56674@shbuild999.sh.intel.com>
Date: Sun, 12 Sep 2021 21:29:14 +0800
From: Feng Tang <feng.tang@...el.com>
To: Hillf Danton <hdanton@...a.com>
Cc: Shakeel Butt <shakeelb@...gle.com>,
LKML <linux-kernel@...r.kernel.org>,
Xing Zhengjun <zhengjun.xing@...ux.intel.com>,
Linux MM <linux-mm@...ck.org>
Subject: Re: [memcg] 45208c9105: aim7.jobs-per-min -14.0% regression
On Sun, Sep 12, 2021 at 07:17:56PM +0800, Hillf Danton wrote:
[...]
> > +//	if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
> > +	if (!(__this_cpu_inc_return(stats_flush_threshold) % 128))
> > 		queue_work(system_unbound_wq, &stats_flush_work);
> > }
>
> Hi Feng,
>
> Would you please check whether avoiding queuing an already-queued work,
> by adding and checking an atomic counter, helps fix the regression?
Hi Hillf,
I just tested your patch; it didn't fully recover the regression, only
reduced it from -14% to around -13%, similar to the patch that increases
the charge batch number.
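
To make the gating concrete for anyone following the thread, here is a
minimal userspace C11 sketch of the same "queue at most one flush work"
pattern your patch implements; the names (flush_queued, queue_flush_once,
flush_work_fn) are illustrative, not from the kernel patch:

#include <stdatomic.h>
#include <stdio.h>

/* 0: no flush work pending, 1: flush work queued but not yet run */
static atomic_int flush_queued;

/* stand-in for the queue_work() call site: queue at most one work */
static int queue_flush_once(void)
{
	int queued = atomic_load(&flush_queued);

	if (!queued &&
	    atomic_compare_exchange_strong(&flush_queued, &queued, 1))
		return 1;	/* this caller queued the work */
	return 0;		/* a work is already pending, skip */
}

/* stand-in for flush_memcg_stats_work(): flush, then reopen the gate */
static void flush_work_fn(void)
{
	/* ... mem_cgroup_flush_stats() would run here ... */
	atomic_fetch_sub(&flush_queued, 1);
}

int main(void)
{
	printf("%d\n", queue_flush_once());	/* 1: queued */
	printf("%d\n", queue_flush_once());	/* 0: already pending */
	flush_work_fn();
	printf("%d\n", queue_flush_once());	/* 1: can queue again */
	return 0;
}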
Thanks,
Feng
> Hillf
>
> --- x/mm/memcontrol.c
> +++ y/mm/memcontrol.c
> @@ -108,6 +108,7 @@ static void flush_memcg_stats_dwork(stru
> static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
> static void flush_memcg_stats_work(struct work_struct *w);
> static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
> +static atomic_t sfwork_queued;
> static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
> static DEFINE_SPINLOCK(stats_flush_lock);
>
> @@ -660,8 +661,13 @@ void __mod_memcg_lruvec_state(struct lru
>
> 	/* Update lruvec */
> 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
> -	if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
> -		queue_work(system_unbound_wq, &stats_flush_work);
> +	if (!(__this_cpu_inc_return(stats_flush_threshold) %
> +	      MEMCG_CHARGE_BATCH)) {
> +		int queued = atomic_read(&sfwork_queued);
> +
> +		if (!queued && atomic_try_cmpxchg(&sfwork_queued, &queued, 1))
> +			queue_work(system_unbound_wq, &stats_flush_work);
> +	}
> }
>
> /**
> @@ -5376,6 +5382,7 @@ static void flush_memcg_stats_dwork(stru
> static void flush_memcg_stats_work(struct work_struct *w)
> {
> 	mem_cgroup_flush_stats();
> +	atomic_dec(&sfwork_queued);
> }
>
> static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
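
One more note on the other knob in play: the per-cpu stats_flush_threshold
check rate-limits how often a flush is even attempted. A rough standalone
sketch of that batching follows; BATCH and the counter name are
illustrative (MEMCG_CHARGE_BATCH is 64 in this tree, and the snippet at
the top of the thread doubles the divisor to 128):

#include <stdio.h>

/* illustrative batch size standing in for MEMCG_CHARGE_BATCH or 128 */
#define BATCH 128

/* stand-in for one CPU's stats_flush_threshold counter */
static unsigned int flush_threshold;

/* returns 1 on every BATCH-th stat update, mirroring the % check */
static int should_try_queue(void)
{
	return ++flush_threshold % BATCH == 0;
}

int main(void)
{
	unsigned int i, attempts = 0;

	for (i = 0; i < 100000; i++)
		attempts += should_try_queue();

	/* 100000 updates -> 781 queue attempts instead of 100000 */
	printf("%u queue attempts\n", attempts);
	return 0;
}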