Message-ID: <a9977cb2-3dce-4be1-81a3-23e760082922@suse.cz>
Date: Wed, 30 Apr 2025 13:42:47 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Shakeel Butt <shakeel.butt@...ux.dev>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>, Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>, linux-mm@...ck.org,
cgroups@...r.kernel.org, linux-kernel@...r.kernel.org,
Meta kernel team <kernel-team@...a.com>, bpf <bpf@...r.kernel.org>
Subject: Re: [PATCH 2/4] memcg: separate local_trylock for memcg and obj
On 4/30/25 01:04, Shakeel Butt wrote:
> The per-cpu stock_lock protects the cached memcg and the cached objcg
> and their respective fields. However, there is no dependency between
> these fields, and it is better to have fine-grained separate locks for
> the cached memcg and the cached objcg. This decoupling of locks allows
> us to make the memcg charge cache and the objcg charge cache nmi safe
> independently.
>
> At the moment, the memcg charge cache is already nmi safe, and this
> decoupling will allow the memcg charge cache to work without
> disabling irqs.
>
> Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>
> ---
> mm/memcontrol.c | 52 +++++++++++++++++++++++++++----------------------
> 1 file changed, 29 insertions(+), 23 deletions(-)
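
For context, my understanding is that memcg_stock_pcp now carries two
separate local_trylock_t's, one per cache. Roughly this (just my sketch
for reference, field names taken from the hunks below, other fields
elided):

	struct memcg_stock_pcp {
		local_trylock_t memcg_lock;	/* cached memcg(s) + nr_pages */
		/* ... memcg charge cache fields ... */

		local_trylock_t obj_lock;	/* cached_objcg + nr_bytes */
		/* ... objcg charge cache fields ... */

		unsigned long flags;		/* FLUSHING_CACHED_CHARGE */
	};
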
> @@ -1883,19 +1885,22 @@ static void drain_local_stock(struct work_struct *dummy)
> struct memcg_stock_pcp *stock;
> unsigned long flags;
>
> - /*
> - * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
> - * drain_stock races is that we always operate on local CPU stock
> - * here with IRQ disabled
> - */
> - local_lock_irqsave(&memcg_stock.stock_lock, flags);
> + if (WARN_ONCE(!in_task(), "drain in non-task context"))
> + return;
>
> + preempt_disable();
> stock = this_cpu_ptr(&memcg_stock);
> +
> + local_lock_irqsave(&memcg_stock.obj_lock, flags);
> drain_obj_stock(stock);
> + local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
> +
> + local_lock_irqsave(&memcg_stock.memcg_lock, flags);
> drain_stock_fully(stock);
> - clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
> + local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
>
> - local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
> + clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
> + preempt_enable();
This usage of preempt_disable() looks rather weird, and it makes RT
unhappy because there the local lock is a mutex, so you get:

  BUG: sleeping function called from invalid context at
  kernel/locking/spinlock_rt.c:48

I know the next patch removes the preempt_disable() again, but for
bisectability purposes it should be avoided here as well. Could we
extend the local lock scope here instead of using preempt_disable()?
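Something like this, perhaps (just a sketch, not even compile-tested;
note the stock pointer is re-fetched under memcg_lock, since nothing
pins the CPU between the two locked sections):

	if (WARN_ONCE(!in_task(), "drain in non-task context"))
		return;

	local_lock_irqsave(&memcg_stock.obj_lock, flags);
	stock = this_cpu_ptr(&memcg_stock);
	drain_obj_stock(stock);
	local_unlock_irqrestore(&memcg_stock.obj_lock, flags);

	local_lock_irqsave(&memcg_stock.memcg_lock, flags);
	/* re-fetch in case we migrated while no lock was held */
	stock = this_cpu_ptr(&memcg_stock);
	drain_stock_fully(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
	local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);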
> }
>
> static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
> @@ -1918,10 +1923,10 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
> VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
>
> if (nr_pages > MEMCG_CHARGE_BATCH ||
> - !local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
> + !local_trylock_irqsave(&memcg_stock.memcg_lock, flags)) {
> /*
> * In case of larger than batch refill or unlikely failure to
> - * lock the percpu stock_lock, uncharge memcg directly.
> + * lock the percpu memcg_lock, uncharge memcg directly.
> */
> memcg_uncharge(memcg, nr_pages);
> return;
> @@ -1953,7 +1958,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
> WRITE_ONCE(stock->nr_pages[i], nr_pages);
> }
>
> - local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
> + local_unlock_irqrestore(&memcg_stock.memcg_lock, flags);
> }
>
> static bool is_drain_needed(struct memcg_stock_pcp *stock,
> @@ -2028,11 +2033,12 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
>
> stock = &per_cpu(memcg_stock, cpu);
>
> - /* drain_obj_stock requires stock_lock */
> - local_lock_irqsave(&memcg_stock.stock_lock, flags);
> + /* drain_obj_stock requires obj_lock */
> + local_lock_irqsave(&memcg_stock.obj_lock, flags);
> drain_obj_stock(stock);
> - local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
> + local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
>
> + /* no need for the local lock */
> drain_stock_fully(stock);
>
> return 0;
> @@ -2885,7 +2891,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
> unsigned long flags;
> bool ret = false;
>
> - local_lock_irqsave(&memcg_stock.stock_lock, flags);
> + local_lock_irqsave(&memcg_stock.obj_lock, flags);
>
> stock = this_cpu_ptr(&memcg_stock);
> if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
> @@ -2896,7 +2902,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
> __account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
> }
>
> - local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
> + local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
>
> return ret;
> }
> @@ -2985,7 +2991,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
> unsigned long flags;
> unsigned int nr_pages = 0;
>
> - local_lock_irqsave(&memcg_stock.stock_lock, flags);
> + local_lock_irqsave(&memcg_stock.obj_lock, flags);
>
> stock = this_cpu_ptr(&memcg_stock);
> if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
> @@ -3007,7 +3013,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
> stock->nr_bytes &= (PAGE_SIZE - 1);
> }
>
> - local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
> + local_unlock_irqrestore(&memcg_stock.obj_lock, flags);
>
> if (nr_pages)
> obj_cgroup_uncharge_pages(objcg, nr_pages);