Message-ID: <aGKHXWJl0ECKN1Zh@hyeyoo>
Date: Mon, 30 Jun 2025 21:47:25 +0900
From: Harry Yoo <harry.yoo@...cle.com>
To: Muchun Song <songmuchun@...edance.com>
Cc: hannes@...xchg.org, mhocko@...nel.org, roman.gushchin@...ux.dev,
        shakeel.butt@...ux.dev, muchun.song@...ux.dev,
        akpm@...ux-foundation.org, david@...morbit.com,
        zhengqi.arch@...edance.com, yosry.ahmed@...ux.dev, nphamcs@...il.com,
        chengming.zhou@...ux.dev, linux-kernel@...r.kernel.org,
        cgroups@...r.kernel.org, linux-mm@...ck.org,
        hamzamahfooz@...ux.microsoft.com, apais@...ux.microsoft.com
Subject: Re: [PATCH RFC 26/28] mm: memcontrol: introduce memcg_reparent_ops

On Tue, Apr 15, 2025 at 10:45:30AM +0800, Muchun Song wrote:
> In the previous patch, we established a method to ensure the safety of the
> lruvec lock and the split queue lock during the reparenting of LRU folios.
> The process involves the following steps:
> 
>     memcg_reparent_objcgs(memcg)
>         1) lock
>         // lruvec belongs to memcg and lruvec_parent belongs to parent memcg.
>         spin_lock(&lruvec->lru_lock);
>         spin_lock(&lruvec_parent->lru_lock);
> 
>         2) relocate from current memcg to its parent
>         // Move all the pages from the lruvec list to the parent lruvec list.
> 
>         3) unlock
>         spin_unlock(&lruvec_parent->lru_lock);
>         spin_unlock(&lruvec->lru_lock);
> 
> In addition to the folio lruvec lock, the deferred split queue lock
> (specific to THP) also requires a similar approach. Therefore, we abstract
> the three essential steps from the memcg_reparent_objcgs() function.
> 
>     memcg_reparent_objcgs(memcg)
>         1) lock
>         memcg_reparent_ops->lock(memcg, parent);
> 
>         2) relocate
>         memcg_reparent_ops->relocate(memcg, parent);
> 
>         3) unlock
>         memcg_reparent_ops->unlock(memcg, parent);
> 
> Currently, two distinct locks (the lruvec lock and the deferred split
> queue lock) need this infrastructure. In the subsequent patch, we will
> use these APIs to ensure the safety of these locks during the
> reparenting of LRU folios.
> 
> Signed-off-by: Muchun Song <songmuchun@...edance.com>
> ---
>  include/linux/memcontrol.h | 20 ++++++++++++
>  mm/memcontrol.c            | 62 ++++++++++++++++++++++++++++++--------
>  2 files changed, 69 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 27b23e464229..0e450623f8fa 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -311,6 +311,26 @@ struct mem_cgroup {
>  	struct mem_cgroup_per_node *nodeinfo[];
>  };
>  
> +struct memcg_reparent_ops {
> +	/*
> +	 * Note that interrupt is disabled before calling those callbacks,
> +	 * so the interrupt should remain disabled when leaving those callbacks.
> +	 */
> +	void (*lock)(struct mem_cgroup *src, struct mem_cgroup *dst);
> +	void (*relocate)(struct mem_cgroup *src, struct mem_cgroup *dst);
> +	void (*unlock)(struct mem_cgroup *src, struct mem_cgroup *dst);
> +};
> +
> +#define DEFINE_MEMCG_REPARENT_OPS(name)					\
> +	const struct memcg_reparent_ops memcg_##name##_reparent_ops = {	\
> +		.lock		= name##_reparent_lock,			\
> +		.relocate	= name##_reparent_relocate,		\
> +		.unlock		= name##_reparent_unlock,		\
> +	}
> +
> +#define DECLARE_MEMCG_REPARENT_OPS(name)				\
> +	extern const struct memcg_reparent_ops memcg_##name##_reparent_ops
> +
>  /*
>   * size of first charge trial.
>   * TODO: maybe necessary to use big numbers in big irons or dynamic based of the
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 1f0c6e7b69cc..3fac51179186 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -194,24 +194,60 @@ static struct obj_cgroup *obj_cgroup_alloc(void)
>  	return objcg;
>  }
>  
> -static void memcg_reparent_objcgs(struct mem_cgroup *memcg)
> +static void objcg_reparent_lock(struct mem_cgroup *src, struct mem_cgroup *dst)
> +{
> +	spin_lock(&objcg_lock);
> +}
> +
> +static void objcg_reparent_relocate(struct mem_cgroup *src, struct mem_cgroup *dst)
>  {
>  	struct obj_cgroup *objcg, *iter;
> -	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
>  
> -	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
> +	objcg = rcu_replace_pointer(src->objcg, NULL, true);
> +	/* 1) Ready to reparent active objcg. */
> +	list_add(&objcg->list, &src->objcg_list);
> +	/* 2) Reparent active objcg and already reparented objcgs to dst. */
> +	list_for_each_entry(iter, &src->objcg_list, list)
> +		WRITE_ONCE(iter->memcg, dst);
> +	/* 3) Move already reparented objcgs to the dst's list */
> +	list_splice(&src->objcg_list, &dst->objcg_list);
> +}
>  
> -	spin_lock_irq(&objcg_lock);
> +static void objcg_reparent_unlock(struct mem_cgroup *src, struct mem_cgroup *dst)
> +{
> +	spin_unlock(&objcg_lock);
> +}
>  
> -	/* 1) Ready to reparent active objcg. */
> -	list_add(&objcg->list, &memcg->objcg_list);
> -	/* 2) Reparent active objcg and already reparented objcgs to parent. */
> -	list_for_each_entry(iter, &memcg->objcg_list, list)
> -		WRITE_ONCE(iter->memcg, parent);
> -	/* 3) Move already reparented objcgs to the parent's list */
> -	list_splice(&memcg->objcg_list, &parent->objcg_list);
> -
> -	spin_unlock_irq(&objcg_lock);
> +static DEFINE_MEMCG_REPARENT_OPS(objcg);
> +
> +static const struct memcg_reparent_ops *memcg_reparent_ops[] = {
> +	&memcg_objcg_reparent_ops,
> +};
> +
> +#define DEFINE_MEMCG_REPARENT_FUNC(phase)				\
> +	static void memcg_reparent_##phase(struct mem_cgroup *src,	\
> +					   struct mem_cgroup *dst)	\
> +	{								\
> +		int i;							\
> +									\
> +		for (i = 0; i < ARRAY_SIZE(memcg_reparent_ops); i++)	\
> +			memcg_reparent_ops[i]->phase(src, dst);		\
> +	}
> +
> +DEFINE_MEMCG_REPARENT_FUNC(lock)
> +DEFINE_MEMCG_REPARENT_FUNC(relocate)
> +DEFINE_MEMCG_REPARENT_FUNC(unlock)
> +
> +static void memcg_reparent_objcgs(struct mem_cgroup *src)
> +{
> +	struct mem_cgroup *dst = parent_mem_cgroup(src);
> +	struct obj_cgroup *objcg = rcu_dereference_protected(src->objcg, true);
> +
> +	local_irq_disable();
> +	memcg_reparent_lock(src, dst);
> +	memcg_reparent_relocate(src, dst);
> +	memcg_reparent_unlock(src, dst);
> +	local_irq_enable();

Hi,

It seems unnecessarily complicated to 1) acquire the objcg, lruvec and
THP deferred split queue locks all at once, 2) call their ->relocate()
callbacks, and then 3) release all of those locks.

Why not simply do the following instead?

for (i = 0; i < ARRAY_SIZE(memcg_reparent_ops); i++) {
	local_irq_disable();
	memcg_reparent_ops[i]->lock(src, dst);
	memcg_reparent_ops[i]->relocate(src, dst);
	memcg_reparent_ops[i]->unlock(src, dst);
	local_irq_enable();
}

As far as I can tell, there is no actual lock dependency between the
three.
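
For example, memcg_reparent_objcgs() could then shrink to something
like this (untested sketch, just reusing the names introduced in this
patch):

static void memcg_reparent_objcgs(struct mem_cgroup *src)
{
	struct mem_cgroup *dst = parent_mem_cgroup(src);
	struct obj_cgroup *objcg = rcu_dereference_protected(src->objcg, true);
	int i;

	/* Each ops entry takes and drops only its own lock(s). */
	for (i = 0; i < ARRAY_SIZE(memcg_reparent_ops); i++) {
		local_irq_disable();
		memcg_reparent_ops[i]->lock(src, dst);
		memcg_reparent_ops[i]->relocate(src, dst);
		memcg_reparent_ops[i]->unlock(src, dst);
		local_irq_enable();
	}

	percpu_ref_kill(&objcg->refcnt);
}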

Or am I missing something important about the locking requirements?

-- 
Cheers,
Harry / Hyeonggon

>  
>  	percpu_ref_kill(&objcg->refcnt);
>  }
> -- 
> 2.20.1
> 

