[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <162f1ae4-2adf-4133-8de4-20f240e5469e@redhat.com>
Date: Fri, 18 Apr 2025 22:06:40 -0400
From: Waiman Long <llong@...hat.com>
To: Gregory Price <gourry@...rry.net>, cgroups@...r.kernel.org
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org, kernel-team@...a.com,
tj@...nel.org, hannes@...xchg.org, mkoutny@...e.com, mhocko@...nel.org,
roman.gushchin@...ux.dev, shakeel.butt@...ux.dev, muchun.song@...ux.dev,
akpm@...ux-foundation.org
Subject: Re: [PATCH v2 2/2] vmscan,cgroup: apply mems_effective to reclaim
On 4/17/25 11:13 PM, Gregory Price wrote:
> It is possible for a reclaimer to cause demotions of an lruvec belonging
> to a cgroup with cpuset.mems set to exclude some nodes. Attempt to apply
> this limitation based on the lruvec's memcg and prevent demotion.
>
> Notably, this may still allow demotion of shared libraries or any memory
> first instantiated in another cgroup. This means cpusets still cannot
> guarantee complete isolation when demotion is enabled, and the
> docs have been updated to reflect this.
>
> This is useful for isolating workloads on a multi-tenant system from
> certain classes of memory more consistently - with the noted exceptions.
>
> Signed-off-by: Gregory Price <gourry@...rry.net>
> ---
> .../ABI/testing/sysfs-kernel-mm-numa | 14 ++++---
> include/linux/cgroup.h | 7 ++++
> include/linux/cpuset.h | 5 +++
> include/linux/memcontrol.h | 9 ++++
> kernel/cgroup/cgroup.c | 5 +++
> kernel/cgroup/cpuset.c | 22 ++++++++++
> mm/vmscan.c | 41 +++++++++++--------
> 7 files changed, 82 insertions(+), 21 deletions(-)
>
> diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-numa b/Documentation/ABI/testing/sysfs-kernel-mm-numa
> index 77e559d4ed80..27cdcab901f7 100644
> --- a/Documentation/ABI/testing/sysfs-kernel-mm-numa
> +++ b/Documentation/ABI/testing/sysfs-kernel-mm-numa
> @@ -16,9 +16,13 @@ Description: Enable/disable demoting pages during reclaim
> Allowing page migration during reclaim enables these
> systems to migrate pages from fast tiers to slow tiers
> when the fast tier is under pressure. This migration
> - is performed before swap. It may move data to a NUMA
> - node that does not fall into the cpuset of the
> - allocating process which might be construed to violate
> - the guarantees of cpusets. This should not be enabled
> - on systems which need strict cpuset location
> + is performed before swap if an eligible numa node is
> + present in cpuset.mems for the cgroup. If cpuset.mems
> + changes at runtime, it may move data to a NUMA node that
> + does not fall into the cpuset of the new cpuset.mems,
> + which might be construed to violate the guarantees of
> + cpusets. Shared memory, such as libraries, owned by
> + another cgroup may still be demoted and result in memory
> + use on a node not present in cpuset.mems. This should not
> + be enabled on systems which need strict cpuset location
> guarantees.
> diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
> index f8ef47f8a634..2915250a3e5e 100644
> --- a/include/linux/cgroup.h
> +++ b/include/linux/cgroup.h
> @@ -632,6 +632,8 @@ static inline void cgroup_kthread_ready(void)
>
> void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
> struct cgroup *cgroup_get_from_id(u64 id);
> +
> +extern bool cgroup_node_allowed(struct cgroup *cgroup, int nid);
> #else /* !CONFIG_CGROUPS */
>
> struct cgroup_subsys_state;
> @@ -681,6 +683,11 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
>
> static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
> {}
> +
> +static inline bool cgroup_node_allowed(struct cgroup *cgroup, int nid)
> +{
> + return true;
> +}
> #endif /* !CONFIG_CGROUPS */
>
> #ifdef CONFIG_CGROUPS
> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
> index 893a4c340d48..c64b4a174456 100644
> --- a/include/linux/cpuset.h
> +++ b/include/linux/cpuset.h
> @@ -171,6 +171,7 @@ static inline void set_mems_allowed(nodemask_t nodemask)
> task_unlock(current);
> }
>
> +extern bool cpuset_node_allowed(struct cgroup *cgroup, int nid);
> #else /* !CONFIG_CPUSETS */
>
> static inline bool cpusets_enabled(void) { return false; }
> @@ -282,6 +283,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
> return false;
> }
>
> +static inline bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
> +{
> + return false;
> +}
> #endif /* !CONFIG_CPUSETS */
>
> #endif /* _LINUX_CPUSET_H */
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 53364526d877..2906e4bb12e9 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1736,6 +1736,11 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
> rcu_read_unlock();
> }
>
> +static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
> +{
> + return memcg ? cgroup_node_allowed(memcg->css.cgroup, nid) : true;
> +}
> +
> #else
> static inline bool mem_cgroup_kmem_disabled(void)
> {
> @@ -1793,6 +1798,10 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
> {
> }
>
> +static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
> +{
> + return true;
> +}
> #endif /* CONFIG_MEMCG */
>
> #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
> index afc665b7b1fe..ba0b90cd774c 100644
> --- a/kernel/cgroup/cgroup.c
> +++ b/kernel/cgroup/cgroup.c
> @@ -7038,6 +7038,11 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
> return 0;
> }
>
> +bool cgroup_node_allowed(struct cgroup *cgroup, int nid)
> +{
> + return cpuset_node_allowed(cgroup, nid);
> +}
> +
> /*
> * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
> * definition in cgroup-defs.h.
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index d6ed3f053e62..31e4c4cbcdfc 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -4163,6 +4163,28 @@ bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
> return allowed;
> }
>
> +bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
> +{
> + struct cgroup_subsys_state *css;
> + unsigned long flags;
> + struct cpuset *cs;
> + bool allowed;
> +
> + css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
> + if (!css)
> + return true;
> +
> + cs = container_of(css, struct cpuset, css);
> + spin_lock_irqsave(&callback_lock, flags);
> + /* At least one parent must have a valid node list */
> + while (nodes_empty(cs->effective_mems))
> + cs = parent_cs(cs);
For cgroup v2, effective_mems should always be set and walking up the
tree isn't necessary. For v1, it can be empty, but memory cgroup and
cpuset are unlikely in the same hierarchy.
Cheers,
Longman
Powered by blists - more mailing lists