Message-ID: <87y0rdobq1.fsf@linux.dev>
Date: Wed, 20 Aug 2025 15:43:50 -0700
From: Roman Gushchin <roman.gushchin@...ux.dev>
To: Kumar Kartikeya Dwivedi <memxor@...il.com>
Cc: linux-mm@...ck.org, bpf@...r.kernel.org, Suren Baghdasaryan
<surenb@...gle.com>, Johannes Weiner <hannes@...xchg.org>, Michal Hocko
<mhocko@...e.com>, David Rientjes <rientjes@...gle.com>, Matt Bobrowski
<mattbobrowski@...gle.com>, Song Liu <song@...nel.org>, Alexei
Starovoitov <ast@...nel.org>, Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v1 04/14] mm: introduce bpf kfuncs to deal with memcg
pointers
Kumar Kartikeya Dwivedi <memxor@...il.com> writes:
> On Mon, 18 Aug 2025 at 19:02, Roman Gushchin <roman.gushchin@...ux.dev> wrote:
>>
>> To effectively operate with memory cgroups in bpf there is a need
>> to convert css pointers to memcg pointers. A simple container_of
>> cast which is used in the kernel code can't be used in bpf because
>> from the verifier's point of view that's an out-of-bounds memory access.
>>
>> Introduce helper get/put kfuncs which can be used to get
>> a refcounted memcg pointer from the css pointer:
>> - bpf_get_mem_cgroup,
>> - bpf_put_mem_cgroup.
>>
>> bpf_get_mem_cgroup() can take both the memcg's css and the corresponding
>> cgroup's "self" css. This allows it to be used with the existing cgroup
>> iterator, which iterates over the cgroup tree, not the memcg tree.
>>
>> Signed-off-by: Roman Gushchin <roman.gushchin@...ux.dev>
>> ---
>> include/linux/memcontrol.h | 2 +
>> mm/Makefile | 1 +
>> mm/bpf_memcontrol.c | 151 +++++++++++++++++++++++++++++++++++++
>> 3 files changed, 154 insertions(+)
>> create mode 100644 mm/bpf_memcontrol.c
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index 87b6688f124a..785a064000cd 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -932,6 +932,8 @@ static inline void mod_memcg_page_state(struct page *page,
>> rcu_read_unlock();
>> }
>>
>> +unsigned long memcg_events(struct mem_cgroup *memcg, int event);
>> +unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
>> unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
>> unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
>> unsigned long lruvec_page_state_local(struct lruvec *lruvec,
>> diff --git a/mm/Makefile b/mm/Makefile
>> index a714aba03759..c397af904a87 100644
>> --- a/mm/Makefile
>> +++ b/mm/Makefile
>> @@ -107,6 +107,7 @@ obj-$(CONFIG_MEMCG) += swap_cgroup.o
>> endif
>> ifdef CONFIG_BPF_SYSCALL
>> obj-y += bpf_oom.o
>> +obj-$(CONFIG_MEMCG) += bpf_memcontrol.o
>> endif
>> obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
>> obj-$(CONFIG_GUP_TEST) += gup_test.o
>> diff --git a/mm/bpf_memcontrol.c b/mm/bpf_memcontrol.c
>> new file mode 100644
>> index 000000000000..66f2a359af7e
>> --- /dev/null
>> +++ b/mm/bpf_memcontrol.c
>> @@ -0,0 +1,151 @@
>> +// SPDX-License-Identifier: GPL-2.0-or-later
>> +/*
>> + * Memory Controller-related BPF kfuncs and auxiliary code
>> + *
>> + * Author: Roman Gushchin <roman.gushchin@...ux.dev>
>> + */
>> +
>> +#include <linux/memcontrol.h>
>> +#include <linux/bpf.h>
>> +
>> +__bpf_kfunc_start_defs();
>> +
>> +/**
>> + * bpf_get_mem_cgroup - Get a reference to a memory cgroup
>> + * @css: pointer to the css structure
>> + *
>> + * Returns a pointer to a mem_cgroup structure after bumping
>> + * the corresponding css's reference counter.
>> + *
>> + * It's fine to pass a css which belongs to any cgroup controller,
>> + * e.g. the unified hierarchy's main css.
>> + *
>> + * Implements KF_ACQUIRE semantics.
>> + */
>> +__bpf_kfunc struct mem_cgroup *
>> +bpf_get_mem_cgroup(struct cgroup_subsys_state *css)
>> +{
>> + struct mem_cgroup *memcg = NULL;
>> + bool rcu_unlock = false;
>> +
>> + if (!root_mem_cgroup)
>> + return NULL;
>> +
>> + if (root_mem_cgroup->css.ss != css->ss) {
>> + struct cgroup *cgroup = css->cgroup;
>> + int ssid = root_mem_cgroup->css.ss->id;
>> +
>> + rcu_read_lock();
>> + rcu_unlock = true;
>> + css = rcu_dereference_raw(cgroup->subsys[ssid]);
>> + }
>> +
>> + if (css && css_tryget(css))
>> + memcg = container_of(css, struct mem_cgroup, css);
>> +
>> + if (rcu_unlock)
>> + rcu_read_unlock();
>> +
>> + return memcg;
>> +}
>> +
>> +/**
>> + * bpf_put_mem_cgroup - Put a reference to a memory cgroup
>> + * @memcg: memory cgroup to release
>> + *
>> + * Releases a previously acquired memcg reference.
>> + * Implements KF_RELEASE semantics.
>> + */
>> +__bpf_kfunc void bpf_put_mem_cgroup(struct mem_cgroup *memcg)
>> +{
>> + css_put(&memcg->css);
>> +}
>> +
>> +/**
>> + * bpf_mem_cgroup_events - Read memory cgroup's event counter
>> + * @memcg: memory cgroup
>> + * @event: event idx
>> + *
>> + * Allows reading memory cgroup event counters.
>> + */
>> +__bpf_kfunc unsigned long bpf_mem_cgroup_events(struct mem_cgroup *memcg, int event)
>> +{
>> +
>> + if (event < 0 || event >= NR_VM_EVENT_ITEMS)
>> + return (unsigned long)-1;
>> +
>> + return memcg_events(memcg, event);
>> +}
>> +
>> +/**
>> + * bpf_mem_cgroup_usage - Read memory cgroup's usage
>> + * @memcg: memory cgroup
>> + *
>> + * Returns the current memory cgroup usage in pages.
>> + */
>> +__bpf_kfunc unsigned long bpf_mem_cgroup_usage(struct mem_cgroup *memcg)
>> +{
>> + return page_counter_read(&memcg->memory);
>> +}
>> +
>> +/**
>> + * bpf_mem_cgroup_page_state - Read memory cgroup's page state counter
>> + * @memcg: memory cgroup
>> + * @idx: page state item idx
>> + *
>> + * Allows reading memory cgroup statistics.
>> + */
>> +__bpf_kfunc unsigned long bpf_mem_cgroup_page_state(struct mem_cgroup *memcg, int idx)
>> +{
>> + if (idx < 0 || idx >= MEMCG_NR_STAT)
>> + return (unsigned long)-1;
>> +
>> + return memcg_page_state(memcg, idx);
>> +}
>> +
>> +/**
>> + * bpf_mem_cgroup_flush_stats - Flush memory cgroup's statistics
>> + * @memcg: memory cgroup
>> + *
>> + * Propagate memory cgroup's statistics up the cgroup tree.
>> + *
>> + * Note that this function uses the rate-limited version of
>> + * mem_cgroup_flush_stats() to avoid hurting system-wide
>> + * performance. So bpf_mem_cgroup_flush_stats() only guarantees
>> + * that the statistics are not stale beyond 2*FLUSH_TIME.
>> + */
>> +__bpf_kfunc void bpf_mem_cgroup_flush_stats(struct mem_cgroup *memcg)
>> +{
>> + mem_cgroup_flush_stats_ratelimited(memcg);
>> +}
>> +
>> +__bpf_kfunc_end_defs();
>> +
>> +BTF_KFUNCS_START(bpf_memcontrol_kfuncs)
>> +BTF_ID_FLAGS(func, bpf_get_mem_cgroup, KF_ACQUIRE | KF_RET_NULL)
>
> I think you could set KF_TRUSTED_ARGS for this as well.
Not really. The intended use case is to iterate over the cgroup tree,
which gives non-trusted css pointers:
bpf_for_each(css, css_pos, &root_memcg->css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
        memcg = bpf_get_mem_cgroup(css_pos);
}
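
For reference, a slightly fuller sketch of the intended pattern on the BPF
side (the __ksym extern declarations and the surrounding variables,
including where root_memcg comes from, are illustrative assumptions; only
the kfuncs themselves are introduced by this patch, and depending on the
attach point the walk may additionally need to run under
bpf_rcu_read_lock()):

extern struct mem_cgroup *
bpf_get_mem_cgroup(struct cgroup_subsys_state *css) __ksym;
extern void bpf_put_mem_cgroup(struct mem_cgroup *memcg) __ksym;
extern unsigned long bpf_mem_cgroup_usage(struct mem_cgroup *memcg) __ksym;
extern void bpf_mem_cgroup_flush_stats(struct mem_cgroup *memcg) __ksym;

struct cgroup_subsys_state *css_pos;
struct mem_cgroup *memcg;
unsigned long usage;

bpf_for_each(css, css_pos, &root_memcg->css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
        /*
         * Resolve the iterator's css (any controller's css is fine,
         * including the cgroup's "self" css) to the memcg and take a
         * reference. KF_ACQUIRE | KF_RET_NULL, so check for NULL.
         */
        memcg = bpf_get_mem_cgroup(css_pos);
        if (!memcg)
                continue;

        /* Rate-limited flush, then read the current usage. */
        bpf_mem_cgroup_flush_stats(memcg);
        usage = bpf_mem_cgroup_usage(memcg);

        /* ... pick a victim, aggregate per-memcg stats, etc. ... */

        bpf_put_mem_cgroup(memcg);
}

Every successful bpf_get_mem_cgroup() has to be paired with a
bpf_put_mem_cgroup(), otherwise the verifier rejects the program for
leaking the acquired reference.
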
Thanks