[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220810151840.16394-14-laoar.shao@gmail.com>
Date: Wed, 10 Aug 2022 15:18:38 +0000
From: Yafang Shao <laoar.shao@...il.com>
To: ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
kafai@...com, songliubraving@...com, yhs@...com,
john.fastabend@...il.com, kpsingh@...nel.org, sdf@...gle.com,
haoluo@...gle.com, jolsa@...nel.org, hannes@...xchg.org,
mhocko@...nel.org, roman.gushchin@...ux.dev, shakeelb@...gle.com,
songmuchun@...edance.com, akpm@...ux-foundation.org
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org, linux-mm@...ck.org,
Yafang Shao <laoar.shao@...il.com>
Subject: [PATCH bpf-next 13/15] mm, memcg: Add new helper get_obj_cgroup_from_cgroup
Introduce a new helper, get_obj_cgroup_from_cgroup(), which looks up the
obj_cgroup of a specific cgroup via its memory-controller css. The helper
consumes the caller's reference on the cgroup on all paths, and returns
ERR_PTR(-EINVAL) if the memory css is absent or cannot be pinned online.
Signed-off-by: Yafang Shao <laoar.shao@...il.com>
---
include/linux/memcontrol.h | 1 +
mm/memcontrol.c | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 42 insertions(+)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2f0a611..901a921 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1713,6 +1713,7 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
+struct obj_cgroup *get_obj_cgroup_from_cgroup(struct cgroup *cgrp);
struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 618c366..762cffa 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2908,6 +2908,47 @@ static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
return objcg;
}
+/*
+ * get_obj_cgroup_from_memcg - obtain the obj_cgroup of a given memcg.
+ * @memcg: the memory cgroup to look up.
+ *
+ * Returns NULL when kmem accounting is bypassed for the current context
+ * (memcg_kmem_bypass()); otherwise returns the obj_cgroup resolved under
+ * the RCU read lock by __get_obj_cgroup_from_memcg().
+ *
+ * NOTE(review): presumably the returned objcg carries a reference the
+ * caller must drop with obj_cgroup_put() -- confirm against
+ * __get_obj_cgroup_from_memcg(), whose body is not shown here.
+ */
+static struct obj_cgroup *get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
+{
+ struct obj_cgroup *objcg;
+
+ if (memcg_kmem_bypass())
+ return NULL;
+
+ rcu_read_lock();
+ objcg = __get_obj_cgroup_from_memcg(memcg);
+ rcu_read_unlock();
+ return objcg;
+}
+
+/*
+ * get_obj_cgroup_from_cgroup - look up the obj_cgroup backing the memory
+ * controller of a specific cgroup.
+ * @cgrp: target cgroup. The reference the caller holds on @cgrp is
+ *        consumed here on every path (cgroup_put() below), so callers
+ *        must not put it themselves.
+ *
+ * Returns ERR_PTR(-EINVAL) when the memory css is absent or cannot be
+ * pinned online; otherwise the result of get_obj_cgroup_from_memcg(),
+ * which can itself be NULL when kmem accounting is bypassed.
+ *
+ * NOTE(review): mixed ERR_PTR/NULL return convention -- callers have to
+ * check for both; consider unifying before merging.
+ */
+struct obj_cgroup *get_obj_cgroup_from_cgroup(struct cgroup *cgrp)
+{
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *memcg;
+ struct obj_cgroup *objcg;
+
+ /* Pin the memory css (css_tryget_online) so it outlives the RCU section. */
+ rcu_read_lock();
+ css = rcu_dereference(cgrp->subsys[memory_cgrp_id]);
+ if (!css || !css_tryget_online(css)) {
+ rcu_read_unlock();
+ cgroup_put(cgrp);
+ return ERR_PTR(-EINVAL);
+ }
+ rcu_read_unlock();
+ /* The cgroup reference is dropped here; only the css ref remains. */
+ cgroup_put(cgrp);
+
+ memcg = mem_cgroup_from_css(css);
+ /*
+ * NOTE(review): mem_cgroup_from_css() on a non-NULL css presumably
+ * never returns NULL (container_of-style lookup), which would make
+ * this check dead code -- confirm and drop if so.
+ */
+ if (!memcg) {
+ css_put(css);
+ return ERR_PTR(-EINVAL);
+ }
+
+ objcg = get_obj_cgroup_from_memcg(memcg);
+ /* objcg (if any) holds its own reference; the css ref can go. */
+ css_put(css);
+
+ return objcg;
+}
+
__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
struct obj_cgroup *objcg = NULL;
--
1.8.3.1
Powered by blists - more mailing lists