Message-Id: <20210301062227.59292-2-songmuchun@bytedance.com>
Date:   Mon,  1 Mar 2021 14:22:23 +0800
From:   Muchun Song <songmuchun@...edance.com>
To:     viro@...iv.linux.org.uk, jack@...e.cz, amir73il@...il.com,
        ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
        kafai@...com, songliubraving@...com, yhs@...com,
        john.fastabend@...il.com, kpsingh@...nel.org, mingo@...hat.com,
        peterz@...radead.org, juri.lelli@...hat.com,
        vincent.guittot@...aro.org, dietmar.eggemann@....com,
        rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
        bristot@...hat.com, hannes@...xchg.org, mhocko@...nel.org,
        vdavydov.dev@...il.com, akpm@...ux-foundation.org,
        shakeelb@...gle.com, guro@...com, songmuchun@...edance.com,
        alex.shi@...ux.alibaba.com, alexander.h.duyck@...ux.intel.com,
        chris@...isdown.name, richard.weiyang@...il.com, vbabka@...e.cz,
        mathieu.desnoyers@...icios.com, posk@...gle.com, jannh@...gle.com,
        iamjoonsoo.kim@....com, daniel.vetter@...ll.ch, longman@...hat.com,
        walken@...gle.com, christian.brauner@...ntu.com,
        ebiederm@...ssion.com, keescook@...omium.org,
        krisman@...labora.com, esyr@...hat.com, surenb@...gle.com,
        elver@...gle.com
Cc:     linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
        netdev@...r.kernel.org, bpf@...r.kernel.org,
        cgroups@...r.kernel.org, linux-mm@...ck.org,
        duanxiongchun@...edance.com
Subject: [PATCH 1/5] mm: memcontrol: introduce obj_cgroup_{un}charge_page

The unit for charging a slab object is bytes, while the unit for
charging a kmem page is PAGE_SIZE. So if we want to reuse the
obj_cgroup APIs to charge kmem pages, we have to pass PAGE_SIZE (as
the third parameter) to obj_cgroup_charge(). Because the charging
size is the page size, the charge still has to go through the
byte-granular objcg stock, which is pointless: we already know the
charge is in whole pages. So skip the objcg stock entirely and
introduce obj_cgroup_{un}charge_page() to charge or uncharge a kmem
page.
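
For illustration (this mirrors the size split obj_cgroup_charge() in
this file already performs; it is not new code added by this patch):
a PAGE_SIZE charge splits into one whole page and a zero-byte
remainder, so routing it through the byte-granular API buys nothing:

	size_t size = PAGE_SIZE;			/* charging one kmem page */
	unsigned int nr_pages = size >> PAGE_SHIFT;	/* == 1 */
	unsigned int nr_bytes = size & (PAGE_SIZE - 1);	/* == 0 */

	/*
	 * The entire charge is in whole pages; the byte-based objcg
	 * stock handling wrapped around this split is pure overhead
	 * for a kmem page caller.
	 */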

In a later patch, these helpers will be reused to charge/uncharge
kmem pages. This patch is just code movement without any functional
change.
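
For illustration only, a later caller might charge a kmem page with
the new helpers roughly as below. This is a hedged sketch, not code
from this series: the function name charge_kmem_page() is made up,
and using get_obj_cgroup_from_current() here is an assumption.

	/*
	 * Hypothetical sketch: charge one kmem page in page units,
	 * bypassing the byte-based objcg stock.
	 */
	static struct obj_cgroup *charge_kmem_page(gfp_t gfp)
	{
		struct obj_cgroup *objcg;

		objcg = get_obj_cgroup_from_current();	/* takes a reference */
		if (!objcg)
			return NULL;

		if (obj_cgroup_charge_page(objcg, gfp, 1)) {
			obj_cgroup_put(objcg);
			return NULL;
		}

		/* later: obj_cgroup_uncharge_page(objcg, 1); obj_cgroup_put(objcg); */
		return objcg;
	}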

Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
 mm/memcontrol.c | 46 +++++++++++++++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 15 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2db2aeac8a9e..2eafbae504ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3060,6 +3060,34 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
+static inline void obj_cgroup_uncharge_page(struct obj_cgroup *objcg,
+					    unsigned int nr_pages)
+{
+	rcu_read_lock();
+	__memcg_kmem_uncharge(obj_cgroup_memcg(objcg), nr_pages);
+	rcu_read_unlock();
+}
+
+static int obj_cgroup_charge_page(struct obj_cgroup *objcg, gfp_t gfp,
+				  unsigned int nr_pages)
+{
+	struct mem_cgroup *memcg;
+	int ret;
+
+	rcu_read_lock();
+retry:
+	memcg = obj_cgroup_memcg(objcg);
+	if (unlikely(!css_tryget(&memcg->css)))
+		goto retry;
+	rcu_read_unlock();
+
+	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
+
+	css_put(&memcg->css);
+
+	return ret;
+}
+
 /**
  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
@@ -3184,11 +3212,8 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
 
-		if (nr_pages) {
-			rcu_read_lock();
-			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
-			rcu_read_unlock();
-		}
+		if (nr_pages)
+			obj_cgroup_uncharge_page(old, nr_pages);
 
 		/*
 		 * The leftover is flushed to the centralized per-memcg value.
@@ -3246,7 +3271,6 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
 {
-	struct mem_cgroup *memcg;
 	unsigned int nr_pages, nr_bytes;
 	int ret;
 
@@ -3263,24 +3287,16 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
 	 * refill_obj_stock(), called from this function or
 	 * independently later.
 	 */
-	rcu_read_lock();
-retry:
-	memcg = obj_cgroup_memcg(objcg);
-	if (unlikely(!css_tryget(&memcg->css)))
-		goto retry;
-	rcu_read_unlock();
-
 	nr_pages = size >> PAGE_SHIFT;
 	nr_bytes = size & (PAGE_SIZE - 1);
 
 	if (nr_bytes)
 		nr_pages += 1;
 
-	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
+	ret = obj_cgroup_charge_page(objcg, gfp, nr_pages);
 	if (!ret && nr_bytes)
 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
 
-	css_put(&memcg->css);
 	return ret;
 }
 
-- 
2.11.0
