Message-ID: <20200827175215.319780-2-guro@fb.com>
Date: Thu, 27 Aug 2020 10:52:12 -0700
From: Roman Gushchin <guro@...com>
To: <linux-mm@...ck.org>
CC: Andrew Morton <akpm@...ux-foundation.org>,
Shakeel Butt <shakeelb@...gle.com>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>, <kernel-team@...com>,
<linux-kernel@...r.kernel.org>, Roman Gushchin <guro@...com>
Subject: [PATCH RFC 1/4] mm: kmem: move memcg_kmem_bypass() calls to get_mem/obj_cgroup_from_current()

Currently memcg_kmem_bypass() is called before obtaining the current
memory/obj cgroup using get_mem/obj_cgroup_from_current(). Moving
memcg_kmem_bypass() into get_mem/obj_cgroup_from_current() reduces
the number of call sites and allows further code simplifications.
Signed-off-by: Roman Gushchin <guro@...com>
---
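For context: memcg_kmem_bypass() is the gate that decides whether kernel-memory
accounting should be skipped for the current context. At the time of this patch
it read roughly as follows; this is a paraphrased sketch of the mm/memcontrol.c
helper from this era, not a verbatim excerpt, and the exact conditions may
differ between releases:

	/*
	 * Paraphrased sketch: never charge from interrupt context, and
	 * bypass kthreads unless a remote memcg was set for them via
	 * current->active_memcg (memalloc_use_memcg()).
	 */
	static bool memcg_kmem_bypass(void)
	{
		if (in_interrupt())
			return true;

		/* Allow remote memcg charging in kthread contexts. */
		if ((!current->mm || (current->flags & PF_KTHREAD)) &&
		    !current->active_memcg)
			return true;

		return false;
	}

With the check folded into get_mem/obj_cgroup_from_current(), a NULL return
becomes the single condition callers have to handle:

	/* Sketch of the post-patch caller pattern used by the hunks below. */
	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return NULL;	/* bypassed, or nothing to charge */
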
 mm/memcontrol.c | 13 ++++++++-----
 mm/percpu.c     |  3 +--
 mm/slab.h       |  3 ---
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dc892a3c4b17..9c08d8d14bc0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1074,6 +1074,9 @@ EXPORT_SYMBOL(get_mem_cgroup_from_page);
  */
 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
 {
+	if (memcg_kmem_bypass())
+		return NULL;
+
 	if (unlikely(current->active_memcg)) {
 		struct mem_cgroup *memcg;
 
@@ -2913,6 +2916,9 @@ __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
 	struct obj_cgroup *objcg = NULL;
 	struct mem_cgroup *memcg;
 
+	if (memcg_kmem_bypass())
+		return NULL;
+
 	if (unlikely(!current->mm && !current->active_memcg))
 		return NULL;
 
@@ -3039,19 +3045,16 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
 	struct mem_cgroup *memcg;
 	int ret = 0;
 
-	if (memcg_kmem_bypass())
-		return 0;
-
 	memcg = get_mem_cgroup_from_current();
-	if (!mem_cgroup_is_root(memcg)) {
+	if (memcg && !mem_cgroup_is_root(memcg)) {
 		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
 		if (!ret) {
 			page->mem_cgroup = memcg;
 			__SetPageKmemcg(page);
 			return 0;
 		}
+		css_put(&memcg->css);
 	}
-	css_put(&memcg->css);
 	return ret;
 }
 
diff --git a/mm/percpu.c b/mm/percpu.c
index f4709629e6de..9b07bd5bc45f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1584,8 +1584,7 @@ static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
 {
 	struct obj_cgroup *objcg;
 
-	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT) ||
-	    memcg_kmem_bypass())
+	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
 		return PCPU_CHUNK_ROOT;
 
 	objcg = get_obj_cgroup_from_current();
diff --git a/mm/slab.h b/mm/slab.h
index 95e5cc1bb2a3..4a24e1702923 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -280,9 +280,6 @@ static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 {
 	struct obj_cgroup *objcg;
 
-	if (memcg_kmem_bypass())
-		return NULL;
-
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
 		return NULL;
--
2.26.2