Message-ID: <20200608230654.828134-15-guro@fb.com>
Date: Mon, 8 Jun 2020 16:06:49 -0700
From: Roman Gushchin <guro@...com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>
CC: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Shakeel Butt <shakeelb@...gle.com>, <linux-mm@...ck.org>,
Vlastimil Babka <vbabka@...e.cz>, <kernel-team@...com>,
<linux-kernel@...r.kernel.org>, Roman Gushchin <guro@...com>
Subject: [PATCH v6 14/19] mm: memcg/slab: remove memcg_kmem_get_cache()

The memcg_kmem_get_cache() function became really trivial, so let's
just inline it into its single call site: memcg_slab_pre_alloc_hook().
It will make the code less bulky and can also help the compiler
generate better code.
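
For reference, a sketch of how memcg_slab_pre_alloc_hook() reads after
the inlining, reconstructed from the mm/slab.h hunk below (the rest of
the signature and the obj_cgroup charging tail are not shown by the
hunk and are abridged here):

    static inline struct kmem_cache *
    memcg_slab_pre_alloc_hook(struct kmem_cache *s /* , ...abridged... */)
    {
            struct kmem_cache *cachep;

            if (memcg_kmem_bypass())
                    return s;

            /* Formerly the body of memcg_kmem_get_cache(): */
            cachep = READ_ONCE(s->memcg_params.memcg_cache);
            if (unlikely(!cachep)) {
                    /*
                     * No memcg cache yet: schedule its asynchronous
                     * creation and let this allocation go through
                     * with the root cache.
                     */
                    queue_work(system_wq, &s->memcg_params.work);
                    return s;
            }

            /* ... obj_cgroup lookup and charging, unchanged ... */
    }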
Signed-off-by: Roman Gushchin <guro@...com>
Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
---
 include/linux/memcontrol.h |  2 --
 mm/memcontrol.c            | 25 +------------------------
 mm/slab.h                  | 11 +++++++++--
 mm/slab_common.c           |  2 +-
 4 files changed, 11 insertions(+), 29 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ed0d2ac6a5d2..eede46c43573 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1403,8 +1403,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-
 #ifdef CONFIG_MEMCG_KMEM
 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
 			unsigned int nr_pages);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2695cdc15baa..09a84326ead1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -393,7 +393,7 @@ void memcg_put_cache_ids(void)
 
 /*
  * A lot of the calls to the cache allocation functions are expected to be
- * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
+ * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
  * conditional to this static branch, we'll have to allow modules that does
  * kmem_cache_alloc and the such to see this symbol as well
  */
@@ -2900,29 +2900,6 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
-/**
- * memcg_kmem_get_cache: select memcg or root cache for allocation
- * @cachep: the original global kmem cache
- *
- * Return the kmem_cache we're supposed to use for a slab allocation.
- *
- * If the cache does not exist yet, if we are the first user of it, we
- * create it asynchronously in a workqueue and let the current allocation
- * go through with the original cache.
- */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
-{
-	struct kmem_cache *memcg_cachep;
-
-	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
-	if (unlikely(!memcg_cachep)) {
-		queue_work(system_wq, &cachep->memcg_params.work);
-		return cachep;
-	}
-
-	return memcg_cachep;
-}
-
 /**
  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
diff --git a/mm/slab.h b/mm/slab.h
index c6c7987dfd85..f4033298a776 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -333,9 +333,16 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 	if (memcg_kmem_bypass())
 		return s;
 
-	cachep = memcg_kmem_get_cache(s);
-	if (is_root_cache(cachep))
+	cachep = READ_ONCE(s->memcg_params.memcg_cache);
+	if (unlikely(!cachep)) {
+		/*
+		 * If memcg cache does not exist yet, we schedule it's
+		 * asynchronous creation and let the current allocation
+		 * go through with the root cache.
+		 */
+		queue_work(system_wq, &s->memcg_params.work);
 		return s;
+	}
 
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 10aa2acb84ca..f8874a159637 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -568,7 +568,7 @@ void memcg_create_kmem_cache(struct kmem_cache *root_cache)
 	}
 
 	/*
-	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
+	 * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a
 	 * barrier here to ensure nobody will see the kmem_cache partially
 	 * initialized.
 	 */
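
As a side note on the comment above: the write side is expected to
publish the new cache with a barrier so that the lock-free READ_ONCE()
in memcg_slab_pre_alloc_hook() can never observe a half-initialized
kmem_cache. A minimal sketch of that idiom, assuming the barrier the
comment refers to is a write barrier placed just before the pointer
store (the exact store is not part of this hunk):

    /* Writer (memcg_create_kmem_cache()), after *s is fully set up: */
    smp_wmb();      /* order the initialization before the publish */
    root_cache->memcg_params.memcg_cache = s;

    /* Reader (allocation fast path), no lock taken: */
    cachep = READ_ONCE(s->memcg_params.memcg_cache);
    if (cachep)
            /* the address dependency makes *cachep's init visible */
            return cachep;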
--
2.25.4