Message-ID: <20190611231813.3148843-6-guro@fb.com>
Date: Tue, 11 Jun 2019 16:18:08 -0700
From: Roman Gushchin <guro@...com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Vladimir Davydov <vdavydov.dev@...il.com>
CC: <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
<kernel-team@...com>, Johannes Weiner <hannes@...xchg.org>,
Shakeel Butt <shakeelb@...gle.com>,
Waiman Long <longman@...hat.com>, Roman Gushchin <guro@...com>
Subject: [PATCH v7 05/10] mm: unify SLAB and SLUB page accounting
Currently the page accounting code is duplicated in SLAB and SLUB
internals. Let's move it into new (un)charge_slab_page helpers
in mm/slab.h. These helpers are responsible for statistics
(global and memcg-aware) and memcg charging, and they replace
the direct memcg_(un)charge_slab() calls.
Signed-off-by: Roman Gushchin <guro@...com>
Reviewed-by: Shakeel Butt <shakeelb@...gle.com>
Acked-by: Christoph Lameter <cl@...ux.com>
Acked-by: Vladimir Davydov <vdavydov.dev@...il.com>
Acked-by: Johannes Weiner <hannes@...xchg.org>
---
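For reference, the call pattern expected from an allocator backend
after this patch looks roughly like the sketch below. The
example_getpages()/example_freepages() wrappers are illustrative
only and not part of this patch; the charge_slab_page() and
uncharge_slab_page() signatures match the helpers added to mm/slab.h.

	static struct page *example_getpages(struct kmem_cache *s,
					     gfp_t gfp, int order)
	{
		struct page *page = alloc_pages(gfp, order);

		if (!page)
			return NULL;

		/*
		 * charge_slab_page() performs the memcg charge and bumps
		 * NR_SLAB_(UN)RECLAIMABLE in one place; on failure the
		 * page must be returned to the page allocator.
		 */
		if (charge_slab_page(page, gfp, order, s)) {
			__free_pages(page, order);
			return NULL;
		}

		__SetPageSlab(page);
		return page;
	}

	static void example_freepages(struct kmem_cache *s,
				      struct page *page, int order)
	{
		__ClearPageSlab(page);
		/* uncharge_slab_page() reverses both the vmstat and memcg parts. */
		uncharge_slab_page(page, order, s);
		__free_pages(page, order);
	}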
mm/slab.c | 19 +++----------------
mm/slab.h | 25 +++++++++++++++++++++++++
mm/slub.c | 14 ++------------
3 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 4b865393ebb4..b417824a9b15 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1360,7 +1360,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct page *page;
- int nr_pages;
flags |= cachep->allocflags;
@@ -1370,17 +1369,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
return NULL;
}
- if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+ if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
__free_pages(page, cachep->gfporder);
return NULL;
}
- nr_pages = (1 << cachep->gfporder);
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
- else
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
__SetPageSlab(page);
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1395,12 +1388,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
int order = cachep->gfporder;
- unsigned long nr_freed = (1 << order);
-
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
- else
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
BUG_ON(!PageSlab(page));
__ClearPageSlabPfmemalloc(page);
@@ -1409,8 +1396,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
page->mapping = NULL;
if (current->reclaim_state)
- current->reclaim_state->reclaimed_slab += nr_freed;
- memcg_uncharge_slab(page, order, cachep);
+ current->reclaim_state->reclaimed_slab += 1 << order;
+ uncharge_slab_page(page, order, cachep);
__free_pages(page, order);
}
diff --git a/mm/slab.h b/mm/slab.h
index dc83583ee9dd..46623a576a3c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -205,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+ return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+}
+
#ifdef CONFIG_MEMCG_KMEM
/* List of all root caches. */
@@ -361,6 +367,25 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
return page->slab_cache;
}
+static __always_inline int charge_slab_page(struct page *page,
+ gfp_t gfp, int order,
+ struct kmem_cache *s)
+{
+ int ret = memcg_charge_slab(page, gfp, order, s);
+
+ if (!ret)
+ mod_lruvec_page_state(page, cache_vmstat_idx(s), 1 << order);
+
+ return ret;
+}
+
+static __always_inline void uncharge_slab_page(struct page *page, int order,
+ struct kmem_cache *s)
+{
+ mod_lruvec_page_state(page, cache_vmstat_idx(s), -(1 << order));
+ memcg_uncharge_slab(page, order, s);
+}
+
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
struct kmem_cache *cachep;
diff --git a/mm/slub.c b/mm/slub.c
index ae3b1e49ecec..6a5174b51cd6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1488,7 +1488,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
else
page = __alloc_pages_node(node, flags, order);
- if (page && memcg_charge_slab(page, flags, order, s)) {
+ if (page && charge_slab_page(page, flags, order, s)) {
__free_pages(page, order);
page = NULL;
}
@@ -1681,11 +1681,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!page)
return NULL;
- mod_lruvec_page_state(page,
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
- 1 << oo_order(oo));
-
inc_slabs_node(s, page_to_nid(page), page->objects);
return page;
@@ -1719,18 +1714,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
check_object(s, page, p, SLUB_RED_INACTIVE);
}
- mod_lruvec_page_state(page,
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
- -pages);
-
__ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
page->mapping = NULL;
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
- memcg_uncharge_slab(page, order, s);
+ uncharge_slab_page(page, order, s);
__free_pages(page, order);
}
--
2.21.0