Message-Id: <20210310174603.5093-5-shy828301@gmail.com>
Date: Wed, 10 Mar 2021 09:45:54 -0800
From: Yang Shi <shy828301@...il.com>
To: guro@...com, ktkhai@...tuozzo.com, vbabka@...e.cz,
shakeelb@...gle.com, david@...morbit.com, hannes@...xchg.org,
mhocko@...e.com, akpm@...ux-foundation.org
Cc: shy828301@...il.com, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [v9 PATCH 04/13] mm: vmscan: remove memcg_shrinker_map_size

Both memcg_shrinker_map_size and shrinker_nr_max are maintained, but the
map size can be calculated from shrinker_nr_max, so keeping both is
unnecessary.  Remove memcg_shrinker_map_size and keep shrinker_nr_max,
since the latter is also used when iterating the bit map.

Acked-by: Kirill Tkhai <ktkhai@...tuozzo.com>
Acked-by: Roman Gushchin <guro@...com>
Acked-by: Vlastimil Babka <vbabka@...e.cz>
Reviewed-by: Shakeel Butt <shakeelb@...gle.com>
Signed-off-by: Yang Shi <shy828301@...il.com>
---
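
A quick sanity check of the size calculation described above, as a
minimal userspace sketch (the 65-shrinker example, the 64-bit long
width, and the local macro stand-ins are illustrative assumptions, not
part of the patch):

#include <stdio.h>

/* Local stand-ins for the kernel macros, for this sketch only. */
#define BITS_PER_LONG		(8 * (int)sizeof(unsigned long))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the new helper: round nr_items up to whole longs of bits. */
static inline int shrinker_map_size(int nr_items)
{
	return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
}

int main(void)
{
	/* e.g. 65 registered shrinkers -> 2 longs -> 16 bytes on 64-bit */
	printf("%d\n", shrinker_map_size(65));
	return 0;
}
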
 mm/vmscan.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 75fd8038a6c8..bda67e1ac84b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -185,8 +185,12 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);

 #ifdef CONFIG_MEMCG
+static int shrinker_nr_max;

-static int memcg_shrinker_map_size;
+static inline int shrinker_map_size(int nr_items)
+{
+	return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
+}

 static void free_shrinker_map_rcu(struct rcu_head *head)
 {
@@ -247,7 +251,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
 		return 0;

 	down_write(&shrinker_rwsem);
-	size = memcg_shrinker_map_size;
+	size = shrinker_map_size(shrinker_nr_max);
 	for_each_node(nid) {
 		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
 		if (!map) {
@@ -265,12 +269,13 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
 static int expand_shrinker_maps(int new_id)
 {
 	int size, old_size, ret = 0;
+	int new_nr_max = new_id + 1;
 	struct mem_cgroup *memcg;

-	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
-	old_size = memcg_shrinker_map_size;
+	size = shrinker_map_size(new_nr_max);
+	old_size = shrinker_map_size(shrinker_nr_max);
 	if (size <= old_size)
-		return 0;
+		goto out;

 	if (!root_mem_cgroup)
 		goto out;
@@ -289,7 +294,7 @@ static int expand_shrinker_maps(int new_id)
 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 out:
 	if (!ret)
-		memcg_shrinker_map_size = size;
+		shrinker_nr_max = new_nr_max;

 	return ret;
 }
@@ -322,7 +327,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
 #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)

 static DEFINE_IDR(shrinker_idr);
-static int shrinker_nr_max;

 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
@@ -339,8 +343,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 			idr_remove(&shrinker_idr, id);
 			goto unlock;
 		}
-
-		shrinker_nr_max = id + 1;
 	}
 	shrinker->id = id;
 	ret = 0;
--
2.26.2