Message-Id: <20230313112819.38938-5-zhengqi.arch@bytedance.com>
Date: Mon, 13 Mar 2023 19:28:15 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: akpm@...ux-foundation.org, tkhai@...ru, vbabka@...e.cz,
christian.koenig@....com, hannes@...xchg.org, shakeelb@...gle.com,
mhocko@...nel.org, roman.gushchin@...ux.dev, muchun.song@...ux.dev,
david@...hat.com, shy828301@...il.com
Cc: sultan@...neltoast.com, dave@...olabs.net,
penguin-kernel@...ove.SAKURA.ne.jp, paulmck@...nel.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Qi Zheng <zhengqi.arch@...edance.com>
Subject: [PATCH v5 4/8] mm: vmscan: add shrinker_srcu_generation
From: Kirill Tkhai <tkhai@...ru>
After slab shrinking is made lockless with SRCU, the longest
sleep in unregister_shrinker() is the wait for all in-flight
do_shrink_slab() calls to finish.

To avoid a long, unbreakable wait in unregister_shrinker(),
add shrinker_srcu_generation to restore a check similar to the
rwsem_is_contended() check that we had before.

For memcg slab shrinking, we unlock SRCU and continue the
iteration from the next shrinker id.
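
A minimal user-space sketch of the generation-counter pattern, using
C11 atomics as a stand-in for the kernel's atomic_t and SRCU; all
names below are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int generation = 0;

/* Unregister path: bump the generation so in-flight walkers bail out. */
static void invalidate_walkers(void)
{
        atomic_fetch_add(&generation, 1);
        /* the kernel would call synchronize_srcu() here */
}

/* Walker path: snapshot the generation, recheck after each unit of work. */
static void walk_shrinkers(void)
{
        int gen = atomic_load(&generation);

        for (int i = 0; i < 16; i++) {
                /* ... do_shrink_slab()-like work for item i ... */
                if (atomic_load(&generation) != gen) {
                        printf("generation changed, stopping at %d\n", i);
                        return; /* drop the read lock promptly */
                }
        }
}

int main(void)
{
        walk_shrinkers();
        invalidate_walkers();
        walk_shrinkers(); /* a concurrent walker would bail out here */
        return 0;
}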
Signed-off-by: Kirill Tkhai <tkhai@...ru>
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
Acked-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/vmscan.c | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
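
As a note on the resume-from-next-id change below, here is a rough
user-space sketch of the for_each_set_bit() -> for_each_set_bit_from()
restart pattern; next_set_bit() and the map value are made up for
illustration and are not the kernel bitmap API:

#include <stdio.h>

#define NR_IDS 64

static int next_set_bit(unsigned long long map, int from)
{
        for (int i = from; i < NR_IDS; i++)
                if (map & (1ULL << i))
                        return i;
        return NR_IDS;
}

static void walk(unsigned long long map)
{
        int i = 0; /* persists across restarts */
again:
        /* srcu_read_lock() would be taken here */
        for (i = next_set_bit(map, i); i < NR_IDS;
             i = next_set_bit(map, i + 1)) {
                printf("shrink id %d\n", i);
                if (i == 5) {
                        /*
                         * Pretend the generation changed once here:
                         * drop the SRCU read lock and resume the walk
                         * from the NEXT id, not from the beginning.
                         */
                        i++;
                        goto again;
                }
        }
        /* srcu_read_unlock() on the normal exit path */
}

int main(void)
{
        walk(0x2BULL); /* bits 0, 1, 3, 5 set */
        return 0;
}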
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ce7834030f75..5c2a22454320 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -204,6 +204,7 @@ static void set_task_reclaim_state(struct task_struct *task,
LIST_HEAD(shrinker_list);
DECLARE_RWSEM(shrinker_rwsem);
DEFINE_SRCU(shrinker_srcu);
+static atomic_t shrinker_srcu_generation = ATOMIC_INIT(0);
#ifdef CONFIG_MEMCG
static int shrinker_nr_max;
@@ -776,6 +777,7 @@ void unregister_shrinker(struct shrinker *shrinker)
debugfs_entry = shrinker_debugfs_remove(shrinker);
up_write(&shrinker_rwsem);
+ atomic_inc(&shrinker_srcu_generation);
synchronize_srcu(&shrinker_srcu);
debugfs_remove_recursive(debugfs_entry);
@@ -797,6 +799,7 @@ void synchronize_shrinkers(void)
{
down_write(&shrinker_rwsem);
up_write(&shrinker_rwsem);
+ atomic_inc(&shrinker_srcu_generation);
synchronize_srcu(&shrinker_srcu);
}
EXPORT_SYMBOL(synchronize_shrinkers);
@@ -906,18 +909,20 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
{
struct shrinker_info *info;
unsigned long ret, freed = 0;
- int srcu_idx;
- int i;
+ int srcu_idx, generation;
+ int i = 0;
if (!mem_cgroup_online(memcg))
return 0;
+again:
srcu_idx = srcu_read_lock(&shrinker_srcu);
info = shrinker_info_srcu(memcg, nid);
if (unlikely(!info))
goto unlock;
- for_each_set_bit(i, info->map, info->map_nr_max) {
+ generation = atomic_read(&shrinker_srcu_generation);
+ for_each_set_bit_from(i, info->map, info->map_nr_max) {
struct shrink_control sc = {
.gfp_mask = gfp_mask,
.nid = nid,
@@ -963,6 +968,11 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
set_shrinker_bit(memcg, nid, i);
}
freed += ret;
+ if (atomic_read(&shrinker_srcu_generation) != generation) {
+ srcu_read_unlock(&shrinker_srcu, srcu_idx);
+ i++;
+ goto again;
+ }
}
unlock:
srcu_read_unlock(&shrinker_srcu, srcu_idx);
@@ -1002,7 +1012,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
{
unsigned long ret, freed = 0;
struct shrinker *shrinker;
- int srcu_idx;
+ int srcu_idx, generation;
/*
* The root memcg might be allocated even though memcg is disabled
@@ -1016,6 +1026,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
srcu_idx = srcu_read_lock(&shrinker_srcu);
+ generation = atomic_read(&shrinker_srcu_generation);
list_for_each_entry_srcu(shrinker, &shrinker_list, list,
srcu_read_lock_held(&shrinker_srcu)) {
struct shrink_control sc = {
@@ -1028,6 +1039,11 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
if (ret == SHRINK_EMPTY)
ret = 0;
freed += ret;
+
+ if (atomic_read(&shrinker_srcu_generation) != generation) {
+ freed = freed ? : 1;
+ break;
+ }
}
srcu_read_unlock(&shrinker_srcu, srcu_idx);
--
2.20.1