Message-Id: <20210914072938.6440-7-songmuchun@bytedance.com>
Date: Tue, 14 Sep 2021 15:28:28 +0800
From: Muchun Song <songmuchun@...edance.com>
To: willy@...radead.org, akpm@...ux-foundation.org, hannes@...xchg.org,
mhocko@...nel.org, vdavydov.dev@...il.com, shakeelb@...gle.com,
guro@...com, shy828301@...il.com, alexs@...nel.org,
richard.weiyang@...il.com, david@...morbit.com,
trond.myklebust@...merspace.com, anna.schumaker@...app.com
Cc: linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-nfs@...r.kernel.org,
zhengqi.arch@...edance.com, duanxiongchun@...edance.com,
fam.zheng@...edance.com, smuchun@...il.com,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v3 06/76] mm: list_lru: only add memcg-aware lrus to the global lru list
The non-memcg-aware lru is always skipped when traversing the global lru
list, which is inefficient. Instead, only add memcg-aware lrus to the
global lru list so that traversal never visits entries it would skip.
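For example, memcg_update_all_list_lrus() previously walked every
registered lru and relied on memcg_update_list_lru() returning early
for non-memcg-aware lrus. With the check moved into
list_lru_register(), the walk only ever sees entries that need
updating. A rough sketch of the two traversals (illustrative only,
not part of the diff):

	/* Before: all lrus are registered, each callee filters. */
	list_for_each_entry(lru, &list_lrus, list)
		memcg_update_list_lru(lru, old_size, new_size); /* may be a no-op */

	/* After: only memcg-aware lrus are ever on the list. */
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_update_list_lru(lru, old_size, new_size); /* always does work */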
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
mm/list_lru.c | 35 ++++++++++++++++-------------------
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 6b2f3cbe5f67..39828632631c 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -15,18 +15,29 @@
#include "slab.h"
#ifdef CONFIG_MEMCG_KMEM
-static LIST_HEAD(list_lrus);
+static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
+static inline bool list_lru_memcg_aware(struct list_lru *lru)
+{
+ return lru->memcg_aware;
+}
+
static void list_lru_register(struct list_lru *lru)
{
+ if (!list_lru_memcg_aware(lru))
+ return;
+
mutex_lock(&list_lrus_mutex);
- list_add(&lru->list, &list_lrus);
+ list_add(&lru->list, &memcg_list_lrus);
mutex_unlock(&list_lrus_mutex);
}
static void list_lru_unregister(struct list_lru *lru)
{
+ if (!list_lru_memcg_aware(lru))
+ return;
+
mutex_lock(&list_lrus_mutex);
list_del(&lru->list);
mutex_unlock(&list_lrus_mutex);
@@ -37,11 +48,6 @@ static int lru_shrinker_id(struct list_lru *lru)
return lru->shrinker_id;
}
-static inline bool list_lru_memcg_aware(struct list_lru *lru)
-{
- return lru->memcg_aware;
-}
-
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
@@ -458,9 +464,6 @@ static int memcg_update_list_lru(struct list_lru *lru,
{
int i;
- if (!list_lru_memcg_aware(lru))
- return 0;
-
for_each_node(i) {
if (memcg_update_list_lru_node(&lru->node[i],
old_size, new_size))
@@ -483,9 +486,6 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
{
int i;
- if (!list_lru_memcg_aware(lru))
- return;
-
for_each_node(i)
memcg_cancel_update_list_lru_node(&lru->node[i],
old_size, new_size);
@@ -498,7 +498,7 @@ int memcg_update_all_list_lrus(int new_size)
int old_size = memcg_nr_cache_ids;
mutex_lock(&list_lrus_mutex);
- list_for_each_entry(lru, &list_lrus, list) {
+ list_for_each_entry(lru, &memcg_list_lrus, list) {
ret = memcg_update_list_lru(lru, old_size, new_size);
if (ret)
goto fail;
@@ -507,7 +507,7 @@ int memcg_update_all_list_lrus(int new_size)
mutex_unlock(&list_lrus_mutex);
return ret;
fail:
- list_for_each_entry_continue_reverse(lru, &list_lrus, list)
+ list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
memcg_cancel_update_list_lru(lru, old_size, new_size);
goto out;
}
@@ -544,9 +544,6 @@ static void memcg_drain_list_lru(struct list_lru *lru,
{
int i;
- if (!list_lru_memcg_aware(lru))
- return;
-
for_each_node(i)
memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}
@@ -556,7 +553,7 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
struct list_lru *lru;
mutex_lock(&list_lrus_mutex);
- list_for_each_entry(lru, &list_lrus, list)
+ list_for_each_entry(lru, &memcg_list_lrus, list)
memcg_drain_list_lru(lru, src_idx, dst_memcg);
mutex_unlock(&list_lrus_mutex);
}
--
2.11.0