Message-Id: <20230914104754.55-2-link@vivo.com>
Date: Thu, 14 Sep 2023 18:47:43 +0800
From: Huan Yang <link@...o.com>
To: Jonathan Corbet <corbet@....net>,
Andrew Morton <akpm@...ux-foundation.org>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeelb@...gle.com>,
Muchun Song <muchun.song@...ux.dev>,
Suleiman Souhlal <suleiman@...gle.com>,
Mike Rapoport <rppt@...nel.org>,
"Jan Alexander Steffens (heftig)" <heftig@...hlinux.org>,
Yu Zhao <yuzhao@...gle.com>, Huan Yang <link@...o.com>,
Brian Geffon <bgeffon@...gle.com>,
Gaosheng Cui <cuigaosheng1@...wei.com>,
Peter Xu <peterx@...hat.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
"T.J. Alumbaugh" <talumbau@...gle.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, cgroups@...r.kernel.org,
Oleksandr Natalenko <oleksandr@...alenko.name>,
Steven Barrett <steven@...uorix.net>,
Suren Baghdasaryan <surenb@...gle.com>
Cc: kernel@...o.com
Subject: [PATCH 1/3] mm: multi-gen LRU: fold lru_gen run_cmd() dispatch into a helper

Move the LRU_GEN DEFINE_MAX_SEQ()/DEFINE_MIN_SEQ() helper macros from
mm/vmscan.c into include/linux/mm_inline.h so they can also be used by
the per-memcg lru_gen code.

Fold the aging/eviction dispatch out of run_cmd() into a new helper,
__process_one_cmd(), so the per-memcg path can run it as well.

Signed-off-by: Huan Yang <link@...o.com>
---
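Note (not for the changelog): below is a minimal sketch of how a per-memcg
caller could use the relocated DEFINE_MAX_SEQ() helper together with
__process_one_cmd(). The function name memcg_lru_gen_run() and its callers
are illustrative assumptions for the follow-up patches, not part of this
patch:

    /*
     * Illustrative only: a per-memcg entry point that drives the same
     * dispatch helper as the debugfs interface. The name and call site
     * are assumptions, not introduced by this patch.
     */
    static int memcg_lru_gen_run(struct mem_cgroup *memcg, int nid, char cmd,
                                 struct scan_control *sc, int swappiness,
                                 unsigned long opt)
    {
            struct lruvec *lruvec = get_lruvec(memcg, nid);

            /* DEFINE_MAX_SEQ() is now visible via <linux/mm_inline.h> */
            DEFINE_MAX_SEQ(lruvec);

            /* '+' runs aging, '-' runs eviction, as in run_cmd() */
            return __process_one_cmd(cmd, lruvec, max_seq, sc,
                                     swappiness, opt);
    }
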
include/linux/mm_inline.h | 9 ++++++++
mm/vmscan.c | 45 +++++++++++++++++++++------------------
2 files changed, 33 insertions(+), 21 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8148b30a9df1..b953b305c8a2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -101,6 +101,15 @@ static __always_inline enum lru_list folio_lru_list(struct folio *folio)
#ifdef CONFIG_LRU_GEN
+#define DEFINE_MAX_SEQ(lruvec) \
+ unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
+
+#define DEFINE_MIN_SEQ(lruvec) \
+ unsigned long min_seq[ANON_AND_FILE] = { \
+ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
+ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
+ }
+
#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ea57a43ebd6b..f59977964e81 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3285,15 +3285,6 @@ static bool should_clear_pmd_young(void)
#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset))
-#define DEFINE_MAX_SEQ(lruvec) \
- unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
-
-#define DEFINE_MIN_SEQ(lruvec) \
- unsigned long min_seq[ANON_AND_FILE] = { \
- READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
- READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
- }
-
#define for_each_gen_type_zone(gen, type, zone) \
for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
@@ -6058,6 +6049,29 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co
return -EINTR;
}
+static int __process_one_cmd(char cmd, struct lruvec *lruvec, unsigned long seq,
+ struct scan_control *sc, int swappiness,
+ unsigned long opt)
+{
+ int err = -EINVAL;
+
+ if (swappiness < 0)
+ swappiness = get_swappiness(lruvec, sc);
+ else if (swappiness > 200)
+ return -EINVAL;
+
+ switch (cmd) {
+ case '+':
+ err = run_aging(lruvec, seq, sc, swappiness, opt);
+ break;
+ case '-':
+ err = run_eviction(lruvec, seq, sc, swappiness, opt);
+ break;
+ }
+
+ return err;
+}
+
static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
struct scan_control *sc, int swappiness, unsigned long opt)
{
@@ -6086,19 +6100,8 @@ static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
lruvec = get_lruvec(memcg, nid);
- if (swappiness < 0)
- swappiness = get_swappiness(lruvec, sc);
- else if (swappiness > 200)
- goto done;
+ err = __process_one_cmd(cmd, lruvec, seq, sc, swappiness, opt);
- switch (cmd) {
- case '+':
- err = run_aging(lruvec, seq, sc, swappiness, opt);
- break;
- case '-':
- err = run_eviction(lruvec, seq, sc, swappiness, opt);
- break;
- }
done:
mem_cgroup_put(memcg);
--
2.34.1