Message-ID: <20230920190244.16839-6-ryncsn@gmail.com>
Date: Thu, 21 Sep 2023 03:02:43 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Yu Zhao <yuzhao@...gle.com>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...e.com>,
Hugh Dickins <hughd@...gle.com>, Nhat Pham <nphamcs@...il.com>,
Yuanchu Xie <yuanchu@...gle.com>,
Kalesh Singh <kaleshsingh@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
"T . J . Mercier" <tjmercier@...gle.com>,
linux-kernel@...r.kernel.org, Kairui Song <kasong@...cent.com>
Subject: [RFC PATCH v3 5/6] mm, lru_gen: convert avg_total and avg_refaulted to atomic
From: Kairui Song <kasong@...cent.com>
No functional change; this prepares for a later patch.
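For illustration only, a minimal sketch of what the conversion changes; struct avg_ctr and carry_over() below are hypothetical stand-ins for the lru_gen_folio counters and for the carryover step in reset_ctrl_pos() in the diff:

	#include <linux/atomic.h>

	/* hypothetical stand-in for two of the lru_gen_folio counters */
	struct avg_ctr {
		atomic_long_t avg_total;	/* was: unsigned long */
		atomic_long_t evicted;
	};

	static void carry_over(struct avg_ctr *ctr)
	{
		/* before: sum = ctr->avg_total + atomic_long_read(&ctr->evicted);
		 *         WRITE_ONCE(ctr->avg_total, sum / 2);
		 * after: the same read-then-halve step via the atomic helpers,
		 * which also makes RMW ops like atomic_long_add() usable later.
		 */
		long sum = atomic_long_read(&ctr->avg_total) +
			   atomic_long_read(&ctr->evicted);

		atomic_long_set(&ctr->avg_total, sum / 2);
	}

atomic_long_read()/atomic_long_set() behave like READ_ONCE()/WRITE_ONCE() on the counter value, so the carryover arithmetic itself is unchanged.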
Signed-off-by: Kairui Song <kasong@...cent.com>
---
 include/linux/mmzone.h |  4 ++--
 mm/vmscan.c            | 16 ++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4106fbc5b4b3..d944987b67d3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -425,9 +425,9 @@ struct lru_gen_folio {
 	/* the multi-gen LRU sizes, eventually consistent */
 	long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
 	/* the exponential moving average of refaulted */
-	unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
+	atomic_long_t avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
 	/* the exponential moving average of evicted+protected */
-	unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
+	atomic_long_t avg_total[ANON_AND_FILE][MAX_NR_TIERS];
 	/* the first tier doesn't need protection, hence the minus one */
 	unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
 	/* can be modified without holding the LRU lock */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3f4de75e5186..82acc1934c86 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3705,9 +3705,9 @@ static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int hist = lru_hist_from_seq(lrugen->min_seq[type]);
 
-	pos->refaulted = lrugen->avg_refaulted[type][tier] +
+	pos->refaulted = atomic_long_read(&lrugen->avg_refaulted[type][tier]) +
 			 atomic_long_read(&lrugen->refaulted[hist][type][tier]);
-	pos->total = lrugen->avg_total[type][tier] +
+	pos->total = atomic_long_read(&lrugen->avg_total[type][tier]) +
 		     atomic_long_read(&lrugen->evicted[hist][type][tier]);
 	if (tier)
 		pos->total += lrugen->protected[hist][type][tier - 1];
@@ -3732,15 +3732,15 @@ static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
 		if (carryover) {
 			unsigned long sum;
 
-			sum = lrugen->avg_refaulted[type][tier] +
+			sum = atomic_long_read(&lrugen->avg_refaulted[type][tier]) +
 			      atomic_long_read(&lrugen->refaulted[hist][type][tier]);
-			WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
+			atomic_long_set(&lrugen->avg_refaulted[type][tier], sum / 2);
 
-			sum = lrugen->avg_total[type][tier] +
+			sum = atomic_long_read(&lrugen->avg_total[type][tier]) +
 			      atomic_long_read(&lrugen->evicted[hist][type][tier]);
 			if (tier)
 				sum += lrugen->protected[hist][type][tier - 1];
-			WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
+			atomic_long_set(&lrugen->avg_total[type][tier], sum / 2);
 		}
 
 		if (clear) {
@@ -5885,8 +5885,8 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
 
 			if (seq == max_seq) {
 				s = "RT ";
-				n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
-				n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
+				n[0] = atomic_long_read(&lrugen->avg_refaulted[type][tier]);
+				n[1] = atomic_long_read(&lrugen->avg_total[type][tier]);
 			} else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
 				s = "rep";
 				n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
--
2.41.0