Message-Id: <20210413065633.2782273-6-yuzhao@google.com>
Date: Tue, 13 Apr 2021 00:56:22 -0600
From: Yu Zhao <yuzhao@...gle.com>
To: linux-mm@...ck.org
Cc: Alex Shi <alexs@...nel.org>, Andi Kleen <ak@...ux.intel.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Benjamin Manes <ben.manes@...il.com>,
Dave Chinner <david@...morbit.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Hillf Danton <hdanton@...a.com>, Jens Axboe <axboe@...nel.dk>,
Johannes Weiner <hannes@...xchg.org>,
Jonathan Corbet <corbet@....net>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Matthew Wilcox <willy@...radead.org>,
Mel Gorman <mgorman@...e.de>,
Miaohe Lin <linmiaohe@...wei.com>,
Michael Larabel <michael@...haellarabel.com>,
Michal Hocko <mhocko@...e.com>,
Michel Lespinasse <michel@...pinasse.org>,
Rik van Riel <riel@...riel.com>,
Roman Gushchin <guro@...com>,
Rong Chen <rong.a.chen@...el.com>,
SeongJae Park <sjpark@...zon.de>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Vlastimil Babka <vbabka@...e.cz>,
Yang Shi <shy828301@...il.com>,
Ying Huang <ying.huang@...el.com>, Zi Yan <ziy@...dia.com>,
linux-kernel@...r.kernel.org, lkp@...ts.01.org,
page-reclaim@...gle.com, Yu Zhao <yuzhao@...gle.com>
Subject: [PATCH v2 05/16] mm/swap.c: export activate_page()
activate_page() is needed to activate pages that are already on the LRU or
still queued in lru_pvecs.lru_add. The exported function is a merger of the
existing activate_page() and __lru_cache_activate_page().
Signed-off-by: Yu Zhao <yuzhao@...gle.com>
---
include/linux/swap.h | 1 +
mm/swap.c | 28 +++++++++++++++-------------
2 files changed, 16 insertions(+), 13 deletions(-)
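For illustration only (not part of this patch): with activate_page()
exported, a caller no longer needs to care whether the page has already
been drained to the LRU or is still sitting in lru_pvecs.lru_add. A
minimal sketch of such a caller, using a hypothetical helper name
mark_page_hot():

	#include <linux/mm.h>
	#include <linux/swap.h>

	/*
	 * Hypothetical caller, for illustration only. activate_page()
	 * covers both cases: a page already on the LRU is queued in
	 * lru_pvecs.activate_page, while a page still in
	 * lru_pvecs.lru_add is marked PG_active and moved to the active
	 * LRU on the next drain.
	 */
	static void mark_page_hot(struct page *page)
	{
		if (!PageActive(page))
			activate_page(page);
	}

Previously this would have required open-coding the PageLRU() check and
calling the static __lru_cache_activate_page() for the pagevec case, as
mark_page_accessed() did.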
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4cc6ec3bf0ab..de2bbbf181ba 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -344,6 +344,7 @@ extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
+extern void activate_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
diff --git a/mm/swap.c b/mm/swap.c
index 31b844d4ed94..f20ed56ebbbf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -334,7 +334,7 @@ static bool need_activate_page_drain(int cpu)
return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}
-static void activate_page(struct page *page)
+static void activate_page_on_lru(struct page *page)
{
page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -354,7 +354,7 @@ static inline void activate_page_drain(int cpu)
{
}
-static void activate_page(struct page *page)
+static void activate_page_on_lru(struct page *page)
{
struct lruvec *lruvec;
@@ -368,11 +368,22 @@ static void activate_page(struct page *page)
}
#endif
-static void __lru_cache_activate_page(struct page *page)
+/*
+ * If the page is on the LRU, queue it for activation via
+ * lru_pvecs.activate_page. Otherwise, assume the page is on a
+ * pagevec, mark it active and it'll be moved to the active
+ * LRU on the next drain.
+ */
+void activate_page(struct page *page)
{
struct pagevec *pvec;
int i;
+ if (PageLRU(page)) {
+ activate_page_on_lru(page);
+ return;
+ }
+
local_lock(&lru_pvecs.lock);
pvec = this_cpu_ptr(&lru_pvecs.lru_add);
@@ -421,16 +432,7 @@ void mark_page_accessed(struct page *page)
* evictable page accessed has no effect.
*/
} else if (!PageActive(page)) {
- /*
- * If the page is on the LRU, queue it for activation via
- * lru_pvecs.activate_page. Otherwise, assume the page is on a
- * pagevec, mark it active and it'll be moved to the active
- * LRU on the next drain.
- */
- if (PageLRU(page))
- activate_page(page);
- else
- __lru_cache_activate_page(page);
+ activate_page(page);
ClearPageReferenced(page);
workingset_activation(page);
}
--
2.31.1.295.g9ea45b61b8-goog