Message-Id: <1587970985-21629-14-git-send-email-alex.shi@linux.alibaba.com>
Date: Mon, 27 Apr 2020 15:03:02 +0800
From: Alex Shi <alex.shi@...ux.alibaba.com>
To: akpm@...ux-foundation.org, mgorman@...hsingularity.net,
tj@...nel.org, hughd@...gle.com, khlebnikov@...dex-team.ru,
daniel.m.jordan@...cle.com, yang.shi@...ux.alibaba.com,
willy@...radead.org, hannes@...xchg.org, lkp@...el.com,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org, shakeelb@...gle.com,
iamjoonsoo.kim@....com, richard.weiyang@...il.com
Cc: Alex Shi <alex.shi@...ux.alibaba.com>,
Thomas Gleixner <tglx@...utronix.de>,
Andrey Ryabinin <aryabinin@...tuozzo.com>
Subject: [PATCH v10 12/15] mm/lru: introduce the relock_page_lruvec function

Use this new function to replace the open-coded compare-and-relock
sequences that are repeated in __munlock_pagevec(),
pagevec_lru_move_fn(), release_pages() and
check_move_unevictable_pages().
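
As a usage sketch (illustrative only, not part of the patch; the
pagevec walk mirrors pagevec_lru_move_fn() below, and "pvec" stands in
for any caller's pagevec):

	struct lruvec *lruvec = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		/* take this page's lru lock, or keep the one already held */
		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
		/* ... operate on page under lruvec's lru_lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);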
Signed-off-by: Alex Shi <alex.shi@...ux.alibaba.com>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Andrey Ryabinin <aryabinin@...tuozzo.com>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Mel Gorman <mgorman@...hsingularity.net>
Cc: Konstantin Khlebnikov <khlebnikov@...dex-team.ru>
Cc: Hugh Dickins <hughd@...gle.com>
Cc: Tejun Heo <tj@...nel.org>
Cc: linux-kernel@...r.kernel.org
Cc: cgroups@...r.kernel.org
Cc: linux-mm@...ck.org
---
include/linux/memcontrol.h | 36 ++++++++++++++++++++++++++++++++++++
mm/mlock.c | 9 +--------
mm/swap.c | 23 +++++------------------
mm/vmscan.c | 8 +-------
4 files changed, 43 insertions(+), 33 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f69e9788cd1c..1cf82b6747b5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1298,6 +1298,42 @@ static inline void dec_lruvec_page_state(struct page *page,
mod_lruvec_page_state(page, idx, -1);
}
+/* Don't relock if the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+ struct lruvec *locked_lruvec)
+{
+ struct pglist_data *pgdat = page_pgdat(page);
+ struct lruvec *lruvec;
+
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
+
+ if (likely(locked_lruvec == lruvec))
+ return lruvec;
+
+ if (unlikely(locked_lruvec))
+ unlock_page_lruvec_irq(locked_lruvec);
+
+ return lock_page_lruvec_irq(page);
+}
+
+/* Don't relock if the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+ struct lruvec *locked_lruvec, unsigned long *flags)
+{
+ struct pglist_data *pgdat = page_pgdat(page);
+ struct lruvec *lruvec;
+
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
+
+ if (likely(locked_lruvec == lruvec))
+ return lruvec;
+
+ if (unlikely(locked_lruvec))
+ unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+
+ return lock_page_lruvec_irqsave(page, flags);
+}
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
diff --git a/mm/mlock.c b/mm/mlock.c
index a0e43acb9ba4..d06f0a9054e3 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -289,17 +289,10 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
/* Phase 1: page isolation */
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];
- struct lruvec *new_lruvec;
bool clearlru;
clearlru = TestClearPageLRU(page);
-
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (new_lruvec != lruvec) {
- if (lruvec)
- unlock_page_lruvec_irq(lruvec);
- lruvec = lock_page_lruvec_irq(page);
- }
+ lruvec = relock_page_lruvec_irq(page, lruvec);
if (!TestClearPageMlocked(page)) {
delta_munlocked++;
diff --git a/mm/swap.c b/mm/swap.c
index d5a7eda448e3..cc0a710c6506 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -193,15 +193,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
+ lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
(*move_fn)(page, lruvec, arg);
}
if (lruvec)
@@ -825,17 +818,11 @@ void release_pages(struct page **pages, int nr)
}
if (TestClearPageLRU(page)) {
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page,
- page_pgdat(page));
- if (new_lruvec != lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec,
- flags);
+ struct lruvec *prev_lruvec = lruvec;
+
+ lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+ if (prev_lruvec != lruvec)
lock_batch = 0;
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
del_page_from_lru_list(page, lruvec, page_off_lru(page));
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5266f887b2f5..6dd3e1be5bbb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4295,15 +4295,9 @@ void check_move_unevictable_pages(struct pagevec *pvec)
for (i = 0; i < pvec->nr; i++) {
struct page *page = pvec->pages[i];
- struct lruvec *new_lruvec;
pgscanned++;
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irq(lruvec);
- lruvec = lock_page_lruvec_irq(page);
- }
+ lruvec = relock_page_lruvec_irq(page, lruvec);
if (!PageLRU(page) || !PageUnevictable(page))
continue;
--
1.8.3.1