Message-Id: <20210122220600.906146-7-yuzhao@google.com>
Date: Fri, 22 Jan 2021 15:05:56 -0700
From: Yu Zhao <yuzhao@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>,
Alex Shi <alex.shi@...ux.alibaba.com>
Cc: Michal Hocko <mhocko@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Vladimir Davydov <vdavydov.dev@...il.com>,
Roman Gushchin <guro@...com>, Vlastimil Babka <vbabka@...e.cz>,
Matthew Wilcox <willy@...radead.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Yu Zhao <yuzhao@...gle.com>
Subject: [PATCH v2 06/10] mm: add __clear_page_lru_flags() to replace page_off_lru()
Similar to page_off_lru(), the new function does non-atomic clearing
of PageLRU(), in addition to PageActive() and PageUnevictable(), on a
page that has no references left.

If PageActive() and PageUnevictable() are both set, refuse to clear
either and leave them to bad_page(). This is a behavior change that
is meant to help debugging.
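For illustration, a condensed sketch of a release path after this
change, based on the __page_cache_release() hunk below (the
PageWaiters handling and surrounding code are elided):

	if (PageLRU(page)) {
		struct lruvec *lruvec;
		unsigned long flags;

		/* serialize against other lru list operations */
		lruvec = lock_page_lruvec_irqsave(page, &flags);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		del_page_from_lru_list(page, lruvec);
		/* non-atomic: only safe once the refcount is zero */
		__clear_page_lru_flags(page);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}
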
Link: https://lore.kernel.org/linux-mm/20201207220949.830352-7-yuzhao@google.com/
Signed-off-by: Yu Zhao <yuzhao@...gle.com>
---
 include/linux/mm_inline.h | 28 ++++++++++------------------
 mm/swap.c                 |  6 ++----
 mm/vmscan.c               |  3 +--
 3 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index ffacc6273678..ef3fd79222e5 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -61,27 +61,19 @@ static inline enum lru_list page_lru_base_type(struct page *page)
 }
 
 /**
- * page_off_lru - which LRU list was page on? clearing its lru flags.
- * @page: the page to test
- *
- * Returns the LRU list a page was on, as an index into the array of LRU
- * lists; and clears its Unevictable or Active flags, ready for freeing.
+ * __clear_page_lru_flags - clear page lru flags before releasing a page
+ * @page: the page that was on lru and now has a zero reference
  */
-static __always_inline enum lru_list page_off_lru(struct page *page)
+static __always_inline void __clear_page_lru_flags(struct page *page)
 {
-	enum lru_list lru;
+	__ClearPageLRU(page);
 
-	if (PageUnevictable(page)) {
-		__ClearPageUnevictable(page);
-		lru = LRU_UNEVICTABLE;
-	} else {
-		lru = page_lru_base_type(page);
-		if (PageActive(page)) {
-			__ClearPageActive(page);
-			lru += LRU_ACTIVE;
-		}
-	}
-	return lru;
+	/* this shouldn't happen, so leave the flags to bad_page() */
+	if (PageActive(page) && PageUnevictable(page))
+		return;
+
+	__ClearPageActive(page);
+	__ClearPageUnevictable(page);
 }
 
 /**
diff --git a/mm/swap.c b/mm/swap.c
index 94532799ed82..38900d672051 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -84,9 +84,8 @@ static void __page_cache_release(struct page *page)
 
 		lruvec = lock_page_lruvec_irqsave(page, &flags);
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
-		__ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec);
-		page_off_lru(page);
+		__clear_page_lru_flags(page);
 		unlock_page_lruvec_irqrestore(lruvec, flags);
 	}
 	__ClearPageWaiters(page);
@@ -911,9 +910,8 @@ void release_pages(struct page **pages, int nr)
 				lock_batch = 0;
 
 			VM_BUG_ON_PAGE(!PageLRU(page), page);
-			__ClearPageLRU(page);
 			del_page_from_lru_list(page, lruvec);
-			page_off_lru(page);
+			__clear_page_lru_flags(page);
 		}
 
 		__ClearPageWaiters(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7c65d47e6612..452dd3818aa3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1849,8 +1849,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		SetPageLRU(page);
 
 		if (unlikely(put_page_testzero(page))) {
-			__ClearPageLRU(page);
-			__ClearPageActive(page);
+			__clear_page_lru_flags(page);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&lruvec->lru_lock);
--
2.30.0.280.ga3ce27912f-goog