Message-Id: <20210814052519.86679-13-songmuchun@bytedance.com>
Date: Sat, 14 Aug 2021 13:25:19 +0800
From: Muchun Song <songmuchun@...edance.com>
To: guro@...com, hannes@...xchg.org, mhocko@...nel.org,
akpm@...ux-foundation.org, shakeelb@...gle.com,
vdavydov.dev@...il.com
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
duanxiongchun@...edance.com, fam.zheng@...edance.com,
bsingharora@...il.com, shy828301@...il.com, alexs@...nel.org,
smuchun@...il.com, zhengqi.arch@...edance.com,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v1 12/12] mm: lru: use lruvec lock to serialize memcg changes
As described in commit fc574c23558c ("mm/swap.c: serialize memcg
changes in pagevec_lru_move_fn"), TestClearPageLRU() aims to
serialize mem_cgroup_move_account() during pagevec_lru_move_fn().
Now folio_lruvec_lock*() can detect whether the page memcg has been
changed, so we can use the lruvec lock to serialize
mem_cgroup_move_account() during pagevec_lru_move_fn(). This
partially reverts commit fc574c23558c ("mm/swap.c: serialize memcg
changes in pagevec_lru_move_fn").

pagevec_lru_move_fn() is a hotter path than
mem_cgroup_move_account(), so removing an atomic operation from it
is an optimization. This change also avoids dirtying the cacheline
of a page that is not on the LRU.
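
For reference, below is a minimal userspace sketch of the
lock-then-recheck pattern described above. The struct and function
names (list_ctx, object, lock_owner, move_object) are made up for
illustration only and are not kernel APIs.

  #include <pthread.h>
  #include <stdatomic.h>

  struct list_ctx {
          pthread_mutex_t lock;             /* stands in for lruvec->lru_lock */
  };

  struct object {
          _Atomic(struct list_ctx *) owner; /* stands in for the folio's memcg */
  };

  /*
   * Reader side: lock the list we believe owns the object, then re-check
   * ownership under that lock and retry if the object migrated meanwhile.
   */
  static struct list_ctx *lock_owner(struct object *obj)
  {
          struct list_ctx *ctx;

  retry:
          ctx = atomic_load(&obj->owner);
          pthread_mutex_lock(&ctx->lock);
          if (atomic_load(&obj->owner) != ctx) {
                  pthread_mutex_unlock(&ctx->lock);
                  goto retry;
          }
          return ctx;     /* ownership is stable until the lock is dropped */
  }

  /*
   * Writer side: change ownership only while holding the old owner's lock,
   * which is what makes the re-check above sufficient.
   */
  static void move_object(struct object *obj, struct list_ctx *to)
  {
          struct list_ctx *from = lock_owner(obj);

          atomic_store(&obj->owner, to);
          pthread_mutex_unlock(&from->lock);
  }

A caller holding the lock returned by lock_owner() knows the object
cannot change lists underneath it, which mirrors the guarantee the
move_fn callbacks now get from holding the lruvec lock.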
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
mm/memcontrol.c | 31 +++++++++++++++++++++++++++++++
mm/swap.c | 45 ++++++++++++++-------------------------------
mm/vmscan.c | 9 ++++-----
3 files changed, 49 insertions(+), 36 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9464e6d2d735..7732ccf7d180 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1286,12 +1286,38 @@ struct lruvec *folio_lruvec_lock(struct folio *folio)
lruvec = folio_lruvec(folio);
spin_lock(&lruvec->lru_lock);
+ /*
+ * The memcg of the page can be changed by any of the following routines:
+ *
+ * 1) mem_cgroup_move_account() or
+ * 2) memcg_reparent_objcgs()
+ *
+ * A possible bad scenario looks like this:
+ *
+ * CPU0:                        CPU1:                        CPU2:
+ * lruvec = folio_lruvec()
+ *
+ *                              if (!isolate_lru_page())
+ *                                      mem_cgroup_move_account()
+ *
+ *                                                           memcg_reparent_objcgs()
+ *
+ * spin_lock(&lruvec->lru_lock)
+ *            ^^^^^^
+ *            wrong lock
+ *
+ * Either CPU1 or CPU2 can change the page memcg, so we need to check
+ * whether the page memcg has changed and, if so, reacquire the new
+ * lruvec's lock.
+ */
if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock(&lruvec->lru_lock);
goto retry;
}
/*
+ * When we reach here, it means that folio_memcg(folio) is stable.
+ *
* Preemption is disabled inside spin_lock(), which can serve
* as an RCU read-side critical section.
*/
@@ -1309,6 +1335,7 @@ struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
lruvec = folio_lruvec(folio);
spin_lock_irq(&lruvec->lru_lock);
+ /* See the comments in folio_lruvec_lock(). */
if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock_irq(&lruvec->lru_lock);
goto retry;
@@ -1330,6 +1357,7 @@ struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
lruvec = folio_lruvec(folio);
spin_lock_irqsave(&lruvec->lru_lock, *flags);
+ /* See the comments in folio_lruvec_lock(). */
if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
goto retry;
@@ -5836,7 +5864,10 @@ static int mem_cgroup_move_account(struct page *page,
obj_cgroup_get(to->objcg);
obj_cgroup_put(from->objcg);
+ /* See the comments in folio_lruvec_lock(). */
+ spin_lock(&from_vec->lru_lock);
folio->memcg_data = (unsigned long)to->objcg;
+ spin_unlock(&from_vec->lru_lock);
__folio_memcg_unlock(from);
diff --git a/mm/swap.c b/mm/swap.c
index 9554ff008fe6..00b6776860e8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -191,14 +191,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
struct page *page = pvec->pages[i];
struct folio *folio = page_folio(page);
- /* block memcg migration during page moving between lru */
- if (!TestClearPageLRU(page))
- continue;
-
lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
(*move_fn)(page, lruvec);
-
- SetPageLRU(page);
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
@@ -210,7 +204,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
struct folio *folio = page_folio(page);
- if (!folio_test_unevictable(folio)) {
+ if (folio_test_lru(folio) && !folio_test_unevictable(folio)) {
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
lruvec_add_folio_tail(lruvec, folio);
@@ -305,7 +299,8 @@ void lru_note_cost_folio(struct folio *folio)
static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
{
- if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
+ if (folio_test_lru(folio) && !folio_test_active(folio) &&
+ !folio_test_unevictable(folio)) {
long nr_pages = folio_nr_pages(folio);
lruvec_del_folio(lruvec, folio);
@@ -362,12 +357,9 @@ static void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
- if (folio_test_clear_lru(folio)) {
- lruvec = folio_lruvec_lock_irq(folio);
- __folio_activate(folio, lruvec);
- unlock_page_lruvec_irq(lruvec);
- folio_set_lru(folio);
- }
+ lruvec = folio_lruvec_lock_irq(folio);
+ __folio_activate(folio, lruvec);
+ unlock_page_lruvec_irq(lruvec);
}
#endif
@@ -520,6 +512,9 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
bool active = PageActive(page);
int nr_pages = thp_nr_pages(page);
+ if (!PageLRU(page))
+ return;
+
if (PageUnevictable(page))
return;
@@ -557,7 +552,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
- if (PageActive(page) && !PageUnevictable(page)) {
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
int nr_pages = thp_nr_pages(page);
del_page_from_lru_list(page, lruvec);
@@ -573,7 +568,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
- if (PageAnon(page) && PageSwapBacked(page) &&
+ if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
int nr_pages = thp_nr_pages(page);
@@ -983,8 +978,9 @@ void __pagevec_release(struct pagevec *pvec)
}
EXPORT_SYMBOL(__pagevec_release);
-static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
+ struct folio *folio = page_folio(page);
int was_unevictable = folio_test_clear_unevictable(folio);
long nr_pages = folio_nr_pages(folio);
@@ -1040,20 +1036,7 @@ static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
*/
void __pagevec_lru_add(struct pagevec *pvec)
{
- int i;
- struct lruvec *lruvec = NULL;
- unsigned long flags = 0;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct folio *folio = page_folio(pvec->pages[i]);
-
- lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
- __pagevec_lru_add_fn(folio, lruvec);
- }
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
- release_pages(pvec->pages, pvec->nr);
- pagevec_reinit(pvec);
+ pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn);
}
/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 902d36ec91a3..7cff2f748df8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4667,18 +4667,17 @@ void check_move_unevictable_pages(struct pagevec *pvec)
nr_pages = thp_nr_pages(page);
pgscanned += nr_pages;
- /* block memcg migration during page moving between lru */
- if (!TestClearPageLRU(page))
+ lruvec = folio_lruvec_relock_irq(folio, lruvec);
+
+ if (!PageLRU(page) || !PageUnevictable(page))
continue;
- lruvec = folio_lruvec_relock_irq(folio, lruvec);
- if (page_evictable(page) && PageUnevictable(page)) {
+ if (page_evictable(page)) {
del_page_from_lru_list(page, lruvec);
ClearPageUnevictable(page);
add_page_to_lru_list(page, lruvec);
pgrescued += nr_pages;
}
- SetPageLRU(page);
}
if (lruvec) {
--
2.11.0