Message-Id: <20210527093336.14895-5-songmuchun@bytedance.com>
Date: Thu, 27 May 2021 17:33:28 +0800
From: Muchun Song <songmuchun@...edance.com>
To: guro@...com, hannes@...xchg.org, mhocko@...nel.org,
akpm@...ux-foundation.org, shakeelb@...gle.com,
vdavydov.dev@...il.com
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
duanxiongchun@...edance.com, fam.zheng@...edance.com,
bsingharora@...il.com, shy828301@...il.com, alexs@...nel.org,
smuchun@...il.com, zhengqi.arch@...edance.com,
Muchun Song <songmuchun@...edance.com>
Subject: [RFC PATCH v4 04/12] mm: vmscan: rework move_pages_to_lru()

In a later patch, we will reparent LRU pages. The pages being moved to
their appropriate LRU lists can be reparented while move_pages_to_lru()
is running, so it is wrong for the caller to hold a single lruvec lock
across the whole operation. Instead, use the more general
relock_page_lruvec_irq() to acquire the correct lruvec lock for each
page.
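
For reference, relock_page_lruvec_irq() only cycles the lock when the
page belongs to a lruvec other than the one already held, so runs of
pages from the same lruvec still take the lock only once. The helper
looks roughly like this (simplified from include/linux/memcontrol.h):

	static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
			struct lruvec *locked_lruvec)
	{
		if (locked_lruvec) {
			/* Already holding the page's lruvec lock: keep it. */
			if (page_matches_lruvec(page, locked_lruvec))
				return locked_lruvec;

			unlock_page_lruvec_irq(locked_lruvec);
		}

		return lock_page_lruvec_irq(page);
	}

Passing a NULL locked lruvec (as move_pages_to_lru() does on entry and
after every explicit unlock) degenerates to a plain
lock_page_lruvec_irq().
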
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
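To make the new locking rule easier to review, the resulting shape of
move_pages_to_lru() is sketched below, with the logic this patch does
not touch elided:

	static unsigned int move_pages_to_lru(struct list_head *list)
	{
		int nr_moved = 0;
		struct lruvec *lruvec = NULL;

		while (!list_empty(list)) {
			struct page *page = lru_to_page(list);

			/* Take (or keep) the lock of the page's lruvec. */
			lruvec = relock_page_lruvec_irq(page, lruvec);

			if (unlikely(!page_evictable(page))) {
				unlock_page_lruvec_irq(lruvec);
				putback_lru_page(page);
				lruvec = NULL;	/* relock on next iteration */
				continue;
			}

			/*
			 * ... unchanged freeing path: it also unlocks and
			 * sets lruvec = NULL before continuing ...
			 */

			add_page_to_lru_list(page, lruvec);
			nr_moved += thp_nr_pages(page);
		}

		if (lruvec)
			unlock_page_lruvec_irq(lruvec);

		/* ... splice pages_to_free back onto @list, as before ... */

		return nr_moved;
	}
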
mm/vmscan.c | 46 +++++++++++++++++++++++-----------------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a15748f9faf..731a8f5a4128 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2013,23 +2013,27 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
  * move_pages_to_lru() moves pages from private @list to appropriate LRU list.
  * On return, @list is reused as a list of pages to be freed by the caller.
  *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate LRU list.
+ *
+ * Note: The caller must not hold any lruvec lock.
  */
-static unsigned int move_pages_to_lru(struct lruvec *lruvec,
-				      struct list_head *list)
+static unsigned int move_pages_to_lru(struct list_head *list)
 {
-	int nr_pages, nr_moved = 0;
+	int nr_moved = 0;
+	struct lruvec *lruvec = NULL;
 	LIST_HEAD(pages_to_free);
-	struct page *page;
 
 	while (!list_empty(list)) {
-		page = lru_to_page(list);
+		int nr_pages;
+		struct page *page = lru_to_page(list);
+
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
-			spin_unlock_irq(&lruvec->lru_lock);
+			unlock_page_lruvec_irq(lruvec);
 			putback_lru_page(page);
-			spin_lock_irq(&lruvec->lru_lock);
+			lruvec = NULL;
 			continue;
 		}
 
@@ -2050,19 +2054,15 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 			__clear_page_lru_flags(page);
 
 			if (unlikely(PageCompound(page))) {
-				spin_unlock_irq(&lruvec->lru_lock);
+				unlock_page_lruvec_irq(lruvec);
 				destroy_compound_page(page);
-				spin_lock_irq(&lruvec->lru_lock);
+				lruvec = NULL;
 			} else
 				list_add(&page->lru, &pages_to_free);
 
 			continue;
 		}
 
-		/*
-		 * All pages were isolated from the same lruvec (and isolation
-		 * inhibits memcg migration).
-		 */
 		VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
@@ -2071,6 +2071,8 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 		workingset_age_nonresident(lruvec, nr_pages);
 	}
 
+	if (lruvec)
+		unlock_page_lruvec_irq(lruvec);
 	/*
 	 * To save our caller's stack, now use input list for pages to free.
 	 */
@@ -2144,16 +2146,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
 
-	spin_lock_irq(&lruvec->lru_lock);
-	move_pages_to_lru(lruvec, &page_list);
+	move_pages_to_lru(&page_list);
 
+	local_irq_disable();
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();
 
 	lru_note_cost(lruvec, file, stat.nr_pageout);
 	mem_cgroup_uncharge_list(&page_list);
@@ -2280,18 +2282,16 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	/*
 	 * Move pages back to the lru list.
 	 */
-	spin_lock_irq(&lruvec->lru_lock);
-
-	nr_activate = move_pages_to_lru(lruvec, &l_active);
-	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
+	nr_activate = move_pages_to_lru(&l_active);
+	nr_deactivate = move_pages_to_lru(&l_inactive);
 	/* Keep all free pages in l_active list */
 	list_splice(&l_inactive, &l_active);
 
+	local_irq_disable();
 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
 	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();
 
 	mem_cgroup_uncharge_list(&l_active);
 	free_unref_page_list(&l_active);
--
2.11.0