Message-Id: <1594429136-20002-21-git-send-email-alex.shi@linux.alibaba.com>
Date: Sat, 11 Jul 2020 08:58:54 +0800
From: Alex Shi <alex.shi@...ux.alibaba.com>
To: akpm@...ux-foundation.org, mgorman@...hsingularity.net,
tj@...nel.org, hughd@...gle.com, khlebnikov@...dex-team.ru,
daniel.m.jordan@...cle.com, yang.shi@...ux.alibaba.com,
willy@...radead.org, hannes@...xchg.org, lkp@...el.com,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org, shakeelb@...gle.com,
iamjoonsoo.kim@....com, richard.weiyang@...il.com,
kirill@...temov.name
Cc: Andrey Ryabinin <aryabinin@...tuozzo.com>,
Jann Horn <jannh@...gle.com>
Subject: [PATCH v16 20/22] mm/vmscan: use relock for move_pages_to_lru
From: Hugh Dickins <hughd@...gle.com>
Use the relock function to replace the open-coded unlock/lock sequences in
move_pages_to_lru(), and skip relocking entirely when the next page belongs
to the lruvec whose lock is already held. This saves a few lock/unlock
cycles.
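For reference, the relock helper itself is introduced by an earlier patch in
this series; below is a minimal sketch of its intended behaviour, mirroring
the open-coded block this patch removes (an illustration only, not the
authoritative definition):

  /*
   * Sketch: keep the currently held lru_lock when the page maps to the
   * same lruvec, otherwise drop it and lock the page's own lruvec.
   */
  static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
  			struct lruvec *locked_lruvec)
  {
  	/* Page still belongs to the lruvec we already hold? Keep the lock. */
  	if (locked_lruvec &&
  	    mem_cgroup_page_lruvec(page, page_pgdat(page)) == locked_lruvec)
  		return locked_lruvec;

  	/* Otherwise drop the old lock (if any) and take the new one. */
  	if (locked_lruvec)
  		spin_unlock_irq(&locked_lruvec->lru_lock);

  	return lock_page_lruvec_irq(page);
  }

With this shape, callers like move_pages_to_lru() pay for an unlock/lock
pair only when consecutive pages come from different lruvecs.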
Signed-off-by: Hugh Dickins <hughd@...gle.com>
Signed-off-by: Alex Shi <alex.shi@...ux.alibaba.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Tejun Heo <tj@...nel.org>
Cc: Andrey Ryabinin <aryabinin@...tuozzo.com>
Cc: Jann Horn <jannh@...gle.com>
Cc: Mel Gorman <mgorman@...hsingularity.net>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Hugh Dickins <hughd@...gle.com>
Cc: cgroups@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org
---
mm/vmscan.c | 17 ++++++-----------
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bdb53a678e7e..078a1640ec60 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1854,15 +1854,15 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 	enum lru_list lru;
 
 	while (!list_empty(list)) {
-		struct lruvec *new_lruvec = NULL;
-
 		page = lru_to_page(list);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
-			spin_unlock_irq(&lruvec->lru_lock);
+			if (lruvec) {
+				spin_unlock_irq(&lruvec->lru_lock);
+				lruvec = NULL;
+			}
 			putback_lru_page(page);
-			spin_lock_irq(&lruvec->lru_lock);
 			continue;
 		}
 
@@ -1876,12 +1876,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		 * list_add(&page->lru,)
 		 *     list_add(&page->lru,) //corrupt
 		 */
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (new_lruvec != lruvec) {
-			if (lruvec)
-				spin_unlock_irq(&lruvec->lru_lock);
-			lruvec = lock_page_lruvec_irq(page);
-		}
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 		SetPageLRU(page);
 
 		if (unlikely(put_page_testzero(page))) {
@@ -1890,8 +1885,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&lruvec->lru_lock);
+				lruvec = NULL;
 				destroy_compound_page(page);
-				spin_lock_irq(&lruvec->lru_lock);
 			} else
 				list_add(&page->lru, &pages_to_free);
 
--
1.8.3.1