Message-Id: <1603968305-8026-12-git-send-email-alex.shi@linux.alibaba.com>
Date: Thu, 29 Oct 2020 18:44:56 +0800
From: Alex Shi <alex.shi@...ux.alibaba.com>
To: akpm@...ux-foundation.org, mgorman@...hsingularity.net,
tj@...nel.org, hughd@...gle.com, khlebnikov@...dex-team.ru,
daniel.m.jordan@...cle.com, willy@...radead.org,
hannes@...xchg.org, lkp@...el.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
shakeelb@...gle.com, iamjoonsoo.kim@....com,
richard.weiyang@...il.com, kirill@...temov.name,
alexander.duyck@...il.com, rong.a.chen@...el.com, mhocko@...e.com,
vdavydov.dev@...il.com, shy828301@...il.com
Subject: [PATCH v20 11/20] mm/lru: move lock into lru_note_cost
We have to move lru_lock into lru_note_cost, since it cycles up the memcg
tree; this prepares for the coming per-lruvec lru_lock replacement. It's a
bit ugly and may cost a bit more locking, but the benefit of finer-grained
per-memcg locking should outweigh that loss.
Signed-off-by: Alex Shi <alex.shi@...ux.alibaba.com>
Acked-by: Hugh Dickins <hughd@...gle.com>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org
---
mm/swap.c | 3 +++
mm/vmscan.c | 4 +---
mm/workingset.c | 2 --
3 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 31fc3ebc1079..8798bd899db0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -268,7 +268,9 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 {
 	do {
 		unsigned long lrusize;
+		struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
+		spin_lock_irq(&pgdat->lru_lock);
 		/* Record cost event */
 		if (file)
 			lruvec->file_cost += nr_pages;
@@ -292,6 +294,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 			lruvec->file_cost /= 2;
 			lruvec->anon_cost /= 2;
 		}
+		spin_unlock_irq(&pgdat->lru_lock);
 	} while ((lruvec = parent_lruvec(lruvec)));
 }
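For reference, lru_note_cost() after this change reads roughly as below
(condensed from the hunks above plus the surrounding mm/swap.c context, so
treat the unchanged lines as a sketch rather than gospel). Note that
parent_lruvec() only walks up the memcg hierarchy on the same node, so for
now every iteration takes the same per-node lock; the point of locking per
level is that the later per-lruvec conversion turns these into distinct,
finer-grained locks:

	void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
	{
		do {
			unsigned long lrusize;
			struct pglist_data *pgdat = lruvec_pgdat(lruvec);

			spin_lock_irq(&pgdat->lru_lock);
			/* Record cost event */
			if (file)
				lruvec->file_cost += nr_pages;
			else
				lruvec->anon_cost += nr_pages;

			/* Decay old events so recent refaults weigh more */
			lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
				  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
				  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
				  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

			if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
				lruvec->file_cost /= 2;
				lruvec->anon_cost /= 2;
			}
			spin_unlock_irq(&pgdat->lru_lock);
		} while ((lruvec = parent_lruvec(lruvec)));
	}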
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6c6965cbbdef..42bac12aacb4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1973,19 +1973,17 @@ static int current_may_throttle(void)
 				&stat, false);
 
 	spin_lock_irq(&pgdat->lru_lock);
-
 	move_pages_to_lru(lruvec, &page_list);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-	lru_note_cost(lruvec, file, stat.nr_pageout);
 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-
 	spin_unlock_irq(&pgdat->lru_lock);
 
+	lru_note_cost(lruvec, file, stat.nr_pageout);
 	mem_cgroup_uncharge_list(&page_list);
 	free_unref_page_list(&page_list);
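The reordering above is not cosmetic: lru_note_cost() now takes
pgdat->lru_lock internally for each level of the hierarchy, so it has to
run after the spin_unlock_irq(); otherwise shrink_inactive_list() would
try to re-acquire a non-recursive spinlock it already holds. The tail of
the function ends up looking like this (comment added for illustration):

	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
	spin_unlock_irq(&pgdat->lru_lock);

	/*
	 * Safe only after the unlock: lru_note_cost() acquires
	 * pgdat->lru_lock itself on every level of the memcg walk.
	 */
	lru_note_cost(lruvec, file, stat.nr_pageout);

	mem_cgroup_uncharge_list(&page_list);
	free_unref_page_list(&page_list);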
diff --git a/mm/workingset.c b/mm/workingset.c
index 975a4d2dd02e..d8d2fdf70c24 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -381,9 +381,7 @@ void workingset_refault(struct page *page, void *shadow)
 	if (workingset) {
 		SetPageWorkingset(page);
 		/* XXX: Move to lru_cache_add() when it supports new vs putback */
-		spin_lock_irq(&page_pgdat(page)->lru_lock);
 		lru_note_cost_page(page);
-		spin_unlock_irq(&page_pgdat(page)->lru_lock);
 		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
 	}
 out:
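Dropping the external lock/unlock pair here is safe because
lru_note_cost_page() is only a thin wrapper that resolves the page's
lruvec and calls lru_note_cost(), which now serializes itself. If memory
serves, the wrapper in this tree reads roughly as below (unchanged by this
patch; exact helper names may differ between kernel versions):

	void lru_note_cost_page(struct page *page)
	{
		lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
			      page_is_file_lru(page), thp_nr_pages(page));
	}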
--
1.8.3.1