Message-Id: <20180911005949.5635-2-daniel.m.jordan@oracle.com>
Date: Mon, 10 Sep 2018 20:59:46 -0400
From: Daniel Jordan <daniel.m.jordan@...cle.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org
Cc: aaron.lu@...el.com, ak@...ux.intel.com, akpm@...ux-foundation.org,
dave.dice@...cle.com, dave.hansen@...ux.intel.com,
hannes@...xchg.org, levyossi@...oud.com,
ldufour@...ux.vnet.ibm.com, mgorman@...hsingularity.net,
mhocko@...nel.org, Pavel.Tatashin@...rosoft.com,
steven.sistare@...cle.com, tim.c.chen@...el.com,
vdavydov.dev@...il.com, ying.huang@...el.com
Subject: [RFC PATCH v2 5/8] mm: enable concurrent LRU removals

The previous patch used the concurrent algorithm serially to verify
that it was stable for a single task.  Now, in release_pages, take
lru_lock as reader instead of writer to allow concurrent removals from
one or more LRUs.
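
For illustration only, a minimal userspace sketch of the locking
pattern (not kernel code): a pthread_rwlock_t stands in for lru_lock,
and a single hypothetical links_lock models the finer-grained
per-entry synchronization that the previous patch's concurrent
algorithm provides.  Removal paths take the big lock shared; anything
that needs the whole list stable still takes it exclusive:

#include <pthread.h>

struct node {
	struct node *prev, *next;
};

static pthread_rwlock_t lru_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t links_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node head = { &head, &head };

/* Removal path: shared hold on lru_lock, as in release_pages above. */
static void lru_remove(struct node *n)
{
	pthread_rwlock_rdlock(&lru_lock);
	/* Stand-in for the per-entry concurrent removal algorithm. */
	pthread_mutex_lock(&links_lock);
	n->prev->next = n->next;
	n->next->prev = n->prev;
	pthread_mutex_unlock(&links_lock);
	pthread_rwlock_unlock(&lru_lock);
}

/* Whole-list walk: exclusive hold quiesces all removers. */
static int lru_count(void)
{
	int count = 0;
	struct node *n;

	pthread_rwlock_wrlock(&lru_lock);
	for (n = head.next; n != &head; n = n->next)
		count++;
	pthread_rwlock_unlock(&lru_lock);
	return count;
}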
Suggested-by: Yosef Lev <levyossi@...oud.com>
Signed-off-by: Daniel Jordan <daniel.m.jordan@...cle.com>
---
mm/swap.c | 28 +++++++++++++---------------
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 613b841bd208..b1030eb7f459 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -737,8 +737,8 @@ void release_pages(struct page **pages, int nr)
* same pgdat. The lock is held only if pgdat != NULL.
*/
if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
- write_unlock_irqrestore(&locked_pgdat->lru_lock,
- flags);
+ read_unlock_irqrestore(&locked_pgdat->lru_lock,
+ flags);
locked_pgdat = NULL;
}
@@ -748,9 +748,8 @@ void release_pages(struct page **pages, int nr)
/* Device public page can not be huge page */
if (is_device_public_page(page)) {
if (locked_pgdat) {
- write_unlock_irqrestore(
- &locked_pgdat->lru_lock,
- flags);
+ read_unlock_irqrestore(&locked_pgdat->lru_lock,
+ flags);
locked_pgdat = NULL;
}
put_zone_device_private_or_public_page(page);
@@ -763,9 +762,8 @@ void release_pages(struct page **pages, int nr)
if (PageCompound(page)) {
if (locked_pgdat) {
- write_unlock_irqrestore(
- &locked_pgdat->lru_lock,
- flags);
+ read_unlock_irqrestore(&locked_pgdat->lru_lock,
+ flags);
locked_pgdat = NULL;
}
__put_compound_page(page);
@@ -776,14 +774,14 @@ void release_pages(struct page **pages, int nr)
struct pglist_data *pgdat = page_pgdat(page);
if (pgdat != locked_pgdat) {
- if (locked_pgdat) {
- write_unlock_irqrestore(
- &locked_pgdat->lru_lock, flags);
- }
+ if (locked_pgdat)
+ read_unlock_irqrestore(
+ &locked_pgdat->lru_lock,
+ flags);
lock_batch = 0;
locked_pgdat = pgdat;
- write_lock_irqsave(&locked_pgdat->lru_lock,
- flags);
+ read_lock_irqsave(&locked_pgdat->lru_lock,
+ flags);
}
lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
@@ -800,7 +798,7 @@ void release_pages(struct page **pages, int nr)
list_add(&page->lru, &pages_to_free);
}
if (locked_pgdat)
- write_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
+ read_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
mem_cgroup_uncharge_list(&pages_to_free);
free_unref_page_list(&pages_to_free);
--
2.18.0