Message-ID: <155047806056.13111.495183724111676749.stgit@localhost.localdomain>
Date: Mon, 18 Feb 2019 11:21:00 +0300
From: Kirill Tkhai <ktkhai@...tuozzo.com>
To: akpm@...ux-foundation.org, daniel.m.jordan@...cle.com,
mhocko@...e.com, ktkhai@...tuozzo.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 3/4] mm: Remove pages_to_free argument of
move_active_pages_to_lru()

We may use the input argument list as the output argument too.
This makes the function more similar to putback_inactive_pages().

Signed-off-by: Kirill Tkhai <ktkhai@...tuozzo.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@...cle.com>

v2: Fix comment spelling.
---
mm/vmscan.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
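
Note for readers outside mm/: the idea boils down to the standalone
userspace sketch below. It is not kernel code -- the list helpers are
re-implemented stand-ins for <linux/list.h>, and move_pages_to_lru()
and the refcount field are made-up simplifications of the real function
and of page reference handling -- but it shows the pattern: drain the
input list, park the entries to be discarded on a local list, and splice
that local list back into the now-empty input list so the caller reuses
a single list_head.

/*
 * Minimal userspace sketch of the pattern this patch applies to
 * move_active_pages_to_lru(): drain the input list, park entries to be
 * discarded on a local list, then splice that local list back into the
 * (now empty) input list so the caller reuses one list_head.  The list
 * helpers below are simplified stand-ins for <linux/list.h>.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Move all entries of @list to the front of @head (like list_splice()). */
static void list_splice(struct list_head *list, struct list_head *head)
{
	if (list_empty(list))
		return;
	list->next->prev = head;
	list->prev->next = head->next;
	head->next->prev = list->prev;
	head->next = list->next;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-in for struct page: keep it on the LRU while it has users. */
struct page { struct list_head lru; int refcount; };

/*
 * Keep referenced pages on @lru; on return, @list holds only the pages
 * the caller should free -- the input list is reused as the output list.
 */
static unsigned int move_pages_to_lru(struct list_head *list,
				      struct list_head *lru)
{
	LIST_HEAD(pages_to_free);
	unsigned int nr_moved = 0;

	while (!list_empty(list)) {
		struct page *page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		if (page->refcount > 0) {
			list_add(&page->lru, lru);
			nr_moved++;
		} else {
			list_add(&page->lru, &pages_to_free);
		}
	}
	/* To save the caller's stack, reuse the drained input list. */
	list_splice(&pages_to_free, list);
	return nr_moved;
}

int main(void)
{
	struct page pages[4] = {
		{ .refcount = 1 }, { .refcount = 0 },
		{ .refcount = 2 }, { .refcount = 0 },
	};
	LIST_HEAD(hold);	/* plays the role of l_active/l_inactive */
	LIST_HEAD(lru);
	int i;

	for (i = 0; i < 4; i++)
		list_add(&pages[i].lru, &hold);

	printf("moved %u pages to lru\n", move_pages_to_lru(&hold, &lru));
	/* "hold" now holds only the two pages left to free. */
	return 0;
}

On the caller side this corresponds to the second hunk of the diff
below: the leftovers of l_inactive are spliced into l_active and the
merged list is uncharged and freed in one pass.
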
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8d7d55e71511..656a9b5af2a4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2004,10 +2004,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
struct list_head *list,
- struct list_head *pages_to_free,
enum lru_list lru)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+ LIST_HEAD(pages_to_free);
struct page *page;
int nr_pages;
int nr_moved = 0;
@@ -2034,12 +2034,17 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
(*get_compound_page_dtor(page))(page);
spin_lock_irq(&pgdat->lru_lock);
} else
- list_add(&page->lru, pages_to_free);
+ list_add(&page->lru, &pages_to_free);
} else {
nr_moved += nr_pages;
}
}
+ /*
+ * To save our caller's stack, now use input list for pages to free.
+ */
+ list_splice(&pages_to_free, list);
+
return nr_moved;
}
@@ -2129,8 +2134,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
*/
reclaim_stat->recent_rotated[file] += nr_rotated;
- nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
- nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+ nr_activate = move_active_pages_to_lru(lruvec, &l_active, lru);
+ nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, lru - LRU_ACTIVE);
+ /* Keep all free pages in l_active list */
+ list_splice(&l_inactive, &l_active);
__count_vm_events(PGDEACTIVATE, nr_deactivate);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
@@ -2138,8 +2145,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&pgdat->lru_lock);
- mem_cgroup_uncharge_list(&l_hold);
- free_unref_page_list(&l_hold);
+ mem_cgroup_uncharge_list(&l_active);
+ free_unref_page_list(&l_active);
trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
nr_deactivate, nr_rotated, sc->priority, file);
}