Message-ID: <154998444053.18704.14821278988281142015.stgit@localhost.localdomain>
Date: Tue, 12 Feb 2019 18:14:00 +0300
From: Kirill Tkhai <ktkhai@...tuozzo.com>
To: akpm@...ux-foundation.org, mhocko@...e.com, ktkhai@...tuozzo.com,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: [PATCH 1/4] mm: Move recent_rotated pages calculation to shrink_inactive_list()

Currently, struct reclaim_stat::nr_activate is effectively a local
variable, used only inside shrink_page_list(). This patch introduces
a separate local variable, pgactivate, to take over that role, and
reuses nr_activate to account the number of activated pages.
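
For illustration, the resulting flow looks roughly like this (a
condensed sketch against the vmscan.c this series is based on, not
literal code from the patch; the call site is abbreviated):
shrink_page_list() fills stat.nr_activate[], and shrink_inactive_list()
publishes it as recent_rotated[] instead of recomputing the numbers
during putback:

	struct reclaim_stat stat;

	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
					&stat, false);
	...
	/* Activated pages were already counted in shrink_page_list() */
	reclaim_stat->recent_rotated[0] = stat.nr_activate[0];	/* anon */
	reclaim_stat->recent_rotated[1] = stat.nr_activate[1];	/* file */

	putback_inactive_pages(lruvec, &page_list);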
Note that nr_activate needs to be an array, since the type of a page
may change during shrink_page_list() (see ClearPageSwapBacked()).
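
To make that concrete, here is the core of the shrink_page_list() hunk
below with explanatory comments added (the comments are mine, not part
of the patch). Since page_is_file_cache() is simply
!PageSwapBacked(page), the index has to be sampled at activation time,
after any such transition has happened:

	if (!PageMlocked(page)) {
		int type = page_is_file_cache(page); /* 0 - anon, 1 - file */

		SetPageActive(page);
		pgactivate++;	/* keeps PGACTIVATE at one event per page */
		stat->nr_activate[type] += hpage_nr_pages(page); /* base pages */
		count_memcg_page_event(page, PGACTIVATE);
	}
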
Signed-off-by: Kirill Tkhai <ktkhai@...tuozzo.com>
---
 include/linux/vmstat.h |    2 +-
 mm/vmscan.c            |   15 +++++++--------
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2db8d60981fe..bdeda4b079fe 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,7 +26,7 @@ struct reclaim_stat {
 	unsigned nr_congested;
 	unsigned nr_writeback;
 	unsigned nr_immediate;
-	unsigned nr_activate;
+	unsigned nr_activate[2];
 	unsigned nr_ref_keep;
 	unsigned nr_unmap_fail;
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ac4806f0f332..84542004a277 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1107,6 +1107,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	unsigned nr_reclaimed = 0;
+	unsigned pgactivate = 0;
 
 	memset(stat, 0, sizeof(*stat));
 	cond_resched();
@@ -1466,8 +1467,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			try_to_free_swap(page);
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		if (!PageMlocked(page)) {
+			int type = page_is_file_cache(page);
 			SetPageActive(page);
-			stat->nr_activate++;
+			pgactivate++;
+			stat->nr_activate[type] += hpage_nr_pages(page);
 			count_memcg_page_event(page, PGACTIVATE);
 		}
 keep_locked:
@@ -1482,7 +1485,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	free_unref_page_list(&free_pages);
 
 	list_splice(&ret_pages, page_list);
-	count_vm_events(PGACTIVATE, stat->nr_activate);
+	count_vm_events(PGACTIVATE, pgactivate);
 
 	return nr_reclaimed;
 }
@@ -1807,7 +1810,6 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
 static noinline_for_stack void
 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
-	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	LIST_HEAD(pages_to_free);
 
@@ -1833,11 +1835,6 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		lru = page_lru(page);
 		add_page_to_lru_list(page, lruvec, lru);
 
-		if (is_active_lru(lru)) {
-			int file = is_file_lru(lru);
-			int numpages = hpage_nr_pages(page);
-			reclaim_stat->recent_rotated[file] += numpages;
-		}
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
@@ -1945,6 +1942,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_DIRECT,
 				   nr_reclaimed);
 	}
+	reclaim_stat->recent_rotated[0] = stat.nr_activate[0];
+	reclaim_stat->recent_rotated[1] = stat.nr_activate[1];
 
 	putback_inactive_pages(lruvec, &page_list);
 