Message-ID: <1462309298.21143.9.camel@linux.intel.com>
Date: Tue, 03 May 2016 14:01:38 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Vladimir Davydov <vdavydov@...tuozzo.com>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...e.cz>,
Minchan Kim <minchan@...nel.org>,
Hugh Dickins <hughd@...gle.com>
Cc: "Kirill A.Shutemov" <kirill.shutemov@...ux.intel.com>,
Andi Kleen <andi@...stfloor.org>,
Aaron Lu <aaron.lu@...el.com>,
Huang Ying <ying.huang@...el.com>,
linux-mm <linux-mm@...ck.org>, linux-kernel@...r.kernel.org
Subject: [PATCH 2/7] mm: Group the processing of anonymous pages to be
swapped in shrink_page_list

This is a cleanup patch that reorganizes the processing of anonymous
pages in shrink_page_list.

We defer the swap-out processing of anonymous pages in shrink_page_list
and gather those pages on a separate list. This prepares for batching
the swapping out of pages. The processing of the list of anonymous
pages to be swapped is consolidated in the new function
shrink_anon_page_list.

Functionally, there is no change in the logic of how pages are
processed; only the order in which the anonymous pages and the
file-backed pages are processed in shrink_page_list changes.
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
mm/vmscan.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 77 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5542005..132ba02 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,6 +1083,58 @@ static void pg_finish(struct page *page,
 	}
 }
 
+static unsigned long shrink_anon_page_list(struct list_head *page_list,
+				struct zone *zone,
+				struct scan_control *sc,
+				struct list_head *swap_pages,
+				struct list_head *ret_pages,
+				struct list_head *free_pages,
+				enum ttu_flags ttu_flags,
+				int *pgactivate,
+				int n,
+				bool clean)
+{
+	unsigned long nr_reclaimed = 0;
+	enum pg_result pg_dispose;
+
+	while (n > 0) {
+		struct page *page;
+		int swap_ret = SWAP_SUCCESS;
+
+		--n;
+		if (list_empty(swap_pages))
+			return nr_reclaimed;
+
+		page = lru_to_page(swap_pages);
+
+		list_del(&page->lru);
+
+		/*
+		 * Anonymous process memory has backing store?
+		 * Try to allocate it some swap space here.
+		 */
+
+		if (!add_to_swap(page, page_list)) {
+			pg_finish(page, PG_ACTIVATE_LOCKED, swap_ret, &nr_reclaimed,
+				pgactivate, ret_pages, free_pages);
+			continue;
+		}
+
+		if (clean)
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+				PAGEREF_RECLAIM_CLEAN, true, true, &swap_ret, page);
+		else
+			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
+				PAGEREF_RECLAIM, true, true, &swap_ret, page);
+
+		pg_finish(page, pg_dispose, swap_ret, &nr_reclaimed,
+			pgactivate, ret_pages, free_pages);
+	}
+	return nr_reclaimed;
+}
+
+
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1099,6 +1151,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
+	LIST_HEAD(swap_pages);
+	LIST_HEAD(swap_pages_clean);
 	int pgactivate = 0;
 	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_dirty = 0;
@@ -1106,6 +1160,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_writeback = 0;
 	unsigned long nr_immediate = 0;
+	unsigned long nr_swap = 0;
+	unsigned long nr_swap_clean = 0;
 
 	cond_resched();
 
@@ -1271,12 +1327,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				pg_dispose = PG_KEEP_LOCKED;
 				goto finish;
 			}
-			if (!add_to_swap(page, page_list)) {
-				pg_dispose = PG_ACTIVATE_LOCKED;
-				goto finish;
+			if (references == PAGEREF_RECLAIM_CLEAN) {
+				list_add(&page->lru, &swap_pages_clean);
+				++nr_swap_clean;
+			} else {
+				list_add(&page->lru, &swap_pages);
+				++nr_swap;
 			}
-			lazyfree = true;
-			may_enter_fs = 1;
+
+			pg_dispose = PG_NEXT;
+			goto finish;
+
 		}
 
 		pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
@@ -1288,6 +1349,17 @@ finish:
 	}
 
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+				&swap_pages_clean, &ret_pages,
+				&free_pages, ttu_flags,
+				&pgactivate, nr_swap_clean,
+				true);
+	nr_reclaimed += shrink_anon_page_list(page_list, zone, sc,
+				&swap_pages, &ret_pages,
+				&free_pages, ttu_flags,
+				&pgactivate, nr_swap,
+				false);
+
 	mem_cgroup_uncharge_list(&free_pages);
 	try_to_unmap_flush();
 	free_hot_cold_page_list(&free_pages, true);
 
--
2.5.5