Message-ID: <1462309346.21143.11.camel@linux.intel.com>
Date:	Tue, 03 May 2016 14:02:26 -0700
From:	Tim Chen <tim.c.chen@...ux.intel.com>
To:	Andrew Morton <akpm@...ux-foundation.org>,
	Vladimir Davydov <vdavydov@...tuozzo.com>,
	Johannes Weiner <hannes@...xchg.org>,
	Michal Hocko <mhocko@...e.cz>,
	Minchan Kim <minchan@...nel.org>,
	Hugh Dickins <hughd@...gle.com>
Cc:	"Kirill A.Shutemov" <kirill.shutemov@...ux.intel.com>,
	Andi Kleen <andi@...stfloor.org>,
	Aaron Lu <aaron.lu@...el.com>,
	Huang Ying <ying.huang@...el.com>,
	linux-mm <linux-mm@...ck.org>, linux-kernel@...r.kernel.org
Subject: [PATCH 4/7] mm: Shrink page list batch allocates swap slots for
 page swapping

In the shrink page list path, we take advantage of the bulk allocation
of swap entries provided by the new get_swap_pages function. This
reduces contention on a swap device's swap_info lock. When memory is
low and the system is actively trying to reclaim memory, both the
direct reclaim path and kswapd contend on this lock when they access
the same swap partition.

Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
 mm/vmscan.c | 64 +++++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 43 insertions(+), 21 deletions(-)
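
Not part of the patch, just an illustrative note. Below is a minimal
userspace sketch, under assumed mock names, of why the batching here
helps: get_swap_slots_batch() plays the role the patch gives to
get_swap_pages(), amortizing a single lock acquisition over up to
SWAP_BATCH slot allocations instead of taking the swap device's lock
once per page. None of these identifiers are kernel APIs.

/*
 * Minimal userspace sketch (NOT kernel code) of the batching idea.
 * All names are illustrative; get_swap_slots_batch() stands in for
 * get_swap_pages() and swap_lock for the swap_info lock.
 */
#include <pthread.h>
#include <stdio.h>

#define SWAP_BATCH 64
#define NR_SLOTS   1024

static pthread_mutex_t swap_lock = PTHREAD_MUTEX_INITIALIZER;
static int next_slot;			/* next unused mock swap slot */

/* Unbatched: one lock round trip per slot (the old behavior). */
static int get_swap_slot(void)
{
	int slot = -1;

	pthread_mutex_lock(&swap_lock);
	if (next_slot < NR_SLOTS)
		slot = next_slot++;
	pthread_mutex_unlock(&swap_lock);
	return slot;
}

/*
 * Batched: hand out up to n slots under a single lock acquisition.
 * Returns how many were actually allocated; the caller must give
 * back any slot it does not end up using.
 */
static int get_swap_slots_batch(int n, int *out)
{
	int m = 0;

	pthread_mutex_lock(&swap_lock);
	while (m < n && m < SWAP_BATCH && next_slot < NR_SLOTS)
		out[m++] = next_slot++;
	pthread_mutex_unlock(&swap_lock);
	return m;
}

int main(void)
{
	int slots[SWAP_BATCH];
	int m = get_swap_slots_batch(8, slots);

	printf("batched:   %d slots for 1 lock acquisition\n", m);
	printf("unbatched: slot %d alone cost 1 lock acquisition\n",
	       get_swap_slot());
	return 0;
}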

diff --git a/mm/vmscan.c b/mm/vmscan.c
index e36d8a7..310e2b2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1096,38 +1096,60 @@ static unsigned long shrink_anon_page_list(struct list_head *page_list,
 {
 	unsigned long nr_reclaimed = 0;
 	enum pg_result pg_dispose;
+	swp_entry_t swp_entries[SWAP_BATCH];
+	struct page *page;
+	int m, i, k;
 
 	while (n > 0) {
-		struct page *page;
 		int swap_ret = SWAP_SUCCESS;
 
-		--n;
-		if (list_empty(swap_pages))
-		       return nr_reclaimed;
+		m = get_swap_pages(n, swp_entries);
+		if (!m)
+			goto no_swap_slots;
+		n -= m;
+		for (i = 0; i < m; ++i) {
+			if (list_empty(swap_pages)) {
+				/* free any leftover swap slots */
+				for (k = i; k < m; ++k)
+					swapcache_free(swp_entries[k]);
+				return nr_reclaimed;
+			}
+			page = lru_to_page(swap_pages);
 
-		page = lru_to_page(swap_pages);
+			list_del(&page->lru);
 
-		list_del(&page->lru);
+			/*
+			 * Anonymous process memory has backing store?
+			 * Try to allocate it some swap space here.
+			 */
+
+			if (!add_to_swap(page, page_list, &swp_entries[i])) {
+				pg_finish(page, PG_ACTIVATE_LOCKED, swap_ret,
+						&nr_reclaimed, pgactivate,
+						ret_pages, free_pages);
+				continue;
+			}
 
-		/*
-		 * Anonymous process memory has backing store?
-		 * Try to allocate it some swap space here.
-		 */
+			if (clean)
+				pg_dispose = handle_pgout(page_list, zone, sc,
+						ttu_flags, PAGEREF_RECLAIM_CLEAN,
+						true, true, &swap_ret, page);
+			else
+				pg_dispose = handle_pgout(page_list, zone, sc,
+						ttu_flags, PAGEREF_RECLAIM,
+						true, true, &swap_ret, page);
 
-		if (!add_to_swap(page, page_list, NULL)) {
-			pg_finish(page, PG_ACTIVATE_LOCKED, swap_ret, &nr_reclaimed,
+			pg_finish(page, pg_dispose, swap_ret, &nr_reclaimed,
 					pgactivate, ret_pages, free_pages);
-			continue;
 		}
+	}
+	return nr_reclaimed;
 
-		if (clean)
-			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
-				PAGEREF_RECLAIM_CLEAN, true, true, &swap_ret, page);
-		else
-			pg_dispose = handle_pgout(page_list, zone, sc, ttu_flags,
-				PAGEREF_RECLAIM, true, true, &swap_ret, page);
-
-		pg_finish(page, pg_dispose, swap_ret, &nr_reclaimed,
+no_swap_slots:
+	while (!list_empty(swap_pages)) {
+		page = lru_to_page(swap_pages);
+		list_del(&page->lru);
+		pg_finish(page, PG_ACTIVATE_LOCKED, 0, &nr_reclaimed,
 				pgactivate, ret_pages, free_pages);
 	}
 	return nr_reclaimed;
-- 
2.5.5
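
One more illustrative note, again not part of the patch: when the page
list runs dry partway through a batch, the hunk above hands the unused
slots back via swapcache_free(). The standalone userspace sketch
below, with made-up names (put_slot(), consume_batch()), demonstrates
that give-back invariant.

/*
 * Standalone userspace sketch (NOT kernel code): every slot taken
 * from a batch but never attached to a page must be handed back.
 * put_slot() stands in for swapcache_free(); names are made up.
 */
#include <stdio.h>

#define SWAP_BATCH 64

static int returned[SWAP_BATCH];	/* slots given back */
static int nr_returned;

static void put_slot(int slot)
{
	returned[nr_returned++] = slot;
}

/* Pair a batch of m slots with npages pages, freeing the excess. */
static void consume_batch(const int *entries, int m, int npages)
{
	int i, k;

	for (i = 0; i < m; ++i) {
		if (i >= npages) {
			/* page list ran dry: free leftover slots */
			for (k = i; k < m; ++k)
				put_slot(entries[k]);
			return;
		}
		printf("page %d gets slot %d\n", i, entries[i]);
	}
}

int main(void)
{
	int batch[4] = { 10, 11, 12, 13 };

	consume_batch(batch, 4, 2);	/* 4 slots but only 2 pages */
	printf("%d slots returned\n", nr_returned);
	return 0;
}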
