Message-Id: <1470760673-12420-7-git-send-email-ying.huang@intel.com>
Date:	Tue,  9 Aug 2016 09:37:48 -0700
From:	"Huang, Ying" <ying.huang@...el.com>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	tim.c.chen@...el.com, dave.hansen@...el.com, andi.kleen@...el.com,
	aaron.lu@...el.com,
	"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
	Andrea Arcangeli <aarcange@...hat.com>, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org, Huang Ying <ying.huang@...el.com>,
	Hugh Dickins <hughd@...gle.com>, Shaohua Li <shli@...nel.org>,
	Minchan Kim <minchan@...nel.org>,
	Rik van Riel <riel@...hat.com>
Subject: [RFC 06/11] mm, THP, swap: Add get_huge_swap_page()

From: Huang Ying <ying.huang@...el.com>

A variation of get_swap_page(), get_huge_swap_page(), is added to
allocate a whole swap cluster (512 swap slots) via the swap cluster
allocation function.  A fairly simple algorithm is used: only the first
swap device in the priority list is tried for the cluster allocation.
If that attempt fails, the function fails, and the caller falls back to
allocating single swap slots instead.  This works well enough for
normal cases.

This will be used for THP (Transparent Huge Page) swap support, where
get_huge_swap_page() allocates one swap cluster for each THP swapped
out.
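
For illustration, a hypothetical caller (not part of this patch) could
combine the new interface with the fallback described above roughly as
follows; alloc_swap_for_thp() and the exact fallback policy are made up
for this sketch:

	/* Hypothetical sketch of the intended usage, not part of this patch. */
	static swp_entry_t alloc_swap_for_thp(void)
	{
		swp_entry_t entry;

		/* Try to grab a whole swap cluster (HPAGE_PMD_NR slots) first. */
		entry = get_huge_swap_page();
		if (entry.val)
			return entry;

		/*
		 * Cluster allocation failed; the caller is expected to split
		 * the THP and fall back to single swap slots, e.g. via
		 * get_swap_page() for each subpage.
		 */
		return (swp_entry_t) {0};
	}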

Cc: Andrea Arcangeli <aarcange@...hat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Cc: Hugh Dickins <hughd@...gle.com>
Cc: Shaohua Li <shli@...nel.org>
Cc: Minchan Kim <minchan@...nel.org>
Cc: Rik van Riel <riel@...hat.com>
Signed-off-by: "Huang, Ying" <ying.huang@...el.com>
---
 include/linux/swap.h | 21 ++++++++++++++++++++-
 mm/swapfile.c        | 29 +++++++++++++++++++++++------
 2 files changed, 43 insertions(+), 7 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 6988bce..95a526e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -399,7 +399,7 @@ static inline long get_nr_swap_pages(void)
 }
 
 extern void si_swapinfo(struct sysinfo *);
-extern swp_entry_t get_swap_page(void);
+extern swp_entry_t __get_swap_page(bool huge);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -419,6 +419,20 @@ extern bool reuse_swap_page(struct page *, int *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
+static inline swp_entry_t get_swap_page(void)
+{
+	return __get_swap_page(false);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern swp_entry_t get_huge_swap_page(void);
+#else
+static inline swp_entry_t get_huge_swap_page(void)
+{
+	return (swp_entry_t) {0};
+}
+#endif
+
 #else /* CONFIG_SWAP */
 
 #define swap_address_space(entry)		(NULL)
@@ -525,6 +539,11 @@ static inline swp_entry_t get_swap_page(void)
 	return entry;
 }
 
+static inline swp_entry_t get_huge_swap_page(void)
+{
+	return (swp_entry_t) {0};
+}
+
 #endif /* CONFIG_SWAP */
 
 #ifdef CONFIG_MEMCG
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d710e0e..5cd78c7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -747,14 +747,15 @@ static unsigned long swap_alloc_huge_cluster(struct swap_info_struct *si)
 	return offset;
 }
 
-swp_entry_t get_swap_page(void)
+swp_entry_t __get_swap_page(bool huge)
 {
 	struct swap_info_struct *si, *next;
 	pgoff_t offset;
+	int nr_pages = huge_cluster_nr_entries(huge);
 
-	if (atomic_long_read(&nr_swap_pages) <= 0)
+	if (atomic_long_read(&nr_swap_pages) < nr_pages)
 		goto noswap;
-	atomic_long_dec(&nr_swap_pages);
+	atomic_long_sub(nr_pages, &nr_swap_pages);
 
 	spin_lock(&swap_avail_lock);
 
@@ -782,10 +783,15 @@ start_over:
 		}
 
 		/* This is called for allocating swap entry for cache */
-		offset = scan_swap_map(si, SWAP_HAS_CACHE);
+		if (likely(nr_pages == 1))
+			offset = scan_swap_map(si, SWAP_HAS_CACHE);
+		else
+			offset = swap_alloc_huge_cluster(si);
 		spin_unlock(&si->lock);
 		if (offset)
 			return swp_entry(si->type, offset);
+		else if (unlikely(nr_pages != 1))
+			goto fail_alloc;
 		pr_debug("scan_swap_map of si %d failed to find offset\n",
 		       si->type);
 		spin_lock(&swap_avail_lock);
@@ -805,12 +811,23 @@ nextsi:
 	}
 
 	spin_unlock(&swap_avail_lock);
-
-	atomic_long_inc(&nr_swap_pages);
+fail_alloc:
+	atomic_long_add(nr_pages, &nr_swap_pages);
 noswap:
 	return (swp_entry_t) {0};
 }
 
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+swp_entry_t get_huge_swap_page(void)
+{
+	if (SWAPFILE_CLUSTER != HPAGE_PMD_NR)
+		return (swp_entry_t) {0};
+
+	return __get_swap_page(true);
+}
+#endif
+
 /* The only caller of this function is now suspend routine */
 swp_entry_t get_swap_page_of_type(int type)
 {
-- 
2.8.1
