Date:	Mon, 14 Oct 2013 17:12:01 -0700
From:	Ning Qu <quning@...gle.com>
To:	Andrea Arcangeli <aarcange@...hat.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
	Hugh Dickins <hughd@...gle.com>
Cc:	Al Viro <viro@...iv.linux.org.uk>, Hugh Dickins <hughd@...gle.com>,
	Wu Fengguang <fengguang.wu@...el.com>, Jan Kara <jack@...e.cz>,
	Mel Gorman <mgorman@...e.de>, linux-mm@...ck.org,
	Andi Kleen <ak@...ux.intel.com>,
	Matthew Wilcox <willy@...ux.intel.com>,
	Hillf Danton <dhillf@...il.com>, Dave Hansen <dave@...1.net>,
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
	linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
	Ning Qu <quning@...gle.com>
Subject: [PATCH 02/12] mm, thp, tmpfs: support adding huge pages to the page
 cache for tmpfs

When replacing a page already in the page cache, we assume the huge page
has been split before getting here.

When adding a new page to the page cache, huge pages are now supported.

Also refactor shmem_add_to_page_cache() into separate replace and insert
helpers; a standalone sketch of the insertion scheme follows the diffstat
below.

Signed-off-by: Ning Qu <quning@...il.com>
---
 mm/shmem.c | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 88 insertions(+), 9 deletions(-)
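
As a reading aid before the diff: a standalone userspace sketch of the
all-or-nothing insertion scheme that shmem_insert_page_page_cache()
implements below. A plain array stands in for mapping->page_tree, and
SLOTS and model_insert() are illustrative names, not kernel API; this is
a model of the technique, not the implementation.

#include <errno.h>
#include <stddef.h>

#define SLOTS 64			/* toy index space */

static void *tree[SLOTS];		/* NULL means an empty slot */

/*
 * Insert `page` into nr contiguous slots starting at `index`, all or
 * nothing: on a collision, already-filled slots are rolled back, and
 * -EEXIST is kept only when the very first slot collided; otherwise
 * the huge page simply did not fit, hence -ENOSPC.
 */
static int model_insert(void *page, unsigned long index, int nr)
{
	int error = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (tree[index + i]) {
			error = -EEXIST;
			i--;	/* nothing was inserted at index + i */
			goto err;
		}
		tree[index + i] = page;
	}
	return 0;
err:
	if (i >= 0)	/* at least one slot was filled before the collision */
		error = -ENOSPC;
	for (; i >= 0; i--)
		tree[index + i] = NULL;
	return error;
}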

diff --git a/mm/shmem.c b/mm/shmem.c
index a857ba8..447bd14 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -277,27 +277,23 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 }
 
 /*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+ * Replace the expected swap entry with a page cache entry
  */
-static int shmem_add_to_page_cache(struct page *page,
+static int shmem_replace_page_page_cache(struct page *page,
 				   struct address_space *mapping,
 				   pgoff_t index, gfp_t gfp, void *expected)
 {
 	int error;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(!PageSwapBacked(page));
+	BUG_ON(PageTransHugeCache(page));
 
 	page_cache_get(page);
 	page->mapping = mapping;
 	page->index = index;
 
 	spin_lock_irq(&mapping->tree_lock);
-	if (!expected)
-		error = radix_tree_insert(&mapping->page_tree, index, page);
-	else
-		error = shmem_radix_tree_replace(mapping, index, expected,
-								 page);
+
+	error = shmem_radix_tree_replace(mapping, index, expected, page);
 	if (!error) {
 		mapping->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
@@ -312,6 +308,87 @@ static int shmem_add_to_page_cache(struct page *page,
 }
 
 /*
+ * Insert a new page into the page cache
+ */
+static int shmem_insert_page_page_cache(struct page *page,
+				   struct address_space *mapping,
+				   pgoff_t index, gfp_t gfp)
+{
+	int error;
+	int i, nr;
+
+	if (PageTransHugeCache(page))
+		BUILD_BUG_ON(HPAGE_CACHE_NR > RADIX_TREE_PRELOAD_NR);
+
+	nr = hpagecache_nr_pages(page);
+
+	error = radix_tree_maybe_preload_contig(nr, gfp & ~__GFP_HIGHMEM);
+	if (error)
+		return error;
+
+	spin_lock_irq(&mapping->tree_lock);
+	page_cache_get(page);
+	page->index = index;
+	page->mapping = mapping;
+	for (i = 0; i < nr; i++) {
+		error = radix_tree_insert(&mapping->page_tree,
+				index + i, page);
+		/*
+		 * In the middle of a THP we can collide with a small page
+		 * which was established before THP page cache was enabled,
+		 * or by another VMA with bad alignment (most likely MAP_FIXED).
+		 */
+		if (error) {
+			i--; /* failed to insert anything at index + i */
+			goto err;
+		}
+	}
+	radix_tree_preload_end();
+	mapping->nrpages += nr;
+	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, nr);
+	__mod_zone_page_state(page_zone(page), NR_SHMEM, nr);
+	if (PageTransHugeCache(page))
+		__inc_zone_page_state(page, NR_FILE_TRANSPARENT_HUGEPAGES);
+	spin_unlock_irq(&mapping->tree_lock);
+	return 0;
+err:
+	radix_tree_preload_end();
+	if (i >= 0) /* partial insert; keep -EEXIST when nothing was added */
+		error = -ENOSPC; /* no space for a huge page */
+
+	/* Leave page->index set: truncation relies upon it */
+	page->mapping = NULL;
+	for (; i >= 0; i--)
+		radix_tree_delete(&mapping->page_tree, index + i);
+
+	spin_unlock_irq(&mapping->tree_lock);
+	page_cache_release(page);
+	return error;
+}
+
+/*
+ * Like add_to_page_cache_locked, but error if expected item has gone.
+ */
+static int shmem_add_to_page_cache(struct page *page,
+				   struct address_space *mapping,
+				   pgoff_t index, gfp_t gfp, void *expected)
+{
+	int error;
+
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!PageSwapBacked(page));
+
+	if (expected) {
+		BUG_ON(PageTransHugeCache(page));
+		error = shmem_replace_page_page_cache(page, mapping, index, gfp,
+							expected);
+	} else
+		error = shmem_insert_page_page_cache(page, mapping, index, gfp);
+
+	return error;
+}
+
+/*
  * Like delete_from_page_cache, but substitutes swap for page.
  */
 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
@@ -319,6 +396,8 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 	struct address_space *mapping = page->mapping;
 	int error;
 
+	BUG_ON(PageTransHugeCache(page));
+
 	spin_lock_irq(&mapping->tree_lock);
 	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
 	page->mapping = NULL;
-- 
1.8.4
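
A small driver for the model_insert() sketch above (same toy definitions
assumed); it exercises the mid-THP collision case described in the
comment in shmem_insert_page_page_cache():

#include <stdio.h>

int main(void)
{
	static char huge_page, small_page;

	tree[5] = &small_page;	/* a pre-existing small page mid-range */

	/* Slot 5 collides after slot 4 was filled: rolled back, -ENOSPC. */
	printf("%d\n", model_insert(&huge_page, 4, 4));

	/* The very first slot collides: the original -EEXIST is kept. */
	printf("%d\n", model_insert(&huge_page, 5, 4));

	/* A clear range succeeds. */
	printf("%d\n", model_insert(&huge_page, 8, 4));
	return 0;
}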

