Date: Tue,  4 Jun 2024 12:24:49 +0800
From: alexs@...nel.org
To: Andrew Morton <akpm@...ux-foundation.org>,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	izik.eidus@...ellosystems.com,
	willy@...radead.org,
	aarcange@...hat.com,
	chrisw@...s-sol.org,
	hughd@...gle.com,
	david@...hat.com
Cc: "Alex Shi (tencent)" <alexs@...nel.org>
Subject: [PATCH 07/10] mm/ksm: use folio in unstable_tree_search_insert

From: "Alex Shi (tencent)" <alexs@...nel.org>

The calling path already has the folio in hand, so convert
unstable_tree_search_insert() to take and return folios, saving a few
compound_head() checks.
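
To illustrate the kind of check being saved, a minimal sketch (not part of
the patch) using the generic folio helpers: page_folio() must resolve a
possible tail page via compound_head(), while folio_pfn() works on the head
page it already holds:

	/* with only a struct page, folio-level use re-derives the head page */
	struct folio *f = page_folio(page);	/* compound_head() lookup here */

	/* with the folio passed down, the head page is already known */
	unsigned long pfn = folio_pfn(f);	/* no compound_head() needed */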

Signed-off-by: Alex Shi (tencent) <alexs@...nel.org>
---
 mm/ksm.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 24de562b64e1..14a7ca53fc91 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2194,15 +2194,14 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
  */
 static
 struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
-					      struct page *page,
-					      struct page **tree_pagep)
+						  struct folio *folio, struct folio **tree_foliop)
 {
 	struct rb_node **new;
 	struct rb_root *root;
 	struct rb_node *parent = NULL;
 	int nid;
 
-	nid = get_kpfn_nid(page_to_pfn(page));
+	nid = get_kpfn_nid(folio_pfn(folio));
 	root = root_unstable_tree + nid;
 	new = &root->rb_node;
 
@@ -2220,12 +2219,12 @@ struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_ite
 		/*
 		 * Don't substitute a ksm page for a forked page.
 		 */
-		if (page == tree_page) {
+		if (folio == page_folio(tree_page)) {
 			put_page(tree_page);
 			return NULL;
 		}
 
-		ret = memcmp_pages(page, tree_page);
+		ret = memcmp_pages(folio_page(folio, 0), tree_page);
 
 		parent = *new;
 		if (ret < 0) {
@@ -2244,7 +2243,7 @@ struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_ite
 			put_page(tree_page);
 			return NULL;
 		} else {
-			*tree_pagep = tree_page;
+			*tree_foliop = page_folio(tree_page);
 			return tree_rmap_item;
 		}
 	}
@@ -2312,6 +2311,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 	struct mm_struct *mm = rmap_item->mm;
 	struct ksm_rmap_item *tree_rmap_item;
 	struct page *tree_page = NULL;
+	struct folio *tree_folio = NULL;
 	struct ksm_stable_node *stable_node;
 	struct page *kpage;
 	unsigned int checksum;
@@ -2411,7 +2411,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 			return;
 	}
 	tree_rmap_item =
-		unstable_tree_search_insert(rmap_item, page, &tree_page);
+		unstable_tree_search_insert(rmap_item, folio, &tree_folio);
 	if (tree_rmap_item) {
 		bool split;
 
-- 
2.43.0

