lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240618232648.4090299-4-ryan.roberts@arm.com>
Date: Wed, 19 Jun 2024 00:26:43 +0100
From: Ryan Roberts <ryan.roberts@....com>
To: Andrew Morton <akpm@...ux-foundation.org>,
	Chris Li <chrisl@...nel.org>,
	Kairui Song <kasong@...cent.com>,
	"Huang, Ying" <ying.huang@...el.com>,
	Kalesh Singh <kaleshsingh@...gle.com>,
	Barry Song <baohua@...nel.org>,
	Hugh Dickins <hughd@...gle.com>,
	David Hildenbrand <david@...hat.com>
Cc: Ryan Roberts <ryan.roberts@....com>,
	linux-kernel@...r.kernel.org,
	linux-mm@...ck.org
Subject: [RFC PATCH v1 3/5] mm: swap: Track allocation order for clusters

Add an `order` field to `struct swap_cluster_info`, which applies to
allocated clusters (i.e. those not on the free list) and records the
order of the swap entries that the cluster should be used to allocate. A
future commit
will use this information to scan partially filled clusters to find
appropriate free swap entries for allocation. Note that it is still
possible that order-0 swap entries will be allocated in clusters that
indicate a higher order due to the order-0 scanning mechanism.

The maximum order we ever expect to see is 13 - PMD-size on arm64 with
64K base pages. 13 fits into 4 bits, so let's steal 4 unused flags bits
for this purpose to avoid making `struct swap_cluster_info` any bigger.

Signed-off-by: Ryan Roberts <ryan.roberts@....com>
---
 include/linux/swap.h |  3 ++-
 mm/swapfile.c        | 24 +++++++++++++++---------
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 66566251ba31..2a40fe02d281 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -255,7 +255,8 @@ struct swap_cluster_info {
 				 * cluster
 				 */
 	unsigned int data:24;
-	unsigned int flags:8;
+	unsigned int flags:4;
+	unsigned int order:4;
 };
 #define CLUSTER_FLAG_FREE 1 /* This cluster is free */
 #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 30e79739dfdc..7b13f02a7ac2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -307,11 +307,13 @@ static inline void cluster_set_count(struct swap_cluster_info *info,
 	info->data = c;
 }

-static inline void cluster_set_count_flag(struct swap_cluster_info *info,
-					 unsigned int c, unsigned int f)
+static inline void cluster_set_count_flag_order(struct swap_cluster_info *info,
+						unsigned int c, unsigned int f,
+						unsigned int o)
 {
 	info->flags = f;
 	info->data = c;
+	info->order = o;
 }

 static inline unsigned int cluster_next(struct swap_cluster_info *info)
@@ -330,6 +332,7 @@ static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 {
 	info->flags = f;
 	info->data = n;
+	info->order = 0;
 }

 static inline bool cluster_is_free(struct swap_cluster_info *info)
@@ -346,6 +349,7 @@ static inline void cluster_set_null(struct swap_cluster_info *info)
 {
 	info->flags = CLUSTER_FLAG_NEXT_NULL;
 	info->data = 0;
+	info->order = 0;
 }

 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
@@ -521,13 +525,14 @@ static void swap_users_ref_free(struct percpu_ref *ref)
 	complete(&si->comp);
 }

-static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
+static void alloc_cluster(struct swap_info_struct *si, unsigned long idx,
+			  int order)
 {
 	struct swap_cluster_info *ci = si->cluster_info;

 	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 	cluster_list_del_first(&si->free_clusters, ci);
-	cluster_set_count_flag(ci + idx, 0, 0);
+	cluster_set_count_flag_order(ci + idx, 0, 0, order);
 }

 static void free_cluster(struct swap_info_struct *si, unsigned long idx)
@@ -556,14 +561,15 @@ static void free_cluster(struct swap_info_struct *si, unsigned long idx)
  */
 static void add_cluster_info_page(struct swap_info_struct *p,
 	struct swap_cluster_info *cluster_info, unsigned long page_nr,
-	unsigned long count)
+	int order)
 {
 	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+	unsigned long count = 1 << order;

 	if (!cluster_info)
 		return;
 	if (cluster_is_free(&cluster_info[idx]))
-		alloc_cluster(p, idx);
+		alloc_cluster(p, idx, order);

 	VM_BUG_ON(cluster_count(&cluster_info[idx]) + count > SWAPFILE_CLUSTER);
 	cluster_set_count(&cluster_info[idx],
@@ -577,7 +583,7 @@ static void add_cluster_info_page(struct swap_info_struct *p,
 static void inc_cluster_info_page(struct swap_info_struct *p,
 	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 {
-	add_cluster_info_page(p, cluster_info, page_nr, 1);
+	add_cluster_info_page(p, cluster_info, page_nr, 0);
 }

 /*
@@ -964,7 +970,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
 			goto done;
 	}
 	memset(si->swap_map + offset, usage, nr_pages);
-	add_cluster_info_page(si, si->cluster_info, offset, nr_pages);
+	add_cluster_info_page(si, si->cluster_info, offset, order);
 	unlock_cluster(ci);

 	swap_range_alloc(si, offset, nr_pages);
@@ -1060,7 +1066,7 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)

 	ci = lock_cluster(si, offset);
 	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
-	cluster_set_count_flag(ci, 0, 0);
+	cluster_set_count_flag_order(ci, 0, 0, 0);
 	free_cluster(si, idx);
 	unlock_cluster(ci);
 	swap_range_free(si, offset, SWAPFILE_CLUSTER);
--
2.43.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ