[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241230174621.61185-9-ryncsn@gmail.com>
Date: Tue, 31 Dec 2024 01:46:16 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Chris Li <chrisl@...nel.org>,
Barry Song <v-songbaohua@...o.com>,
Ryan Roberts <ryan.roberts@....com>,
Hugh Dickins <hughd@...gle.com>,
Yosry Ahmed <yosryahmed@...gle.com>,
"Huang, Ying" <ying.huang@...ux.alibaba.com>,
Nhat Pham <nphamcs@...il.com>,
Johannes Weiner <hannes@...xchg.org>,
Kalesh Singh <kaleshsingh@...gle.com>,
linux-kernel@...r.kernel.org,
Kairui Song <kasong@...cent.com>
Subject: [PATCH v3 08/13] mm, swap: use an enum to define all cluster flags and wrap flags changes
From: Kairui Song <kasong@...cent.com>
Currently, we are only using flags to indicate which list the cluster
is on. Using one bit for each list type might be a waste: as the number
of list types grows, we will consume too many bits. Additionally, the
current mixed usage of '&' and '==' is a bit confusing.
Make it clean by using an enum to define all possible cluster
statuses. Only an off-list cluster will have the NONE (0) flag.
Also, use a wrapper to annotate and sanitize all flag settings
and list movements.
Suggested-by: Chris Li <chrisl@...nel.org>
Signed-off-by: Kairui Song <kasong@...cent.com>
---
include/linux/swap.h | 17 +++++++---
mm/swapfile.c | 75 +++++++++++++++++++++++---------------------
2 files changed, 52 insertions(+), 40 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 02120f1005d5..339d7f0192ff 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,10 +257,19 @@ struct swap_cluster_info {
u8 order;
struct list_head list;
};
-#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
-#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
-#define CLUSTER_FLAG_FRAG 4 /* This cluster is on nonfull list */
-#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */
+
+/* All on-list cluster must have a non-zero flag. */
+enum swap_cluster_flags {
+ CLUSTER_FLAG_NONE = 0, /* For temporary off-list cluster */
+ CLUSTER_FLAG_FREE,
+ CLUSTER_FLAG_NONFULL,
+ CLUSTER_FLAG_FRAG,
+ /* Clusters with flags above are allocatable */
+ CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG,
+ CLUSTER_FLAG_FULL,
+ CLUSTER_FLAG_DISCARD,
+ CLUSTER_FLAG_MAX,
+};
/*
* The first page in the swap file is the swap header, which is always marked
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 99fd0b0d84a2..7795a3d27273 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -403,7 +403,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
static inline bool cluster_is_free(struct swap_cluster_info *info)
{
- return info->flags & CLUSTER_FLAG_FREE;
+ return info->flags == CLUSTER_FLAG_FREE;
}
static inline unsigned int cluster_index(struct swap_info_struct *si,
@@ -434,6 +434,27 @@ static inline void unlock_cluster(struct swap_cluster_info *ci)
spin_unlock(&ci->lock);
}
+static void cluster_move(struct swap_info_struct *si,
+ struct swap_cluster_info *ci, struct list_head *list,
+ enum swap_cluster_flags new_flags)
+{
+ VM_WARN_ON(ci->flags == new_flags);
+ BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
+
+ if (ci->flags == CLUSTER_FLAG_NONE) {
+ list_add_tail(&ci->list, list);
+ } else {
+ if (ci->flags == CLUSTER_FLAG_FRAG) {
+ VM_WARN_ON(!si->frag_cluster_nr[ci->order]);
+ si->frag_cluster_nr[ci->order]--;
+ }
+ list_move_tail(&ci->list, list);
+ }
+ ci->flags = new_flags;
+ if (new_flags == CLUSTER_FLAG_FRAG)
+ si->frag_cluster_nr[ci->order]++;
+}
+
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
struct swap_cluster_info *ci)
@@ -447,10 +468,8 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
*/
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
-
- VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
- list_move_tail(&ci->list, &si->discard_clusters);
- ci->flags = 0;
+ VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
+ cluster_move(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
schedule_work(&si->discard_work);
}
@@ -458,12 +477,7 @@ static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info
{
lockdep_assert_held(&si->lock);
lockdep_assert_held(&ci->lock);
-
- if (ci->flags)
- list_move_tail(&ci->list, &si->free_clusters);
- else
- list_add_tail(&ci->list, &si->free_clusters);
- ci->flags = CLUSTER_FLAG_FREE;
+ cluster_move(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
ci->order = 0;
}
@@ -479,6 +493,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
while (!list_empty(&si->discard_clusters)) {
ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
list_del(&ci->list);
+ /* Must clear flag when taking a cluster off-list */
+ ci->flags = CLUSTER_FLAG_NONE;
idx = cluster_index(si, ci);
spin_unlock(&si->lock);
@@ -519,9 +535,6 @@ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *
lockdep_assert_held(&si->lock);
lockdep_assert_held(&ci->lock);
- if (ci->flags & CLUSTER_FLAG_FRAG)
- si->frag_cluster_nr[ci->order]--;
-
/*
* If the swap is discardable, prepare discard the cluster
* instead of free it immediately. The cluster will be freed
@@ -573,13 +586,9 @@ static void dec_cluster_info_page(struct swap_info_struct *si,
return;
}
- if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
- VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
- if (ci->flags & CLUSTER_FLAG_FRAG)
- si->frag_cluster_nr[ci->order]--;
- list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]);
- ci->flags = CLUSTER_FLAG_NONFULL;
- }
+ if (ci->flags != CLUSTER_FLAG_NONFULL)
+ cluster_move(si, ci, &si->nonfull_clusters[ci->order],
+ CLUSTER_FLAG_NONFULL);
}
static bool cluster_reclaim_range(struct swap_info_struct *si,
@@ -663,11 +672,13 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
if (!(si->flags & SWP_WRITEOK))
return false;
+ VM_BUG_ON(ci->flags == CLUSTER_FLAG_NONE);
+ VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE);
+
if (cluster_is_free(ci)) {
- if (nr_pages < SWAPFILE_CLUSTER) {
- list_move_tail(&ci->list, &si->nonfull_clusters[order]);
- ci->flags = CLUSTER_FLAG_NONFULL;
- }
+ if (nr_pages < SWAPFILE_CLUSTER)
+ cluster_move(si, ci, &si->nonfull_clusters[order],
+ CLUSTER_FLAG_NONFULL);
ci->order = order;
}
@@ -675,14 +686,8 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
swap_range_alloc(si, nr_pages);
ci->count += nr_pages;
- if (ci->count == SWAPFILE_CLUSTER) {
- VM_BUG_ON(!(ci->flags &
- (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
- if (ci->flags & CLUSTER_FLAG_FRAG)
- si->frag_cluster_nr[ci->order]--;
- list_move_tail(&ci->list, &si->full_clusters);
- ci->flags = CLUSTER_FLAG_FULL;
- }
+ if (ci->count == SWAPFILE_CLUSTER)
+ cluster_move(si, ci, &si->full_clusters, CLUSTER_FLAG_FULL);
return true;
}
@@ -821,9 +826,7 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
while (!list_empty(&si->nonfull_clusters[order])) {
ci = list_first_entry(&si->nonfull_clusters[order],
struct swap_cluster_info, list);
- list_move_tail(&ci->list, &si->frag_clusters[order]);
- ci->flags = CLUSTER_FLAG_FRAG;
- si->frag_cluster_nr[order]++;
+ cluster_move(si, ci, &si->frag_clusters[order], CLUSTER_FLAG_FRAG);
offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
&found, order, usage);
frags++;
--
2.47.1
Powered by blists - more mailing lists